from diffusers import DiffusionPipeline
import gradio as gr
import PIL.Image
import numpy as np
import random
import torch
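# This Space wraps the CompVis "ldm-text2im-large-256" text-to-image latent
# diffusion model in a small Gradio demo built on the diffusers library.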
# DiffusionPipeline resolves the concrete pipeline class from the checkpoint's
# configuration; move it to the GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
ldm_pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(device)
def predict(prompt, steps=100, seed=42, guidance_scale=5.0):
    torch.cuda.empty_cache()
    # A fixed seed makes the sampling reproducible for a given prompt.
    generator = torch.manual_seed(seed)
    # With output_type="np" the pipeline returns the decoded images as a float
    # array of shape (batch, height, width, 3) with values in [0, 1].
    output = ldm_pipeline([prompt], generator=generator, num_inference_steps=steps,
                          guidance_scale=guidance_scale, output_type="np")
    image_processed = (output.images * 255).clip(0, 255).astype(np.uint8)
    return PIL.Image.fromarray(image_processed[0])
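# A minimal local sanity check, assuming the checkpoint is already downloaded
# (the prompt and file name below are illustrative only):
# predict("an oil painting of a lighthouse at dusk", steps=25, seed=0, guidance_scale=5.0).save("sample.png")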
random_seed = random.randint(0, 2147483647)
gr.Interface(
    predict,
    inputs=[
        gr.Textbox(label='Prompt', value='a chalk pastel drawing of a llama wearing a wizard hat'),
        gr.Slider(1, 100, label='Inference Steps', value=50, step=1),
        gr.Slider(0, 2147483647, label='Seed', value=random_seed, step=1),
        gr.Slider(1.0, 20.0, label='Guidance Scale - how much the prompt will influence the results', value=5.0, step=0.1),
    ],
    outputs=gr.Image(type="pil", elem_id="output_image"),
    css="#output_image{width: 256px}",
    title="ldm-text2im-large-256 - 🧨 diffusers library",
    description="This Space runs a text-to-image Latent Diffusion pipeline for the <a href=\"https://huggingface.co/CompVis/ldm-text2im-large-256\">ldm-text2im-large-256</a> model by <a href=\"https://huggingface.co/CompVis\">CompVis</a> using the <a href=\"https://github.com/huggingface/diffusers\">diffusers library</a>. The goal of this demo is to showcase the diffusers library; you can check how the code works here. If you want the state-of-the-art experience with Latent Diffusion text-to-image, check out the <a href=\"https://huggingface.co/spaces/multimodalart/latentdiffusion\">main Space</a>.",
).launch()
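# Note: for long-running GPU inference, chaining .queue() before .launch()
# (a standard Gradio option) helps keep concurrent requests from timing out.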