# !pip install diffusers
from diffusers import DiffusionPipeline, DDIMPipeline, DDPMPipeline, PNDMPipeline
import gradio as gr
import PIL.Image
import numpy as np
import random
import torch
model_id = "google/ddpm-celebahq-256"
# load the same pretrained model under three different scheduler pipelines
ddpm = DDPMPipeline.from_pretrained(model_id)
ddim = DDIMPipeline.from_pretrained(model_id)
pndm = PNDMPipeline.from_pretrained(model_id)
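# Note: all three pipelines load the same UNet weights (same model_id);
# they differ only in the sampling scheduler used at inference time.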
# run pipeline in inference (sample random noise and denoise)
def predict(steps=100, seed=42, scheduler="ddim"):
    generator = torch.manual_seed(seed)
    if scheduler == "ddim":
        image = ddim(generator=generator, num_inference_steps=steps)  # does not work (returns random noise)
        image = image["sample"]
    elif scheduler == "ddpm":
        image = ddpm(generator=generator)  # works, but does not allow setting the number of steps
    elif scheduler == "pndm":
        image = pndm(generator=generator, num_inference_steps=steps)  # does not work: it's still detected as DDPM behind the scenes and does not run PNDM steps
        image = image["sample"]
    # convert the output tensor (values in [-1, 1], NCHW layout) to a PIL image
    image_processed = image.cpu().permute(0, 2, 3, 1)
    image_processed = (image_processed + 1.0) * 127.5
    image_processed = image_processed.clamp(0, 255).numpy().astype(np.uint8)
    return PIL.Image.fromarray(image_processed[0])
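# Quick sanity check outside the Gradio UI (a sketch; sampling on CPU is slow):
# predict(steps=50, seed=0, scheduler="ddpm").save("sample.png")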
random_seed = random.randint(0, 2147483647)
gr.Interface(
    predict,
    inputs=[
        gr.inputs.Slider(1, 1000, label='Inference Steps', default=1000, step=1),
        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed),
        gr.inputs.Radio(["ddim", "ddpm", "pndm"], default="ddpm", label="Diffusion scheduler"),
    ],
    outputs="image",
).launch()