import gradio as gr
import torch
from PIL import Image
from diffusers import AutoPipelineForImage2Image
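# Gradio demo: image-to-image generation with stabilityai/sdxl-turbo through diffusers'
# AutoPipelineForImage2Image, using CUDA when available and falling back to CPU.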
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16) if torch.cuda.is_available() else AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo")
pipe = pipe.to(device)
def resize(value, img):
    # Accept either a file path or an already-loaded PIL image, then
    # resize it to a square of value x value pixels.
    if not isinstance(img, Image.Image):
        img = Image.open(img)
    img = img.resize((value, value))
    return img
def infer(source_img, prompt, steps, seed, strength):
    # Seed the generator so a given seed reproduces the same output.
    generator = torch.Generator(device).manual_seed(int(seed))
    source_image = resize(512, source_img)
    source_image.save('source.png')
    # SDXL-Turbo is trained to run without classifier-free guidance, so guidance_scale stays at 0.0.
    image = pipe(prompt, image=source_image, strength=strength, guidance_scale=0.0,
                 num_inference_steps=int(steps), generator=generator).images[0]
    return image
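# A minimal local sanity check (a sketch, not part of the app): it assumes a 512x512 PNG
# named "input.png" exists in the working directory; the prompt and seed are placeholders.
#
#     result = infer("input.png", "a watercolor painting of a lighthouse", steps=2, seed=12345, strength=0.5)
#     result.save("output.png")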
gr.Interface(fn=infer,
    inputs=[gr.Image(sources=["upload", "webcam", "clipboard"], type="pil", label="Raw Image."),
            gr.Textbox(label='Prompt Input Text. 77 Token (Keyword or Symbol) Maximum'),
            gr.Slider(2, 5, value=2, step=1, label='Number of Iterations'),
            gr.Slider(label="Seed", minimum=0, maximum=987654321987654321, step=1, randomize=True),
            gr.Slider(label='Strength', minimum=0.5, maximum=1, step=0.05, value=0.5)],
    outputs='image',
    title="Stable Diffusion XL Turbo Image to Image Pipeline CPU",
    description="For more information on Stable Diffusion XL 1.0 see https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0 "
                "Upload an Image (MUST be .PNG and 512x512 or 768x768), enter a Prompt, or let it just do its Thing, then click submit. "
                "10 Iterations takes about ~900-1200 seconds currently. For more information about Stable Diffusion or Suggestions for "
                "prompts, keywords, artists or styles see https://github.com/Maks-s/sd-akashic",
    article="Code Monkey: Manjushri").queue(max_size=5).launch()