# IP-Adapter Playground: a Hugging Face Space running on ZeroGPU.
import spaces
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image, AutoencoderKL
from diffusers.utils import load_image
import torch
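
# madebyollin/sdxl-vae-fp16-fix is a drop-in SDXL VAE that stays numerically stable in
# float16 (the stock SDXL VAE can produce NaNs when run in fp16).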
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
text_pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("cuda")
text_pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
text_pipeline.set_ip_adapter_scale(0.6)
image_pipeline = AutoPipelineForImage2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("cuda")
image_pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
image_pipeline.set_ip_adapter_scale(0.6)
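
# Both pipelines load the same SDXL checkpoint and the same IP-Adapter weights; the 0.6
# scale above is only a default and is overridden per request in the handlers below.
# (Assumption, not part of the original app: the second from_pretrained load could be
# avoided by sharing components, e.g. AutoPipelineForImage2Image.from_pipe(text_pipeline).)
# @spaces.GPU allocates a ZeroGPU device for the duration of each decorated call.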
@spaces.GPU(enable_queue=True)
def text_to_image(ip, prompt, neg_prompt, width, height, ip_scale, strength, guidance, steps):
    # Shrink the IP-Adapter reference image in place so it fits within 1024x1024.
    ip.thumbnail((1024, 1024))
    text_pipeline.set_ip_adapter_scale(ip_scale)
    # `strength` is accepted to keep the signature parallel to image_to_image, but it is
    # not forwarded: it only applies to image-to-image denoising.
    images = text_pipeline(
        prompt=prompt,
        ip_adapter_image=ip,
        negative_prompt=neg_prompt,
        width=width,
        height=height,
        guidance_scale=guidance,
        num_inference_steps=steps,
    ).images
    return images[0]

@spaces.GPU(enable_queue=True)
def image_to_image(ip, image, prompt, neg_prompt, width, height, ip_scale, strength, guidance, steps):
    # Shrink both the IP-Adapter reference and the init image in place.
    ip.thumbnail((1024, 1024))
    image.thumbnail((1024, 1024))
    image_pipeline.set_ip_adapter_scale(ip_scale)
    images = image_pipeline(
        prompt=prompt,
        image=image,
        ip_adapter_image=ip,
        negative_prompt=neg_prompt,
        width=width,
        height=height,
        strength=strength,
        guidance_scale=guidance,
        num_inference_steps=steps,
    ).images
    return images[0]
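
# The "Inpainting" tab defined below has UI components but no pipeline or handler in
# this file. A rough, commented-out sketch only (assumptions, not part of the original
# app: AutoPipelineForInpainting would need to be imported from diffusers, and the
# gr.ImageEditor value would have to be split into a PIL image and a PIL mask first):
#
# inpaint_pipeline = AutoPipelineForInpainting.from_pretrained(
#     "stabilityai/stable-diffusion-xl-base-1.0",
#     vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
# ).to("cuda")
# inpaint_pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
#
# @spaces.GPU(enable_queue=True)
# def inpaint(ip, image, mask, prompt, neg_prompt, width, height, ip_scale, strength, guidance, steps):
#     ip.thumbnail((1024, 1024))
#     image.thumbnail((1024, 1024))
#     mask.thumbnail((1024, 1024))
#     inpaint_pipeline.set_ip_adapter_scale(ip_scale)
#     return inpaint_pipeline(
#         prompt=prompt, image=image, mask_image=mask, ip_adapter_image=ip,
#         negative_prompt=neg_prompt, width=width, height=height, strength=strength,
#         guidance_scale=guidance, num_inference_steps=steps,
#     ).images[0]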

with gr.Blocks() as demo:
    gr.Markdown("""
# IP-Adapter Playground
by [Tony Assi](https://www.tonyassi.com/)
""")

    with gr.Row():
        with gr.Tab("Text-to-Image"):
            text_ip = gr.Image(label='IP-Adapter Image', type='pil')
            text_prompt = gr.Textbox(label='Prompt')
            text_button = gr.Button("Generate")
        with gr.Tab("Image-to-Image"):
            image_ip = gr.Image(label='IP-Adapter Image', type='pil')
            image_image = gr.Image(label='Image', type='pil')
            image_prompt = gr.Textbox(label='Prompt')
            image_button = gr.Button("Generate")
        with gr.Tab("Inpainting"):
            inpaint_ip = gr.Image(label='IP-Adapter Image', type='pil')
            inpaint_editor = gr.ImageEditor(label='Image + Mask')
            inpaint_prompt = gr.Textbox(label='Prompt')
            inpaint_button = gr.Button("Generate")
        output_image = gr.Image(label='Result')

    with gr.Accordion("Advanced Settings", open=False):
        neg_prompt = gr.Textbox(label='Negative Prompt', value='ugly, deformed, nsfw')
        width_slider = gr.Slider(256, 1024, value=1024, step=8, label="Width")
        height_slider = gr.Slider(256, 1024, value=1024, step=8, label="Height")
        ip_scale_slider = gr.Slider(0.0, 1.0, value=0.6, label="IP-Adapter Scale")
        strength_slider = gr.Slider(0.0, 1.0, value=0.7, label="Strength")
        guidance_slider = gr.Slider(1.0, 15.0, value=7.5, label="Guidance")
        steps_slider = gr.Slider(50, 100, value=75, step=1, label="Steps")

    text_button.click(
        text_to_image,
        inputs=[text_ip, text_prompt, neg_prompt, width_slider, height_slider,
                ip_scale_slider, strength_slider, guidance_slider, steps_slider],
        outputs=output_image,
    )
    image_button.click(
        image_to_image,
        inputs=[image_ip, image_image, image_prompt, neg_prompt, width_slider, height_slider,
                ip_scale_slider, strength_slider, guidance_slider, steps_slider],
        outputs=output_image,
    )
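    # Note: inpaint_button is not connected to a handler here; see the commented-out
    # inpainting sketch above for one way it could be wired up.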

demo.launch()