import torch
import spaces
import gradio as gr
from diffusers import FluxInpaintPipeline, FluxTransformer2DModel, AutoencoderKL
import random
import os
import numpy as np
from huggingface_hub import hf_hub_download
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
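# hf_transfer speeds up large file downloads from the Hugging Face Hub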
MAX_SEED = np.iinfo(np.int32).max
model = "black-forest-labs/FLUX.1-dev"
hf_hub_download(repo_id="black-forest-labs/FLUX.1-Fill-dev", filename="ae.safetensors", local_dir=".")
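# Build the inpainting pipeline from the FLUX.1-dev base, swapping in the FLUX.1-Fill-dev transformer and VAE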
if torch.cuda.is_available():
    transformer = FluxTransformer2DModel.from_single_file(
        "https://huggingface.co/black-forest-labs/FLUX.1-Fill-dev/blob/main/flux1-fill-dev.safetensors",
        low_cpu_mem_usage=False,
        ignore_mismatched_sizes=True,
        torch_dtype=torch.bfloat16
    )
    # ae.safetensors is a single-file checkpoint, so it is loaded with from_single_file rather than from_pretrained
    vae = AutoencoderKL.from_single_file("./ae.safetensors", torch_dtype=torch.bfloat16)
    pipe = FluxInpaintPipeline.from_pretrained(
        model,
        vae=vae,
        transformer=transformer,
        torch_dtype=torch.bfloat16
    )
    pipe.to("cuda")
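# spaces.GPU marks the function so ZeroGPU allocates a GPU only for the duration of each call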
@spaces.GPU()
def inpaintGen(
        imgMask,
        inpaint_prompt: str,
        strength: float,
        guidance: float,
        num_steps: int,
        seed: int,
        randomize_seed: bool,
        progress=gr.Progress(track_tqdm=True)):
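    # The ImageMask editor returns the uploaded background and the painted mask layer separately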
    source_img = imgMask["background"]
    mask_img = imgMask["layers"][0] if imgMask["layers"] else None

    if source_img is None:
        raise gr.Error("Please upload an image.")
    if mask_img is None:
        raise gr.Error("Please draw a mask on the image.")

    width, height = source_img.size

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)
    # The seed is applied through the generator; the pipeline call itself takes no seed argument
    result = pipe(
        prompt=inpaint_prompt,
        image=source_img,
        mask_image=mask_img,
        width=width,
        height=height,
        strength=strength,
        num_inference_steps=num_steps,
        generator=generator,
        guidance_scale=guidance
    ).images[0]

    return result, seed
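# Gradio UI: mask editor and prompt on the left, result on the right, tuning options in an accordion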
with gr.Blocks(theme="ocean", title="Flux.1 dev inpaint") as demo:
gr.HTML("<h1><center>Flux.1 dev Inpaint</center></h1>")
gr.HTML("""
<p>
<center>
A partial redraw of the image based on your prompt words and occluded parts.
</center>
</p>
""")
    with gr.Row():
        with gr.Column():
            imgMask = gr.ImageMask(type="pil", label="Image", layers=False, height=800)
            inpaint_prompt = gr.Textbox(label='Prompts ✏️', placeholder="A hat...")
            with gr.Row():
                Inpaint_sendBtn = gr.Button(value="Submit", variant='primary')
                Inpaint_clearBtn = gr.ClearButton([imgMask, inpaint_prompt], value="Clear")
        image_out = gr.Image(type="pil", label="Output", height=960)
    with gr.Accordion("Advanced ⚙️", open=False):
        strength = gr.Slider(label="Strength", minimum=0, maximum=1, value=1, step=0.1)
        guidance = gr.Slider(label="Guidance scale", minimum=1, maximum=20, value=7.5, step=0.1)
        num_steps = gr.Slider(label="Steps", minimum=1, maximum=20, value=20, step=1)
        seed = gr.Number(label="Seed", value=42, precision=0)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
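    # Trigger generation from either the prompt textbox (Enter) or the Submit button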
    gr.on(
        triggers=[
            inpaint_prompt.submit,
            Inpaint_sendBtn.click,
        ],
        fn=inpaintGen,
        inputs=[
            imgMask,
            inpaint_prompt,
            strength,
            guidance,
            num_steps,
            seed,
            randomize_seed
        ],
        outputs=[image_out, seed]
    )
if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False, share=False)