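"""Gradio demo for FLUX.1-dev inpainting.

Upload an image, paint a mask over the region to change, and the masked
area is redrawn from the text prompt.
"""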
import torch
import spaces
import gradio as gr
from diffusers import FluxInpaintPipeline, FlowMatchEulerDiscreteScheduler
import random
import numpy as np

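# Largest 32-bit signed integer; upper bound used when "Randomize seed" is enabled.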
MAX_SEED = np.iinfo(np.int32).max
model = "black-forest-labs/FLUX.1-dev"

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # Load the FLUX.1-dev inpainting pipeline in bfloat16.
    pipe = FluxInpaintPipeline.from_pretrained(
        model,
        torch_dtype=torch.bfloat16,
    )
    # Keep the flow-match Euler scheduler but sample sigmas on a beta schedule.
    pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
        pipe.scheduler.config, use_beta_sigmas=True
    )
    pipe.to(DEVICE)


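# Single inpainting pass; runs on GPU via the `spaces` decorator and returns
# the generated image together with the seed that was actually used.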
@spaces.GPU()
def inpaintGen(
        imgMask,
        inpaint_prompt: str,
        strength: float,
        guidance: float,
        num_steps: int,
        seed: int,
        randomize_seed: bool,
        progress=gr.Progress(track_tqdm=True)):

    source_img = imgMask["background"] if imgMask else None
    mask_img = imgMask["layers"][0] if imgMask and imgMask["layers"] else None

    if source_img is None:
        raise gr.Error("Please upload an image.")

    if mask_img is None:
        raise gr.Error("Please draw a mask on the image.")
        
    width, height = source_img.size

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=DEVICE).manual_seed(seed)

    result = pipe(
        prompt=inpaint_prompt,
        image=source_img,
        mask_image=mask_img,
        width=width,
        height=height,
        strength=strength,
        num_inference_steps=num_steps,
        generator=generator,
        guidance_scale=guidance
    ).images[0]
    
    return result, seed
 

with gr.Blocks(theme="ocean", title="Flux.1 dev inpaint") as demo:
    gr.HTML("<h1><center>Flux.1 dev Inpaint</center></h1>")
    gr.HTML("""
        <p>
            <center>
                Paint a mask over part of the image and the masked region will be redrawn according to your prompt.
            </center>
        </p>
    """)
    with gr.Row():
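        # Left column: mask editor and prompt; right: the inpainted output.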
        with gr.Column():
            imgMask = gr.ImageMask(type="pil", label="Image", layers=False, height=800)
            inpaint_prompt = gr.Textbox(label='Prompts ✏️', placeholder="A hat...")
            with gr.Row():
                Inpaint_sendBtn = gr.Button(value="Submit", variant='primary')
                Inpaint_clearBtn = gr.ClearButton([imgMask, inpaint_prompt], value="Clear")
        image_out = gr.Image(type="pil", label="Output", height=960)
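    # Sampling controls forwarded to the pipeline call.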
    with gr.Accordion("Advanced ⚙️", open=False):
        strength = gr.Slider(label="Strength", minimum=0, maximum=1, value=1, step=0.1)
        guidance = gr.Slider(label="Guidance scale", minimum=1, maximum=20, value=7.5, step=0.1)
        num_steps = gr.Slider(label="Steps", minimum=1, maximum=20, value=20, step=1)
        seed = gr.Number(label="Seed", value=42, precision=0)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

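    # Same handler for prompt submit and the Submit button; the seed box is
    # also an output so a randomized seed is reported back to the UI.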
    gr.on(
        triggers=[
            inpaint_prompt.submit,
            Inpaint_sendBtn.click,
        ],
        fn=inpaintGen,
        inputs=[
            imgMask,
            inpaint_prompt,
            strength,
            guidance,
            num_steps,
            seed,
            randomize_seed,
        ],
        outputs=[image_out, seed],
    )

if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False, share=False)