Update app.py
app.py CHANGED
```diff
@@ -10,12 +10,6 @@ import torch
 from PIL import Image
 from diffusers import FluxInpaintPipeline
 
-MARKDOWN = """
-# FLUX.1 Inpainting 🔥
-Shoutout to [Black Forest Labs](https://huggingface.co/black-forest-labs) team for
-creating this amazing model, and a big thanks to [Gothos](https://github.com/Gothos)
-for taking it to the next level by enabling inpainting with the FLUX.
-"""
 
 MAX_SEED = np.iinfo(np.int32).max
 IMAGE_SIZE = 1024
@@ -37,33 +31,6 @@ def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
     return image
 
 
-EXAMPLES = [
-    [
-        {
-            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
-            "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2.png", stream=True).raw))],
-            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
-        },
-        "little lion",
-        42,
-        False,
-        0.85,
-        30
-    ],
-    [
-        {
-            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
-            "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-3.png", stream=True).raw))],
-            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-3.png", stream=True).raw),
-        },
-        "tribal tattoos",
-        42,
-        False,
-        0.85,
-        30
-    ]
-]
-
 pipe = FluxInpaintPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
 
```
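For reference, each entry in the removed EXAMPLES list fetched its background, mask, and composite images over HTTP at import time and packed them into the background/layers/composite dict layout used by the app's image editor input. A minimal sketch of that loading pattern, using one of the Roboflow-hosted URLs from the removed code (the helper name is illustrative, not part of the commit):

```python
# Minimal sketch (illustrative, not the Space's exact code): how the removed
# EXAMPLES entries loaded remote images with requests + PIL.
import requests
from PIL import Image


def load_remote_image(url: str) -> Image.Image:
    # stream=True leaves the body as a file-like object that PIL can decode,
    # matching the Image.open(requests.get(...).raw) pattern in the removed code.
    response = requests.get(url, stream=True)
    response.raise_for_status()
    return Image.open(response.raw)


background = load_remote_image("https://media.roboflow.com/spaces/doge-2-image.png")
example_value = {
    "background": background,  # base image shown in the editor
    "layers": [],              # mask layers drawn on top (empty in this sketch)
    "composite": background,   # flattened result of background + layers
}
```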
```diff
@@ -134,6 +101,7 @@ def process(
         height=height,
         strength=strength_slider,
         generator=generator,
+        joint_attention_kwargs={"scale": 1.2},
         num_inference_steps=num_inference_steps_slider
     ).images[0]
     print('INFERENCE DONE')
```
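For context, in diffusers the `joint_attention_kwargs` dict is forwarded to the Flux transformer's attention processors, and its `"scale"` entry is read as the LoRA scale, so a value like 1.2 only changes the output once LoRA weights have been loaded into the pipeline. A minimal sketch of how such a call might look outside the app; the device string, file names, and commented-out LoRA repo id are placeholders, not taken from this commit:

```python
# Minimal sketch (assumptions noted in comments), not the Space's exact code.
import torch
from diffusers import FluxInpaintPipeline
from PIL import Image

pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")  # the app uses its own DEVICE constant

# The "scale" key is interpreted as a LoRA scale, so it is effectively a
# no-op until LoRA weights are loaded. The repo id below is a placeholder.
# pipe.load_lora_weights("some-user/some-flux-lora")

image = Image.open("background.png")  # placeholder local files
mask = Image.open("mask.png")

result = pipe(
    prompt="little lion",
    image=image,
    mask_image=mask,
    strength=0.85,
    num_inference_steps=30,
    joint_attention_kwargs={"scale": 1.2},
).images[0]
result.save("output.png")
```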
```diff
@@ -205,7 +173,6 @@ with gr.Blocks() as demo:
     with gr.Row():
         gr.Examples(
             fn=process,
-            examples=EXAMPLES,
             inputs=[
                 input_image_editor_component,
                 input_text_component,
```