Spaces: Running on Zero
upd
ICEdit ADDED
@@ -0,0 +1 @@
+Subproject commit 6e4f95590e5b56ca1313dc7f515a4d6bed49244c
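The subproject line above is a git submodule pin: the Space vendors the upstream ICEdit repository at exactly that commit, which is what makes the ICEdit/icedit package importable from app.py. For reference, such a pin is produced by something like git submodule add <ICEdit repository URL> ICEdit followed by checking out the commit inside the submodule; the URL placeholder is ours, not recorded in this diff.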
app.py CHANGED
@@ -1,94 +1,145 @@
+'''
+python scripts/gradio_demo.py
+'''
+
+import sys
+import os
+workspace_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "ICEdit/icedit"))
+
+if workspace_dir not in sys.path:
+    sys.path.insert(0, workspace_dir)
+
+from diffusers import FluxFillPipeline
 import gradio as gr
 import numpy as np
-import random
-
-# import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
 import torch
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
+import spaces
+import argparse
+import random
+from diffusers import FluxFillPipeline
+from PIL import Image
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 
-# @spaces.GPU #[uncomment to use ZeroGPU]
-def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
+parser = argparse.ArgumentParser()
+parser.add_argument("--port", type=int, default=7860, help="Port for the Gradio app")
+parser.add_argument("--output-dir", type=str, default="gradio_results", help="Directory to save the output image")
+parser.add_argument("--flux-path", type=str, default='black-forest-labs/flux.1-fill-dev', help="Path to the model")
+parser.add_argument("--lora-path", type=str, default='sanaka87/ICEdit-MoE-LoRA', help="Path to the LoRA weights")
+parser.add_argument("--enable-model-cpu-offload", action="store_true", help="Enable CPU offloading for the model")
+args = parser.parse_args()
+
+pipe = FluxFillPipeline.from_pretrained(args.flux_path, torch_dtype=torch.bfloat16)
+pipe.load_lora_weights(args.lora_path)
+
+if args.enable_model_cpu_offload:
+    pipe.enable_model_cpu_offload()
+else:
+    pipe = pipe.to("cuda")
+
+@spaces.GPU
+def infer(edit_images,
+          prompt,
+          seed=666,
+          randomize_seed=False,
+          width=1024,
+          height=1024,
+          guidance_scale=50,
+          num_inference_steps=28,
+          progress=gr.Progress(track_tqdm=True)
 ):
+
+    image = edit_images["background"]
+
+    if image.size[0] != 512:
+        print("\033[93m[WARNING] We can only deal with the case where the image's width is 512.\033[0m")
+        new_width = 512
+        scale = new_width / image.size[0]
+        new_height = int(image.size[1] * scale)
+        new_height = (new_height // 8) * 8
+        image = image.resize((new_width, new_height))
+        print(f"\033[93m[WARNING] Resizing the image to {new_width} x {new_height}\033[0m")
+
+    image = image.convert("RGB")
+    width, height = image.size
+    image = image.resize((512, int(512 * height / width)))
+    combined_image = Image.new("RGB", (width * 2, height))
+    combined_image.paste(image, (0, 0))
+    mask_array = np.zeros((height, width * 2), dtype=np.uint8)
+    mask_array[:, width:] = 255
+    mask = Image.fromarray(mask_array)
+    instruction = f'A diptych with two side-by-side images of the same scene. On the right, the scene is exactly the same as on the left but {prompt}'
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    generator = torch.Generator().manual_seed(seed)
-
     image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
+        prompt=instruction,
+        image=combined_image,
+        mask_image=mask,
+        height=height,
+        width=width*2,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
+        generator=torch.Generator("cpu").manual_seed(seed)
     ).images[0]
 
-    return image, seed
-
+    w,h = image.size
+    image = image.crop((w//2, 0, w, h))
 
+    os.makedirs(args.output_dir, exist_ok=True)
+
+    index = len(os.listdir(args.output_dir))
+    image.save(f"{args.output_dir}/result_{index}.png")
+
+    return image, seed
+
 examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
+    "a tiny astronaut hatching from an egg on the moon",
+    "a cat holding a sign that says hello world",
+    "an anime illustration of a wiener schnitzel",
 ]
 
-css = """
+css="""
 #col-container {
     margin: 0 auto;
-    max-width: 640px;
+    max-width: 1000px;
 }
 """
 
 with gr.Blocks(css=css) as demo:
+
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Gradio Template")
-
+        gr.Markdown(f"""# IC-Edit
+        A demo for [IC-Edit](https://arxiv.org/pdf/2504.20690).
+        More **open-source**, with **lower costs**, **faster speed** (it takes about 9 seconds to process one image), and **powerful performance**.
+        """)
         with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
+            with gr.Column():
+                edit_image = gr.ImageEditor(
+                    label='Upload and draw mask for inpainting',
+                    type='pil',
+                    sources=["upload", "webcam"],
+                    image_mode='RGB',
+                    layers=False,
+                    brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
+                    height=600
+                )
+                prompt = gr.Text(
+                    label="Prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter your prompt",
+                    container=False,
+                )
+                run_button = gr.Button("Run")
+
+        result = gr.Image(label="Result", show_label=False)
+
         with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
+
             seed = gr.Slider(
                 label="Seed",
                 minimum=0,
@@ -96,59 +147,52 @@ with gr.Blocks(css=css) as demo:
                 step=1,
                 value=0,
             )
-
+
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
+
             with gr.Row():
+
                 width = gr.Slider(
                     label="Width",
-                    minimum=256,
+                    minimum=512,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=1024,
+                    visible=False
                 )
-
+
                 height = gr.Slider(
                     label="Height",
-                    minimum=256,
+                    minimum=512,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=1024,
+                    visible=False
                 )
-
+
             with gr.Row():
+
                 guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
+                    label="Guidance Scale",
+                    minimum=1,
+                    maximum=50,
+                    step=0.5,
+                    value=50,
                 )
-
+
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=2,  # Replace with defaults that work for your model
+                    value=28,
                 )
 
-        gr.Examples(examples=examples, inputs=[prompt])
     gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn=infer,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
+        fn = infer,
+        inputs = [edit_image, prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs = [result, seed]
     )
 
-if __name__ == "__main__":
-    demo.launch()
+demo.launch(server_port=args.port)
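The interesting part of the new infer is the diptych trick: the uploaded picture becomes the left half of a double-width canvas, the right half is fully masked, and FluxFillPipeline inpaints that half under an instruction demanding "exactly the same scene ... but {prompt}"; the output is then cropped back to the right panel. A minimal standalone sketch of the canvas-and-mask construction (Pillow and NumPy only; make_diptych is our illustrative name, not a function from the commit):

import numpy as np
from PIL import Image

def make_diptych(image):
    # Left half: the original image; right half: black pixels that the
    # fill pipeline will repaint, selected by a white (255) mask.
    width, height = image.size
    combined = Image.new("RGB", (width * 2, height))
    combined.paste(image, (0, 0))
    mask = np.zeros((height, width * 2), dtype=np.uint8)
    mask[:, width:] = 255  # inpaint only the right panel
    return combined, Image.fromarray(mask)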
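For local testing, assuming the dependencies above are installed and the ICEdit submodule is checked out, the script can be launched with its own CLI flags, e.g. python app.py --port 7860 --enable-model-cpu-offload (the offload flag helps on GPUs with limited VRAM; the model paths default to black-forest-labs/flux.1-fill-dev and sanaka87/ICEdit-MoE-LoRA).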