blanchon committed
Commit 0e2dbb6 · 1 Parent(s): a6264e6

Files changed (4)
  1. README.md +25 -8
  2. app.py +285 -0
  3. pyproject.toml +18 -0
  4. requirements.txt +10 -0
README.md CHANGED
@@ -1,13 +1,30 @@
  ---
- title: FurnitureBlendingDemo
- emoji: 🐢
- colorFrom: purple
- colorTo: pink
+ title: FurnitureInpaintingDemo
+ emoji: 🌖
+ colorFrom: pink
+ colorTo: red
  sdk: gradio
- sdk_version: 5.15.0
+ python_version: 3.12
+ sdk_version: 5.12.0
+ suggested_hardware: a100-large
  app_file: app.py
- pinned: false
- short_description: Furniture Blending Demo
+ # fullWidth: true
+ # header: mini
+ # models: blanchon/VirtualUnstagingModel
+ # datasets: blanchon/VirtualUnstagingDataset
+ tags:
+ - image-generation
+ - image-to-image
+ - furniture
+ - virtual-staging
+ - home-decor
+ - home-design
+ pinned: true
+ # preload_from_hub:
+ # - blanchon/VirtualUnstagingModel
+ license: mit
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # VirtualUnstaging
+
+ ...
app.py ADDED
@@ -0,0 +1,285 @@
+ import os
+ import numpy as np
+ from typing import cast
+ import torch
+ from PIL import Image, ImageDraw
+ from diffusers import DiffusionPipeline
+ import gradio as gr
+ from gradio.components.image_editor import EditorValue
+ import spaces
+
+ DEVICE = "cuda"
+
+ MAIN_MODEL_REPO_ID = os.getenv("MAIN_MODEL_REPO_ID", None)
+ SUB_MODEL_REPO_ID = os.getenv("SUB_MODEL_REPO_ID", None)
+ SUB_MODEL_SUBFOLDER = os.getenv("SUB_MODEL_SUBFOLDER", None)
+
+ if MAIN_MODEL_REPO_ID is None:
+     raise ValueError("MAIN_MODEL_REPO_ID is not set")
+ if SUB_MODEL_REPO_ID is None:
+     raise ValueError("SUB_MODEL_REPO_ID is not set")
+ if SUB_MODEL_SUBFOLDER is None:
+     raise ValueError("SUB_MODEL_SUBFOLDER is not set")
+
+ pipeline = DiffusionPipeline.from_pretrained(
+     MAIN_MODEL_REPO_ID,
+     torch_dtype=torch.bfloat16,
+     custom_pipeline=SUB_MODEL_REPO_ID,
+ ).to(DEVICE)
+ pipeline.post_init()
+
+
+ def crop_divisible_by_16(image: Image.Image) -> Image.Image:
+     # Crop so both dimensions are divisible by 16 (defined here but not used below)
+     w, h = image.size
+     w = w - w % 16
+     h = h - h % 16
+     return image.crop((0, 0, w, h))
+
+
+ @spaces.GPU(duration=150)
+ def predict(
+     image_and_mask: EditorValue,
+     furniture_reference: Image.Image | None,
+     seed: int = 0,
+     num_inference_steps: int = 28,
+     condition_size: int = 512,  # exposed in the UI but not used below
+     target_size: int = 512,
+     condition_scale: float = 1.0,
+     progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
+ ) -> Image.Image | None:
+     # ) -> tuple[Image.Image, Image.Image] | None:
+     if not image_and_mask:
+         gr.Info("Please upload an image and draw a mask")
+         return None
+     if not furniture_reference:
+         gr.Info("Please upload a furniture reference image")
+         return None
+
+     pipeline.load(
+         SUB_MODEL_REPO_ID,
+         subfolder=SUB_MODEL_SUBFOLDER,
+     )
+
+     image_np = image_and_mask["background"]
+     image_np = cast(np.ndarray, image_np)
+
+     # If the image is empty, return None
+     if np.sum(image_np) == 0:
+         gr.Info("Please upload an image")
+         return None
+
+     # The drawn mask is the alpha channel of the first editor layer
+     alpha_channel = image_and_mask["layers"][0]
+     alpha_channel = cast(np.ndarray, alpha_channel)
+     mask_np = np.where(alpha_channel[:, :, 3] == 0, 0, 255).astype(np.uint8)
+
+     # If the mask is empty, return None
+     if np.sum(mask_np) == 0:
+         gr.Info("Please mark the areas you want to remove")
+         return None
+
+     mask_image = Image.fromarray(mask_np).convert("L")
+     mask_image.thumbnail((target_size, target_size))
+     mask_image_bbox = mask_image.getbbox()
+
+     target_image = Image.fromarray(image_np).convert("RGB")
+     target_image.thumbnail((target_size, target_size))
+     target_image_size = target_image.size
+
+     condition_image = Image.new("RGB", (target_size, target_size), (255, 255, 255))
+     condition_image.paste(target_image, (0, 0))
+
+     # Blank out the masked bbox area with white
+     draw = ImageDraw.Draw(condition_image)
+     draw.rectangle(mask_image_bbox, fill="white", outline="white", width=10)
+
+     # Resize the furniture reference so it fits in the bbox and overlay it on the condition image
+     bbox_width = mask_image_bbox[2] - mask_image_bbox[0]
+     bbox_height = mask_image_bbox[3] - mask_image_bbox[1]
+     furniture_reference.thumbnail((bbox_width, bbox_height))
+     # Paste at the bbox center
+     condition_image.paste(
+         furniture_reference,
+         (
+             mask_image_bbox[0] + (bbox_width - furniture_reference.width) // 2,
+             mask_image_bbox[1] + (bbox_height - furniture_reference.height) // 2,
+         ),
+     )
+
+     # Save the condition image for debugging
+     condition_image.save("condition_image.png")
+
+     generator = torch.Generator(device="cpu").manual_seed(seed)
+
+     final_image = pipeline(
+         condition_image=condition_image,
+         prompt="",
+         num_inference_steps=num_inference_steps,
+         height=target_size,
+         width=target_size,
+         union_cond_attn=True,
+         add_cond_attn=False,
+         latent_lora=False,
+         default_lora=False,
+         condition_scale=condition_scale,
+         generator=generator,
+         max_sequence_length=512,
+     ).images[0]
+
+     # Crop the square output back to the input's aspect ratio
+     final_image_crop = final_image.crop((
+         0,
+         0,
+         target_image_size[0],
+         target_image_size[1],
+     ))
+
+     return final_image_crop
+
+
+ intro_markdown = r"""
+ # Furniture Inpainting Demo
+ """
+
+ css = r"""
+ #col-left {
+     margin: 0 auto;
+     max-width: 430px;
+ }
+ #col-mid {
+     margin: 0 auto;
+     max-width: 430px;
+ }
+ #col-right {
+     margin: 0 auto;
+     max-width: 430px;
+ }
+ #col-showcase {
+     margin: 0 auto;
+     max-width: 1100px;
+ }
+ """
+
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(intro_markdown)
+
+     with gr.Row() as content:
+         with gr.Column(elem_id="col-left"):
+             gr.HTML(
+                 """
+                 <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                     <div>
+                         Step 1. Upload a room image ⬇️
+                     </div>
+                 </div>
+                 """,
+                 max_height=50,
+             )
+             image_and_mask = gr.ImageMask(
+                 label="Image and Mask",
+                 layers=False,
+                 height="full",
+                 width="full",
+                 show_fullscreen_button=False,
+                 sources=["upload"],
+                 show_download_button=False,
+                 interactive=True,
+                 brush=gr.Brush(default_size=75, colors=["#000000"], color_mode="fixed"),
+                 transforms=[],
+             )
+         with gr.Column(elem_id="col-mid"):
+             gr.HTML(
+                 """
+                 <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                     <div>
+                         Step 2. Upload a furniture reference image ⬇️
+                     </div>
+                 </div>
+                 """,
+                 max_height=50,
+             )
+             condition_image = gr.Image(
+                 label="Furniture Reference",
+                 type="pil",
+                 sources=["upload"],
+                 image_mode="RGB",
+             )
+         with gr.Column(elem_id="col-right"):
+             gr.HTML(
+                 """
+                 <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                     <div>
+                         Step 3. Press Run to launch the generation
+                     </div>
+                 </div>
+                 """,
+                 max_height=50,
+             )
+             # image_slider = ImageSlider(
+             #     label="Result",
+             #     interactive=False,
+             # )
+             result = gr.Image(label="Result")
+             run_button = gr.Button("Run")
+
+     with gr.Accordion("Advanced Settings", open=False):
+         seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=100_000,
+             step=1,
+             value=0,
+         )
+         condition_scale = gr.Slider(
+             label="Condition Scale",
+             minimum=-10.0,
+             maximum=10.0,
+             step=0.10,
+             value=1.0,
+         )
+         with gr.Column():
+             condition_size = gr.Slider(
+                 label="Condition Size",
+                 minimum=256,
+                 maximum=1024,
+                 step=128,
+                 value=512,
+             )
+
+             target_size = gr.Slider(
+                 label="Target Size",
+                 minimum=256,
+                 maximum=1024,
+                 step=128,
+                 value=512,
+             )
+
+             num_inference_steps = gr.Slider(
+                 label="Number of inference steps",
+                 minimum=1,
+                 maximum=50,
+                 step=1,
+                 value=28,
+             )
+
+     run_button.click(
+         fn=predict,
+         inputs=[
+             image_and_mask,
+             condition_image,
+             seed,
+             num_inference_steps,
+             condition_size,
+             target_size,
+             condition_scale,
+         ],
+         # outputs=[image_slider],
+         outputs=[result],
+     )
+
+
+ demo.launch()
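
The mask handling inside `predict` can be checked in isolation: the brushed region arrives as the alpha channel of the first `gr.ImageMask` layer, and its bounding box drives where the furniture reference is pasted. A minimal sketch with synthetic arrays standing in for the editor payload (the array shapes and values here are illustrative, not from the commit):

```python
import numpy as np
from PIL import Image

# Synthetic stand-in for the gr.ImageMask payload: a 64x64 background and
# one RGBA layer whose alpha channel marks the brushed region.
layer = np.zeros((64, 64, 4), dtype=np.uint8)
layer[16:48, 16:48, 3] = 255  # a brushed 32x32 square

# Same conversion as predict(): opaque pixels -> 255, transparent -> 0.
mask_np = np.where(layer[:, :, 3] == 0, 0, 255).astype(np.uint8)
mask_image = Image.fromarray(mask_np).convert("L")

# getbbox() returns the bounding box of the nonzero (brushed) region,
# which predict() uses to size and center the furniture reference.
print(mask_image.getbbox())  # (16, 16, 48, 48)
```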
pyproject.toml ADDED
@@ -0,0 +1,18 @@
+ [project]
+ name = "VirtualStaging"
+ version = "0.1.0"
+ description = "Add your description here"
+ readme = "README.md"
+ requires-python = ">=3.12"
+ dependencies = [
+     "accelerate>=1.2.1",
+     "diffusers==0.31.0",
+     "gradio>=5.12.0",
+     "gradio-imageslider>=0.0.20",
+     "peft>=0.14.0",
+     "pillow>=11.1.0",
+     "safetensors>=0.5.2",
+     "sentencepiece>=0.2.0",
+     "spaces>=0.32.0",
+     "transformers>=4.48.0",
+ ]
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ diffusers
+ transformers
+ accelerate
+ safetensors
+ sentencepiece
+ peft
+ gradio
+ spaces
+ pillow
+ gradio_imageslider
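
To reproduce the Space locally after installing these requirements, the three environment variables read at the top of app.py must be set before the module is imported, or it raises ValueError. A minimal launcher sketch; all three values below are placeholders, since the real repo IDs are not part of this commit (presumably they are configured as Space secrets):

```python
import os

# Placeholder values -- none of these identifiers come from the commit itself.
os.environ["MAIN_MODEL_REPO_ID"] = "your-org/base-model"      # base diffusion model repo
os.environ["SUB_MODEL_REPO_ID"] = "your-org/custom-pipeline"  # custom pipeline + weights repo
os.environ["SUB_MODEL_SUBFOLDER"] = "checkpoints"             # weights subfolder in that repo

# Importing app builds the pipeline on CUDA and calls demo.launch().
import app  # noqa: E402, F401
```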