Space status: Runtime error
Update app.py
app.py CHANGED
@@ -110,14 +110,11 @@ def get_seed(randomize_seed: bool, seed: int) -> int:
 @spaces.GPU
 def image_to_3d(
     image: Image.Image,
-    multiimages: List[Tuple[Image.Image, str]],
-    is_multiimage: bool,
     seed: int,
     ss_guidance_strength: float,
     ss_sampling_steps: int,
     slat_guidance_strength: float,
     slat_sampling_steps: int,
-    multiimage_algo: Literal["multidiffusion", "stochastic"],
     req: gr.Request,
 ) -> Tuple[dict, str]:
     """
@@ -139,7 +136,6 @@ def image_to_3d(
         str: The path to the video of the 3D model.
     """
     user_dir = os.path.join(TMP_DIR, str(req.session_hash))
-    if not is_multiimage:
         outputs = pipeline.run(
             image,
             seed=seed,
@@ -154,22 +150,6 @@
                 "cfg_strength": slat_guidance_strength,
             },
         )
-    else:
-        outputs = pipeline.run_multi_image(
-            [image[0] for image in multiimages],
-            seed=seed,
-            formats=["gaussian", "mesh"],
-            preprocess_image=False,
-            sparse_structure_sampler_params={
-                "steps": ss_sampling_steps,
-                "cfg_strength": ss_guidance_strength,
-            },
-            slat_sampler_params={
-                "steps": slat_sampling_steps,
-                "cfg_strength": slat_guidance_strength,
-            },
-            mode=multiimage_algo,
-        )
     video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
     video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
     video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
@@ -278,7 +258,6 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
                 with gr.Row():
                     slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
                     slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
-                multiimage_algo = gr.Radio(["stochastic", "multidiffusion"], label="Multi-image Algorithm", value="stochastic")
 
             generate_btn = gr.Button("Generate")
 
@@ -301,7 +280,6 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
                 download_glb = gr.DownloadButton(label="Download GLB", interactive=False)
                 download_gs = gr.DownloadButton(label="Download Gaussian", interactive=False)
 
-    is_multiimage = gr.State(False)
     output_buf = gr.State()
 
     # Example images at the bottom of the page
@@ -317,15 +295,6 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
             run_on_click=True,
             examples_per_page=64,
         )
-    with gr.Row(visible=False) as multiimage_example:
-        examples_multi = gr.Examples(
-            examples=prepare_multi_example(),
-            inputs=[image_prompt],
-            fn=split_image,
-            outputs=[multiimage_prompt],
-            run_on_click=True,
-            examples_per_page=8,
-        )
 
     # Handlers
     demo.load(start_session)
@@ -333,11 +302,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
 
     single_image_input_tab.select(
         lambda: tuple([False, gr.Row.update(visible=True), gr.Row.update(visible=False)]),
-        outputs=[is_multiimage, single_image_example, multiimage_example]
-    )
-    multiimage_input_tab.select(
-        lambda: tuple([True, gr.Row.update(visible=False), gr.Row.update(visible=True)]),
-        outputs=[is_multiimage, single_image_example, multiimage_example]
+        outputs=[single_image_example]
     )
 
     image_prompt.upload(
@@ -345,11 +310,6 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
         inputs=[image_prompt],
         outputs=[image_prompt],
     )
-    multiimage_prompt.upload(
-        preprocess_images,
-        inputs=[multiimage_prompt],
-        outputs=[multiimage_prompt],
-    )
 
     generate_btn.click(
         get_seed,
@@ -357,7 +317,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
         outputs=[seed],
     ).then(
         image_to_3d,
-        inputs=[image_prompt, multiimage_prompt, is_multiimage, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps, multiimage_algo],
+        inputs=[image_prompt, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps],
         outputs=[output_buf, video_output],
     ).then(
         lambda: tuple([gr.Button(interactive=True), gr.Button(interactive=True)]),
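
Editor's note on the first three hunks: the hunk counts (`-139,7 +136,6` and `-154,22 +150,6`) show that only the `if not is_multiimage:` line and the `else:` branch are deleted, while the `pipeline.run(...)` call is kept as unchanged context at its old, inside-the-`if` indentation. As committed, that leaves the single-image body of `image_to_3d` over-indented, which is consistent with the Space status shown at the top of this page. Below is a minimal sketch of how the simplified function would presumably need to read, with the call dedented to function level. It is not part of the commit: `pipeline`, `render_utils`, and `TMP_DIR` are assumed module-level globals of app.py, and the keyword arguments between the two hunks are assumed to mirror the removed `run_multi_image` call.

# Editor's sketch, not the committed code: single-image image_to_3d with the
# pipeline.run(...) call dedented now that the if/else guard is gone.
import os
from typing import Tuple

import gradio as gr
import numpy as np
import spaces
from PIL import Image

# Assumed to exist at module level in app.py (visible only as usages in the diff):
# pipeline, render_utils, TMP_DIR

@spaces.GPU
def image_to_3d(
    image: Image.Image,
    seed: int,
    ss_guidance_strength: float,
    ss_sampling_steps: int,
    slat_guidance_strength: float,
    slat_sampling_steps: int,
    req: gr.Request,
) -> Tuple[dict, str]:
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    outputs = pipeline.run(
        image,
        seed=seed,
        formats=["gaussian", "mesh"],          # assumed, mirroring the removed run_multi_image call
        preprocess_image=False,                # assumed, mirroring the removed run_multi_image call
        sparse_structure_sampler_params={
            "steps": ss_sampling_steps,
            "cfg_strength": ss_guidance_strength,
        },
        slat_sampler_params={
            "steps": slat_sampling_steps,
            "cfg_strength": slat_guidance_strength,
        },
    )
    # Render colour and normal passes side by side, as in the unchanged context lines.
    video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
    video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
    video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
    ...  # remainder of the function (video export, state packing) is outside the diff and omitted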