FrozenBurning committed on
Commit
93bf50d
1 Parent(s): fb96ff6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -13
app.py CHANGED
@@ -84,7 +84,6 @@ def process(input_image, input_num_steps, input_seed=42, input_cfg=6.0):
84
  output_rgb_video_path = os.path.join(config.output_dir, GRADIO_RGB_VIDEO_PATH)
85
  output_prim_video_path = os.path.join(config.output_dir, GRADIO_PRIM_VIDEO_PATH)
86
  output_mat_video_path = os.path.join(config.output_dir, GRADIO_MAT_VIDEO_PATH)
87
- output_glb_path = os.path.join(config.output_dir, GRADIO_GLB_PATH)
88
 
89
  respacing = "ddim{}".format(input_num_steps)
90
  diffusion = create_diffusion(timestep_respacing=respacing, **config.diffusion)
@@ -139,7 +138,14 @@ def process(input_image, input_num_steps, input_seed=42, input_cfg=6.0):
139
  prim_params = {'srt_param': recon_srt_param[0].detach().cpu(), 'feat_param': recon_feat_param[0].detach().cpu()}
140
  torch.save({'model_state_dict': prim_params}, "{}/denoised.pt".format(config.output_dir))
141
 
 
 
 
142
  # exporting GLB mesh
 
 
 
 
143
  denoise_param_path = os.path.join(config.output_dir, 'denoised.pt')
144
  primx_ckpt_weight = torch.load(denoise_param_path, map_location='cpu')['model_state_dict']
145
  model_primx.load_state_dict(primx_ckpt_weight)
@@ -148,8 +154,7 @@ def process(input_image, input_num_steps, input_seed=42, input_cfg=6.0):
148
  with torch.no_grad():
149
  model_primx.srt_param[:, 1:4] *= 0.85
150
  extract_texmesh(config.inference, model_primx, config.output_dir, device)
151
-
152
- return output_rgb_video_path, output_prim_video_path, output_mat_video_path, output_glb_path
153
 
154
  # gradio UI
155
  _TITLE = '''3DTopia-XL'''
@@ -183,15 +188,15 @@ with block:
183
  input_seed = gr.Slider(label="random seed", minimum=0, maximum=10000, step=1, value=42)
184
  # gen button
185
  button_gen = gr.Button("Generate")
 
186
 
187
  with gr.Column(scale=1):
188
- with gr.Tab("Video"):
189
- with gr.Row():
190
- # final video results
191
- output_rgb_video = gr.Video(label="video")
192
- output_prim_video = gr.Video(label="video")
193
- output_mat_video = gr.Video(label="video")
194
- with gr.Tab("GLB"):
195
  # glb file
196
  output_glb = LitModel3D(
197
  label="3D GLB Model",
@@ -201,8 +206,33 @@ with block:
201
  contrast=1.0,
202
  scale=1.0,
203
  )
204
-
205
- button_gen.click(process, inputs=[input_image, input_num_steps, input_seed, input_cfg], outputs=[output_rgb_video, output_prim_video, output_mat_video, output_glb])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
206
 
207
  gr.Examples(
208
  examples=[
@@ -211,7 +241,7 @@ with block:
211
  "assets/examples/shuai_panda_notail.png",
212
  ],
213
  inputs=[input_image],
214
- outputs=[output_rgb_video, output_prim_video, output_mat_video, output_glb],
215
  fn=lambda x: process(input_image=x),
216
  cache_examples=False,
217
  label='Single Image to 3D PBR Asset'
 
84
  output_rgb_video_path = os.path.join(config.output_dir, GRADIO_RGB_VIDEO_PATH)
85
  output_prim_video_path = os.path.join(config.output_dir, GRADIO_PRIM_VIDEO_PATH)
86
  output_mat_video_path = os.path.join(config.output_dir, GRADIO_MAT_VIDEO_PATH)
 
87
 
88
  respacing = "ddim{}".format(input_num_steps)
89
  diffusion = create_diffusion(timestep_respacing=respacing, **config.diffusion)
 
138
  prim_params = {'srt_param': recon_srt_param[0].detach().cpu(), 'feat_param': recon_feat_param[0].detach().cpu()}
139
  torch.save({'model_state_dict': prim_params}, "{}/denoised.pt".format(config.output_dir))
140
 
141
+ return output_rgb_video_path, output_prim_video_path, output_mat_video_path, gr.update(interactive=True)
142
+
143
+ def export_mesh(remesh=False, decimate=100000, mc_resolution=256):
144
  # exporting GLB mesh
145
+ output_glb_path = os.path.join(config.output_dir, GRADIO_GLB_PATH)
146
+ config.inference.remesh = remesh
147
+ config.inference.decimate = decimate
148
+ config.inference.mc_resolution = mc_resolution
149
  denoise_param_path = os.path.join(config.output_dir, 'denoised.pt')
150
  primx_ckpt_weight = torch.load(denoise_param_path, map_location='cpu')['model_state_dict']
151
  model_primx.load_state_dict(primx_ckpt_weight)
 
154
  with torch.no_grad():
155
  model_primx.srt_param[:, 1:4] *= 0.85
156
  extract_texmesh(config.inference, model_primx, config.output_dir, device)
157
+ return output_glb_path, gr.update(visible=True)
 
158
 
159
  # gradio UI
160
  _TITLE = '''3DTopia-XL'''
 
188
  input_seed = gr.Slider(label="random seed", minimum=0, maximum=10000, step=1, value=42)
189
  # gen button
190
  button_gen = gr.Button("Generate")
191
+ export_glb_btn = gr.Button(value="Export GLB", interactive=False)
192
 
193
  with gr.Column(scale=1):
194
+ with gr.Row():
195
+ # final video results
196
+ output_rgb_video = gr.Video(label="RGB")
197
+ output_prim_video = gr.Video(label="Primitives")
198
+ output_mat_video = gr.Video(label="Material")
199
+ with gr.Row():
 
200
  # glb file
201
  output_glb = LitModel3D(
202
  label="3D GLB Model",
 
206
  contrast=1.0,
207
  scale=1.0,
208
  )
209
+ with gr.Column(visible=False, scale=1.0) as hdr_row:
210
+ gr.Markdown("""## HDR Environment Map
211
+
212
+ Select / Upload an HDR environment map to light the 3D model.
213
+ """)
214
+ with gr.Row():
215
+ hdr_illumination_file = gr.File(
216
+ label="HDR Envmap", file_types=[".hdr"], file_count="single"
217
+ )
218
+ example_hdris = [
219
+ os.path.join("assets/hdri", f)
220
+ for f in os.listdir("assets/hdri")
221
+ ]
222
+ hdr_illumination_example = gr.Examples(
223
+ examples=example_hdris,
224
+ inputs=hdr_illumination_file,
225
+ )
226
+
227
+ hdr_illumination_file.change(
228
+ lambda x: gr.update(env_map=x.name if x is not None else None),
229
+ inputs=hdr_illumination_file,
230
+ outputs=[output_glb],
231
+ )
232
+
233
+ button_gen.click(process, inputs=[input_image, input_num_steps, input_seed, input_cfg], outputs=[output_rgb_video, output_prim_video, output_mat_video, export_glb_btn])
234
+
235
+ export_glb_btn.click(export_mesh, inputs=[], outputs=[output_glb, hdr_row])
236
 
237
  gr.Examples(
238
  examples=[
 
241
  "assets/examples/shuai_panda_notail.png",
242
  ],
243
  inputs=[input_image],
244
+ outputs=[output_rgb_video, output_prim_video, output_mat_video, export_glb_btn],
245
  fn=lambda x: process(input_image=x),
246
  cache_examples=False,
247
  label='Single Image to 3D PBR Asset'