FrozenBurning committed
Commit • fb96ff6
1 Parent(s): 80596aa
add glb vis
Files changed:
- app.py (+18 -9)
- requirements.txt (+3 -2)
app.py
CHANGED
@@ -12,6 +12,7 @@ import torch.nn.functional as F
 import torchvision.transforms.functional as TF
 import rembg
 import gradio as gr
+from gradio_litmodel3d import LitModel3D
 from dva.io import load_from_config
 from dva.ray_marcher import RayMarcher
 from dva.visualize import visualize_primvolume, visualize_video_primvolume
@@ -75,7 +76,7 @@ model_primx = load_from_config(config.model)
 rembg_session = rembg.new_session()
 
 # process function
-def process(input_image, input_num_steps
+def process(input_image, input_num_steps, input_seed=42, input_cfg=6.0):
     # seed
     torch.manual_seed(input_seed)
 
@@ -175,23 +176,31 @@ with block:
             # input image
             input_image = gr.Image(label="image", type='pil')
             # inference steps
-            input_num_steps = gr.
+            input_num_steps = gr.Radio(choices=[25, 50, 100], label="DDIM steps")
             # random seed
-            input_cfg = gr.Slider(label="CFG scale", minimum=0, maximum=15, step=
+            input_cfg = gr.Slider(label="CFG scale", minimum=0, maximum=15, step=0.5, value=6)
             # random seed
-            input_seed = gr.Slider(label="random seed", minimum=0, maximum=
+            input_seed = gr.Slider(label="random seed", minimum=0, maximum=10000, step=1, value=42)
             # gen button
             button_gen = gr.Button("Generate")
 
         with gr.Column(scale=1):
             with gr.Tab("Video"):
-
-
-
-
+                with gr.Row():
+                    # final video results
+                    output_rgb_video = gr.Video(label="video")
+                    output_prim_video = gr.Video(label="video")
+                    output_mat_video = gr.Video(label="video")
             with gr.Tab("GLB"):
                 # glb file
-                output_glb =
+                output_glb = LitModel3D(
+                    label="3D GLB Model",
+                    visible=True,
+                    clear_color=[0.0, 0.0, 0.0, 0.0],
+                    tonemapping="aces",
+                    contrast=1.0,
+                    scale=1.0,
+                )
 
     button_gen.click(process, inputs=[input_image, input_num_steps, input_seed, input_cfg], outputs=[output_rgb_video, output_prim_video, output_mat_video, output_glb])
 
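For readers following along, here is a minimal, self-contained sketch of the layout this commit ends up with: three gr.Video outputs in a "Video" tab plus a LitModel3D viewer in a "GLB" tab, all fed by one click handler. The process() body below is a placeholder stub (the real app runs DDIM sampling seeded with input_seed and returns paths to the rendered videos and the exported GLB); the component calls mirror the diff above.

# Minimal runnable sketch, assuming gradio and gradio-litmodel3d are installed.
import gradio as gr
from gradio_litmodel3d import LitModel3D

def process(input_image, input_num_steps, input_seed=42, input_cfg=6.0):
    # Stub: return None for every output so the demo launches without model weights.
    # The real app returns (rgb_video_path, prim_video_path, mat_video_path, glb_path).
    return None, None, None, None

with gr.Blocks() as block:
    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(label="image", type="pil")
            input_num_steps = gr.Radio(choices=[25, 50, 100], label="DDIM steps")
            input_cfg = gr.Slider(label="CFG scale", minimum=0, maximum=15, step=0.5, value=6)
            input_seed = gr.Slider(label="random seed", minimum=0, maximum=10000, step=1, value=42)
            button_gen = gr.Button("Generate")
        with gr.Column(scale=1):
            with gr.Tab("Video"):
                with gr.Row():
                    output_rgb_video = gr.Video(label="video")
                    output_prim_video = gr.Video(label="video")
                    output_mat_video = gr.Video(label="video")
            with gr.Tab("GLB"):
                output_glb = LitModel3D(label="3D GLB Model", clear_color=[0.0, 0.0, 0.0, 0.0], tonemapping="aces")

    # Gradio maps process()'s return tuple positionally onto this outputs list,
    # so the GLB path must come last to land in the LitModel3D viewer.
    button_gen.click(
        process,
        inputs=[input_image, input_num_steps, input_seed, input_cfg],
        outputs=[output_rgb_video, output_prim_video, output_mat_video, output_glb],
    )

block.launch()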
requirements.txt
CHANGED
@@ -13,11 +13,12 @@ git+https://github.com/NVlabs/nvdiffrast/
 scikit-learn
 open_clip_torch
 triton==2.1.0
-rembg
+rembg[gpu]
 gradio
 tqdm
 transformers==4.40.1
 diffusers==0.19.3
 ninja
 imageio
-imageio-ffmpeg
+imageio-ffmpeg
+gradio-litmodel3d==0.0.1
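Two notes on the dependency changes, to the best of my understanding: the [gpu] extra makes rembg pull in onnxruntime-gpu rather than the CPU-only onnxruntime, so background removal can run on CUDA, and gradio-litmodel3d is presumably pinned to keep the custom component's API stable. A quick sanity check (not part of this commit) that the CUDA provider is actually available after install:

import onnxruntime as ort
# 'CUDAExecutionProvider' should appear in this list when onnxruntime-gpu is active
print(ort.get_available_providers())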