cronos3k committed on
Commit
238c028
·
verified ·
1 Parent(s): 811784d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -219
app.py CHANGED
@@ -15,231 +15,48 @@ from trellis.utils import render_utils, postprocessing_utils
15
  from gradio_litmodel3d import LitModel3D
16
 
17
 
18
# Upper bound for the seed slider / random seed draw (largest signed 32-bit int).
MAX_SEED = np.iinfo(np.int32).max
# Per-session scratch space for rendered preview videos and exported GLB files,
# created next to this script; subdirectories are keyed by Gradio session hash.
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
os.makedirs(TMP_DIR, exist_ok=True)
21
-
22
-
23
def start_session(req: "gr.Request"):
    """Create this session's scratch directory under TMP_DIR."""
    session_dir = os.path.join(TMP_DIR, str(req.session_hash))
    print(f'Creating user directory: {session_dir}')
    os.makedirs(session_dir, exist_ok=True)
27
-
28
def end_session(req: "gr.Request"):
    """Remove this session's scratch directory when the Gradio session ends.

    Uses ``ignore_errors=True`` so a double unload, or a session whose
    directory was already purged by the Blocks cache cleaner, does not
    raise ``FileNotFoundError`` inside the unload handler.
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    print(f'Removing user directory: {user_dir}')
    shutil.rmtree(user_dir, ignore_errors=True)
32
-
33
def preprocess_image(image: "Image.Image") -> "Image.Image":
    """Run the pipeline's image preprocessing (e.g. background removal).

    Note: the original annotation claimed ``Tuple[str, Image.Image]``, but the
    function returns a single processed image; the annotation is corrected.
    """
    processed_image = pipeline.preprocess_image(image)
    return processed_image
36
-
37
def pack_state(gs: "Gaussian", mesh: "MeshExtractResult", trial_id: str) -> dict:
    """Serialize a Gaussian splat + mesh pair into a CPU-only, picklable dict.

    The init params are copied through as-is; every raw parameter tensor is
    moved to the CPU and converted to a NumPy array so the result can live in
    a ``gr.State`` between requests.
    """
    gaussian_blob = dict(gs.init_params)
    for field in ('_xyz', '_features_dc', '_scaling', '_rotation', '_opacity'):
        gaussian_blob[field] = getattr(gs, field).cpu().numpy()
    mesh_blob = {
        'vertices': mesh.vertices.cpu().numpy(),
        'faces': mesh.faces.cpu().numpy(),
    }
    return {
        'gaussian': gaussian_blob,
        'mesh': mesh_blob,
        'trial_id': trial_id,
    }
53
-
54
def unpack_state(state: dict) -> "Tuple[Gaussian, edict, str]":
    """Rebuild the Gaussian and mesh objects from a packed state dict.

    Generalized: falls back to CPU when CUDA is unavailable instead of
    hard-coding ``device='cuda'`` (behavior on CUDA machines is unchanged,
    and this matches the device selection done at startup).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    gs = Gaussian(
        aabb=state['gaussian']['aabb'],
        sh_degree=state['gaussian']['sh_degree'],
        mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
        scaling_bias=state['gaussian']['scaling_bias'],
        opacity_bias=state['gaussian']['opacity_bias'],
        scaling_activation=state['gaussian']['scaling_activation'],
    )
    # Restore the raw parameter tensors exactly as pack_state stored them.
    gs._xyz = torch.tensor(state['gaussian']['_xyz'], device=device)
    gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device=device)
    gs._scaling = torch.tensor(state['gaussian']['_scaling'], device=device)
    gs._rotation = torch.tensor(state['gaussian']['_rotation'], device=device)
    gs._opacity = torch.tensor(state['gaussian']['_opacity'], device=device)

    # The mesh needs no class machinery — a simple attribute bag suffices.
    mesh = edict(
        vertices=torch.tensor(state['mesh']['vertices'], device=device),
        faces=torch.tensor(state['mesh']['faces'], device=device),
    )

    return gs, mesh, state['trial_id']
75
-
76
def get_seed(randomize_seed: bool, seed: int) -> int:
    """Return the user's seed, or draw a fresh one when randomization is on."""
    if randomize_seed:
        return np.random.randint(0, MAX_SEED)
    return seed
78
-
79
def image_to_3d(
    image: "Image.Image",
    seed: int,
    ss_guidance_strength: float,
    ss_sampling_steps: int,
    slat_guidance_strength: float,
    slat_sampling_steps: int,
    req: "gr.Request",
) -> "Tuple[dict, str, str, str]":
    """Generate a 3D asset from an image and render a turnaround preview.

    Returns the packed gaussian/mesh state, the preview video path, and the
    full-quality GLB path twice (once for the viewer, once for the download
    button). All files are written into the caller's session directory.
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    outputs = pipeline.run(
        image,
        seed=seed,
        formats=["gaussian", "mesh"],
        preprocess_image=False,
        sparse_structure_sampler_params={
            "steps": ss_sampling_steps,
            "cfg_strength": ss_guidance_strength,
        },
        slat_sampler_params={
            "steps": slat_sampling_steps,
            "cfg_strength": slat_guidance_strength,
        },
    )
    gaussian_out = outputs['gaussian'][0]
    mesh_out = outputs['mesh'][0]

    # Side-by-side preview: color render on the left, mesh normals on the right.
    color_frames = render_utils.render_video(gaussian_out, num_frames=120)['color']
    normal_frames = render_utils.render_video(mesh_out, num_frames=120)['normal']
    frames = [
        np.concatenate([color, normal], axis=1)
        for color, normal in zip(color_frames, normal_frames)
    ]

    trial_id = str(uuid.uuid4())
    video_path = os.path.join(user_dir, f"{trial_id}.mp4")
    imageio.mimsave(video_path, frames, fps=15)

    # Save full-quality GLB (no simplification, 2K textures).
    glb = postprocessing_utils.to_glb(
        gaussian_out,
        mesh_out,
        simplify=0.0,
        texture_size=2048,
        verbose=False
    )
    glb_path = os.path.join(user_dir, f"{trial_id}_full.glb")
    glb.export(glb_path)

    state = pack_state(gaussian_out, mesh_out, trial_id)
    return state, video_path, glb_path, glb_path
125
-
126
def extract_reduced_glb(
    state: dict,
    mesh_simplify: float,
    texture_size: int,
    req: "gr.Request",
) -> "Tuple[str, str]":
    """Re-export the stored model as a GLB with user-chosen quality settings."""
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    gs, mesh, trial_id = unpack_state(state)

    reduced = postprocessing_utils.to_glb(
        gs, mesh,
        simplify=mesh_simplify,
        texture_size=texture_size,
        verbose=False
    )
    out_path = os.path.join(user_dir, f"{trial_id}_reduced.glb")
    reduced.export(out_path)

    # Same path twice: one output feeds the 3D viewer, one the download button.
    return out_path, out_path
145
-
146
# --- Gradio UI --------------------------------------------------------------
# delete_cache=(600, 600): purge cached session files older than 600 s,
# checked every 600 s.
with gr.Blocks(delete_cache=(600, 600)) as demo:
    gr.Markdown("""
    ## Image to 3D Asset with [TRELLIS](https://trellis3d.github.io/)
    * Upload an image and click "Generate" to create a 3D model
    * You can download either:
    * The full-quality GLB file (larger size, highest quality)
    * A reduced version with customizable quality settings (smaller size)
    """)

    with gr.Row():
        with gr.Column():
            # Input image; RGBA so an existing alpha matte survives preprocessing.
            image_prompt = gr.Image(label="Image Prompt", format="png", image_mode="RGBA", type="pil", height=300)

            with gr.Accordion(label="Generation Settings", open=False):
                seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                gr.Markdown("Stage 1: Sparse Structure Generation")
                with gr.Row():
                    ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
                    ss_sampling_steps = gr.Slider(1, 500, label="Sampling Steps", value=12, step=1)
                gr.Markdown("Stage 2: Structured Latent Generation")
                with gr.Row():
                    slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
                    slat_sampling_steps = gr.Slider(1, 500, label="Sampling Steps", value=12, step=1)

            generate_btn = gr.Button("Generate")

            with gr.Accordion(label="Reduced GLB Settings", open=False):
                mesh_simplify = gr.Slider(0.0, 0.98, label="Mesh Simplification", value=0.95, step=0.01)
                texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)

            # Disabled until a generation has produced a state to extract from.
            extract_reduced_btn = gr.Button("Extract Reduced GLB", interactive=False)

        with gr.Column():
            video_output = gr.Video(label="Generated 3D Asset Preview", autoplay=True, loop=True, height=300)
            model_output = LitModel3D(label="3D Model Preview", exposure=20.0, height=300)
            gr.Markdown("### Download Options")
            with gr.Row():
                download_full = gr.DownloadButton(label="Download Full-Quality GLB", interactive=False)
                download_reduced = gr.DownloadButton(label="Download Reduced GLB", interactive=False)

    # Packed gaussian/mesh state shared between generation and GLB extraction.
    output_buf = gr.State()

    # Example images
    with gr.Row():
        examples = gr.Examples(
            examples=[
                f'assets/example_image/{image}'
                for image in os.listdir("assets/example_image")
            ],
            inputs=[image_prompt],
            fn=preprocess_image,
            outputs=[image_prompt],
            run_on_click=True,
            examples_per_page=64,
        )

    # Event handlers
    # Session lifecycle: create/remove the per-session scratch directory.
    demo.load(start_session)
    demo.unload(end_session)

    image_prompt.upload(
        preprocess_image,
        inputs=[image_prompt],
        outputs=[image_prompt],
    )

    # Generate: resolve the seed, run the pipeline, then enable the full
    # download + extract buttons (reduced download stays disabled until
    # a reduced GLB is actually extracted).
    generate_btn.click(
        get_seed,
        inputs=[randomize_seed, seed],
        outputs=[seed],
    ).then(
        image_to_3d,
        inputs=[image_prompt, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps],
        outputs=[output_buf, video_output, model_output, download_full],
    ).then(
        lambda: (gr.Button(interactive=True), gr.Button(interactive=True), gr.Button(interactive=False)),
        outputs=[download_full, extract_reduced_btn, download_reduced],
    )

    extract_reduced_btn.click(
        extract_reduced_glb,
        inputs=[output_buf, mesh_simplify, texture_size],
        outputs=[model_output, download_reduced],
    ).then(
        lambda: gr.Button(interactive=True),
        outputs=[download_reduced],
    )
234
 
235
if __name__ == "__main__":
    # Initialize pipeline
    pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
    pipeline.cuda()

    # Warm up the background-removal preprocessor so the first real upload is
    # fast. Best-effort, but catch Exception rather than a bare except so
    # KeyboardInterrupt/SystemExit still propagate, and surface the failure.
    try:
        pipeline.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8)))
    except Exception as e:
        print(f"Warning: preprocessing warm-up failed: {e}")

    demo.launch()
 
15
  from gradio_litmodel3d import LitModel3D
16
 
17
 
18
def check_gpu():
    """Check that a CUDA-capable GPU is available and can be initialized.

    Prints the detected GPUs for debugging and selects the current device.

    Raises:
        RuntimeError: if no CUDA GPU is detected, or CUDA initialization fails.
    """
    if not torch.cuda.is_available():
        raise RuntimeError(
            "This application requires a CUDA-capable GPU to run. "
            "No CUDA GPU was detected in your system."
        )

    # Print GPU information for debugging
    gpu_count = torch.cuda.device_count()
    print(f"Found {gpu_count} CUDA GPU(s)")
    for i in range(gpu_count):
        gpu_name = torch.cuda.get_device_name(i)
        print(f"GPU {i}: {gpu_name}")

    # Try to initialize CUDA
    try:
        torch.cuda.init()
        current_device = torch.cuda.current_device()
        print(f"Using GPU {current_device}: {torch.cuda.get_device_name(current_device)}")
    except Exception as e:
        # Chain the original exception so the underlying CUDA failure cause
        # is preserved in the traceback (PEP 3134).
        raise RuntimeError(f"Failed to initialize CUDA: {str(e)}") from e
 
 
 
 
 
40
 
41
+ # ... [rest of the code remains exactly the same until main] ...
 
 
 
 
 
 
 
42
 
43
if __name__ == "__main__":
    # Check GPU availability first
    check_gpu()

    # Initialize pipeline with explicit device setting
    # NOTE(review): check_gpu() above already raised if CUDA is missing, so
    # the "cpu" branch here is effectively dead — kept as a safety net.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    pipeline = TrellisImageTo3DPipeline.from_pretrained(
        "JeffreyXiang/TRELLIS-image-large"
    ).to(device)

    try:
        # Use smaller test image and explicit device
        # Warm-up pass: forces the background-removal (rembg) model to load
        # before the first real user request.
        test_img = np.zeros((256, 256, 3), dtype=np.uint8)
        pipeline.preprocess_image(Image.fromarray(test_img))
        del test_img
    except Exception as e:
        # Best-effort: a failed warm-up only slows the first request.
        print(f"Warning: Failed to preload rembg: {str(e)}")

    # Launch the demo
    demo.launch()