Update app.py
app.py CHANGED
@@ -1,83 +1,14 @@
-import os
-import sys
-import gradio as gr
-import torch
+import pyrender
 import numpy as np
-
-from visualise.rendering import RenderTool
-from data_utils import torch_data
-from trainer.options import parse_args
-from trainer.config import load_JsonConfig
-from nets import init_model, infer  # Ensure these functions are properly defined
+import trimesh
 
-#
-
-
-sys.path.append(os.getcwd())
+# Create a simple mesh
+mesh = trimesh.Sphere(radius=1.0)
+mesh = pyrender.Mesh.from_trimesh(mesh)
 
-#
-
-
-    config = load_JsonConfig(config_file)
-
-    # Initialize models
-    generator_face = init_model(face_model_name, face_model_path, args, config)
-    generator_body = init_model(body_model_name, body_model_path, args, config)
+# Create a scene and add the mesh
+scene = pyrender.Scene()
+scene.add(mesh)
 
-
-    smplx_model_params = {
-        'model_path': './visualise/',
-        'model_type': 'smplx',
-        'create_global_orient': True,
-        'create_body_pose': True,
-        'create_betas': True,
-        'num_betas': 300,
-        'create_left_hand_pose': True,
-        'create_right_hand_pose': True,
-        'use_pca': False,
-        'flat_hand_mean': False,
-        'create_expression': True,
-        'num_expression_coeffs': 100,
-        'num_pca_comps': 12,
-        'create_jaw_pose': True,
-        'create_leye_pose': True,
-        'create_reye_pose': True,
-        'create_transl': False,
-        'dtype': torch.float64,
-    }
-    smplx_model = smpl.create(**smplx_model_params).to('cuda')
-
-    return generator_face, generator_body, smplx_model, config
-
-# Inference function
-def run_inference(audio_file):
-    # Load models
-    generator_face, generator_body, smplx_model, config = load_models(
-        './config/LS3DCG.json',
-        's2g_LS3DCG',
-        'experiments/2022-10-19-smplx_S2G-LS3DCG/ckpt-99.pth',
-        's2g_LS3DCG',
-        'experiments/2022-10-19-smplx_S2G-LS3DCG/ckpt-99.pth'
-    )
-
-    # Initialize rendering tool
-    rendertool = RenderTool('visualise/video/' + config.Log.name)
-
-    # Inference
-    infer(generator_body, generator_face, smplx_model, rendertool, config, audio_file)
-
-    # Provide output (e.g., path to the rendered video)
-    output_video_path = f'visualise/video/{config.Log.name}/{audio_file.split("/")[-1].split(".")[0]}.npy'
-    return output_video_path
-
-# Gradio interface
-iface = gr.Interface(
-    fn=run_inference,
-    inputs=gr.inputs.Audio(source="upload", type="filepath", label="Upload Audio File"),
-    outputs=gr.outputs.Textbox(label="Output Video Path"),
-    title="Audio to 3D Model Renderer",
-    description="Upload an audio file to generate a 3D model rendering."
-)
-
-if __name__ == "__main__":
-    iface.launch()
+# Create a viewer
+pyrender.Viewer(scene, use_raymond_lights=True)
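A note on the replacement script: as committed it is unlikely to run. Recent trimesh releases do not expose a Sphere constructor at the top level (sphere meshes usually come from trimesh.creation.icosphere or trimesh.primitives.Sphere), the usual Viewer flag is spelled use_raymond_lighting, and pyrender.Viewer opens an interactive window, which a headless Space cannot provide. A minimal offscreen sketch, assuming an EGL (or OSMesa) capable pyrender install and the imageio package, could look like this:

import os
os.environ.setdefault("PYOPENGL_PLATFORM", "egl")  # assumption: headless GL via EGL; use "osmesa" if that is what is installed

import numpy as np
import trimesh
import pyrender
import imageio  # assumption: available for writing the rendered frame

# Build a unit sphere and wrap it for pyrender
sphere = trimesh.creation.icosphere(radius=1.0)
mesh = pyrender.Mesh.from_trimesh(sphere)

# Scene with the mesh, a camera pulled back along +Z, and a light at the camera
scene = pyrender.Scene()
scene.add(mesh)
camera_pose = np.eye(4)
camera_pose[2, 3] = 3.0
scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)
scene.add(pyrender.DirectionalLight(intensity=3.0), pose=camera_pose)

# Render offscreen and write a still image instead of opening a viewer window
renderer = pyrender.OffscreenRenderer(viewport_width=512, viewport_height=512)
color, _ = renderer.render(scene)
renderer.delete()
imageio.imwrite("sphere.png", color)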
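On the removed side, load_models handed the long parameter dictionary to smpl.create, the factory function of the smplx package; the import that bound the smpl name does not appear in the removed lines shown, so import smplx as smpl is an assumption here. A hedged sketch of the equivalent call, assuming the SMPL-X model files sit under ./visualise/:

import torch
import smplx  # assumption: the removed file imported this package under the name `smpl`

# smplx.create reads the model files under model_path and returns an SMPL-X layer;
# the keyword arguments mirror a subset of the removed smplx_model_params dict.
smplx_model = smplx.create(
    model_path='./visualise/',
    model_type='smplx',
    num_betas=300,
    num_expression_coeffs=100,
    use_pca=False,
    flat_hand_mean=False,
    dtype=torch.float64,
)
smplx_model = smplx_model.to('cuda' if torch.cuda.is_available() else 'cpu')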
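The removed Gradio wrapper was written against the pre-3.x gr.inputs / gr.outputs namespaces, which current Gradio releases no longer ship. If the audio-to-video UI were reinstated, a minimal sketch against the modern component API (run_inference here is a stand-in for the removed helper of the same name) could be:

import gradio as gr

def run_inference(audio_file: str) -> str:
    # Stand-in for the removed run_inference(audio_file) helper,
    # which ran the face/body generators and returned an output path.
    ...

iface = gr.Interface(
    fn=run_inference,
    inputs=gr.Audio(type="filepath", label="Upload Audio File"),
    outputs=gr.Textbox(label="Output Video Path"),
    title="Audio to 3D Model Renderer",
    description="Upload an audio file to generate a 3D model rendering.",
)

if __name__ == "__main__":
    iface.launch()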