update zero utils
Files changed:
- LHM/runners/infer/human_lrm.py +2 -4
- app.py +279 -278
- engine/pose_estimation/pose_estimator.py +3 -3
- requirements_lhm.txt +1 -0
LHM/runners/infer/human_lrm.py CHANGED

@@ -747,11 +747,9 @@ class HumanLRMInferrer(Inferrer):
             dump_video_path=dump_video_path,
             shape_param=shape_pose.beta,
         )
-        # if gradio_masked_image is not None:
-        #     os.system("cp {} {}".format())
-        # if gradio_video_save_path is not None:
-        #     os.system("cp {} {}".format(dump_video_path, gradio_video_save_path))
         return True
+    def to(self, device):
+        self.pose_estimator.to(device)


 # @REGISTRY_RUNNERS.register("infer.human_lrm_video")
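The new to(device) hook lets the Space move the inferrer onto the GPU before serving requests (app.py below calls runner.to('cuda')). A minimal sketch of the same delegation pattern, using hypothetical stand-in classes rather than the real HumanLRMInferrer:

# Hypothetical stand-ins illustrating the delegation in HumanLRMInferrer.to().
import torch

class DummyPoseEstimator(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone = torch.nn.Linear(8, 8)  # stand-in for the real pose network

class DummyRunner:
    def __init__(self):
        self.pose_estimator = DummyPoseEstimator()

    def to(self, device):
        # Same idea as the added method: delegate device placement to the owned module.
        self.pose_estimator.to(device)

runner = DummyRunner()
runner.to("cuda" if torch.cuda.is_available() else "cpu")
print(next(runner.pose_estimator.parameters()).device)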
app.py CHANGED

@@ -1,282 +1,283 @@
-[previous app.py lines 1-275: mostly comments and blank lines; content not recoverable from this view]
-
-import gradio as gr
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-demo.launch()
+# Copyright (c) 2023-2024, Qi Zuo
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+from PIL import Image
+import numpy as np
+import gradio as gr
+import base64
+import spaces
+import subprocess
+import os
+
+# def install_cuda_toolkit():
+#     # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"
+#     # # CUDA_TOOLKIT_URL = "https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run"
+#     # CUDA_TOOLKIT_FILE = "/tmp/%s" % os.path.basename(CUDA_TOOLKIT_URL)
+#     # subprocess.call(["wget", "-q", CUDA_TOOLKIT_URL, "-O", CUDA_TOOLKIT_FILE])
+#     # subprocess.call(["chmod", "+x", CUDA_TOOLKIT_FILE])
+#     # subprocess.call([CUDA_TOOLKIT_FILE, "--silent", "--toolkit"])
+
+#     os.environ["CUDA_HOME"] = "/usr/local/cuda"
+#     os.environ["PATH"] = "%s/bin:%s" % (os.environ["CUDA_HOME"], os.environ["PATH"])
+#     os.environ["LD_LIBRARY_PATH"] = "%s/lib:%s" % (
+#         os.environ["CUDA_HOME"],
+#         "" if "LD_LIBRARY_PATH" not in os.environ else os.environ["LD_LIBRARY_PATH"],
+#     )
+#     # Fix: arch_list[-1] += '+PTX'; IndexError: list index out of range
+#     os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
+
+# install_cuda_toolkit()
+
+def launch_pretrained():
+    from huggingface_hub import snapshot_download, hf_hub_download
+    hf_hub_download(repo_id="DyrusQZ/LHM_Runtime", repo_type='model', filename='assets.tar', local_dir="./")
+    os.system("tar -xvf assets.tar && rm assets.tar")
+    hf_hub_download(repo_id="DyrusQZ/LHM_Runtime", repo_type='model', filename='LHM-0.5B.tar', local_dir="./")
+    os.system("tar -xvf LHM-0.5B.tar && rm LHM-0.5B.tar")
+    hf_hub_download(repo_id="DyrusQZ/LHM_Runtime", repo_type='model', filename='LHM_prior_model.tar', local_dir="./")
+    os.system("tar -xvf LHM_prior_model.tar && rm LHM_prior_model.tar")
+
+def launch_env_not_compile_with_cuda():
+    os.system("pip install chumpy")
+    os.system("pip uninstall -y basicsr")
+    os.system("pip install git+https://github.com/hitsz-zuoqi/BasicSR/")
+    # os.system("pip install -e ./third_party/sam2")
+    os.system("pip install numpy==1.23.0")
+    # os.system("pip install git+https://github.com/hitsz-zuoqi/sam2/")
+    # os.system("pip install git+https://github.com/ashawkey/diff-gaussian-rasterization/")
+    # os.system("pip install git+https://github.com/camenduru/simple-knn/")
+    os.system("pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt251/download.html")
+
+# def launch_env_compile_with_cuda():
+#     # simple_knn
+#     os.system("wget oss://virutalbuy-public/share/aigc3d/data/for_lingteng/LHM/simple_knn.zip && wget oss://virutalbuy-public/share/aigc3d/data/for_lingteng/LHM/simple_knn-0.0.0.dist-info.zip")
+#     os.system("unzip simple_knn.zip && unzip simple_knn-0.0.0.dist-info.zip")
+#     os.system("mv simple_knn /usr/local/lib/python3.10/site-packages/")
+#     os.system("mv simple_knn-0.0.0.dist-info /usr/local/lib/python3.10/site-packages/")
+
+#     # diff_gaussian
+#     os.system("wget oss://virutalbuy-public/share/aigc3d/data/for_lingteng/LHM/diff_gaussian_rasterization.zip && wget oss://virutalbuy-public/share/aigc3d/data/for_lingteng/LHM/diff_gaussian_rasterization-0.0.0.dist-info.zip")
+#     os.system("unzip diff_gaussian_rasterization.zip && unzip diff_gaussian_rasterization-0.0.0.dist-info.zip")
+#     os.system("mv diff_gaussian_rasterization /usr/local/lib/python3.10/site-packages/")
+#     os.system("mv diff_gaussian_rasterization-0.0.0.dist-info /usr/local/lib/python3.10/site-packages/")
+
+#     # pytorch3d
+#     os.system("wget oss://virutalbuy-public/share/aigc3d/data/for_lingteng/LHM/pytorch3d.zip && wget oss://virutalbuy-public/share/aigc3d/data/for_lingteng/LHM/pytorch3d-0.7.8.dist-info.zip")
+#     os.system("unzip pytorch3d.zip && unzip pytorch3d-0.7.8.dist-info.zip")
+#     os.system("mv pytorch3d /usr/local/lib/python3.10/site-packages/")
+#     os.system("mv pytorch3d-0.7.8.dist-info /usr/local/lib/python3.10/site-packages/")
+
+
+# launch_env_compile_with_cuda()
+
+def assert_input_image(input_image):
+    if input_image is None:
+        raise gr.Error("No image selected or uploaded!")
+
+def prepare_working_dir():
+    import tempfile
+    working_dir = tempfile.TemporaryDirectory()
+    return working_dir
+
+def init_preprocessor():
+    from LHM.utils.preprocess import Preprocessor
+    global preprocessor
+    preprocessor = Preprocessor()
+
+def preprocess_fn(image_in: np.ndarray, remove_bg: bool, recenter: bool, working_dir):
+    image_raw = os.path.join(working_dir.name, "raw.png")
+    with Image.fromarray(image_in) as img:
+        img.save(image_raw)
+    image_out = os.path.join(working_dir.name, "rembg.png")
+    success = preprocessor.preprocess(image_path=image_raw, save_path=image_out, rmbg=remove_bg, recenter=recenter)
+    assert success, f"Failed under preprocess_fn!"
+    return image_out
+
+def get_image_base64(path):
+    with open(path, "rb") as image_file:
+        encoded_string = base64.b64encode(image_file.read()).decode()
+    return f"data:image/png;base64,{encoded_string}"
+
+
+def demo_lhm(infer_impl):
+
+    def core_fn(image: str, video_params, working_dir):
+        image_raw = os.path.join(working_dir.name, "raw.png")
+        with Image.fromarray(image) as img:
+            img.save(image_raw)
 
+        base_vid = os.path.basename(video_params).split("_")[0]
+        smplx_params_dir = os.path.join("./assets/sample_motion", base_vid, "smplx_params")
+
+        dump_video_path = os.path.join(working_dir.name, "output.mp4")
+        dump_image_path = os.path.join(working_dir.name, "output.png")
+
+        status = spaces.GPU(infer_impl(
+            gradio_demo_image=image_raw,
+            gradio_motion_file=smplx_params_dir,
+            gradio_masked_image=dump_image_path,
+            gradio_video_save_path=dump_video_path
+        ))
+        if status:
+            return dump_image_path, dump_video_path
+        else:
+            return None, None
+
+    _TITLE = '''LHM: Large Animatable Human Model'''
+
+    _DESCRIPTION = '''
+    <strong>Reconstruct a human avatar in 0.2 seconds with A100!</strong>
+    '''
+
+    with gr.Blocks(analytics_enabled=False) as demo:
+
+        # </div>
+        logo_url = "./assets/rgba_logo_new.png"
+        logo_base64 = get_image_base64(logo_url)
+        gr.HTML(
+            f"""
+            <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+            <div>
+                <h1> <img src="{logo_base64}" style='height:35px; display:inline-block;'/> Large Animatable Human Model </h1>
+            </div>
+            </div>
+            """
+        )
+        gr.HTML(
+            """<p><h4 style="color: red;"> Notes: Please input full-body image in case of detection errors.</h4></p>"""
+        )
+
+        # DISPLAY
+        with gr.Row():
+
+            with gr.Column(variant='panel', scale=1):
+                with gr.Tabs(elem_id="openlrm_input_image"):
+                    with gr.TabItem('Input Image'):
+                        with gr.Row():
+                            input_image = gr.Image(label="Input Image", image_mode="RGBA", height=480, width=270, sources="upload", type="numpy", elem_id="content_image")
+                # EXAMPLES
+                with gr.Row():
+                    examples = [
+                        ['assets/sample_input/joker.jpg'],
+                        ['assets/sample_input/anime.png'],
+                        ['assets/sample_input/basket.png'],
+                        ['assets/sample_input/ai_woman1.JPG'],
+                        ['assets/sample_input/anime2.JPG'],
+                        ['assets/sample_input/anime3.JPG'],
+                        ['assets/sample_input/boy1.png'],
+                        ['assets/sample_input/choplin.jpg'],
+                        ['assets/sample_input/eins.JPG'],
+                        ['assets/sample_input/girl1.png'],
+                        ['assets/sample_input/girl2.png'],
+                        ['assets/sample_input/robot.jpg'],
+                    ]
+                    gr.Examples(
+                        examples=examples,
+                        inputs=[input_image],
+                        examples_per_page=20,
+                    )
+
+            with gr.Column():
+                with gr.Tabs(elem_id="openlrm_input_video"):
+                    with gr.TabItem('Input Video'):
+                        with gr.Row():
+                            video_input = gr.Video(label="Input Video", height=480, width=270, interactive=False)
+
+                examples = [
+                    # './assets/sample_motion/danaotiangong/danaotiangong_origin.mp4',
+                    './assets/sample_motion/ex5/ex5_origin.mp4',
+                    './assets/sample_motion/girl2/girl2_origin.mp4',
+                    './assets/sample_motion/jntm/jntm_origin.mp4',
+                    './assets/sample_motion/mimo1/mimo1_origin.mp4',
+                    './assets/sample_motion/mimo2/mimo2_origin.mp4',
+                    './assets/sample_motion/mimo4/mimo4_origin.mp4',
+                    './assets/sample_motion/mimo5/mimo5_origin.mp4',
+                    './assets/sample_motion/mimo6/mimo6_origin.mp4',
+                    './assets/sample_motion/nezha/nezha_origin.mp4',
+                    './assets/sample_motion/taiji/taiji_origin.mp4'
+                ]
+
+                gr.Examples(
+                    examples=examples,
+                    inputs=[video_input],
+                    examples_per_page=20,
+                )
+            with gr.Column(variant='panel', scale=1):
+                with gr.Tabs(elem_id="openlrm_processed_image"):
+                    with gr.TabItem('Processed Image'):
+                        with gr.Row():
+                            processed_image = gr.Image(label="Processed Image", image_mode="RGBA", type="filepath", elem_id="processed_image", height=480, width=270, interactive=False)
+
+            with gr.Column(variant='panel', scale=1):
+                with gr.Tabs(elem_id="openlrm_render_video"):
+                    with gr.TabItem('Rendered Video'):
+                        with gr.Row():
+                            output_video = gr.Video(label="Rendered Video", format="mp4", height=480, width=270, autoplay=True)
+
+        # SETTING
+        with gr.Row():
+            with gr.Column(variant='panel', scale=1):
+                submit = gr.Button('Generate', elem_id="openlrm_generate", variant='primary')
+
+
+        working_dir = gr.State()
+        submit.click(
+            fn=assert_input_image,
+            inputs=[input_image],
+            queue=False,
+        ).success(
+            fn=prepare_working_dir,
+            outputs=[working_dir],
+            queue=False,
+        ).success(
+            fn=core_fn,
+            inputs=[input_image, video_input, working_dir],  # video_params refer to smpl dir
+            outputs=[processed_image, output_video],
+        )
+
+    demo.queue()
+    demo.launch()
+
+
+def launch_gradio_app():
+
+    os.environ.update({
+        "APP_ENABLED": "1",
+        "APP_MODEL_NAME": "./exps/releases/video_human_benchmark/human-lrm-500M/step_060000/",
+        "APP_INFER": "./configs/inference/human-lrm-500M.yaml",
+        "APP_TYPE": "infer.human_lrm",
+        "NUMBA_THREADING_LAYER": 'omp',
+    })
+
+    from LHM.runners import REGISTRY_RUNNERS
+    RunnerClass = REGISTRY_RUNNERS[os.getenv("APP_TYPE")]
+    with RunnerClass() as runner:
+        runner.to('cuda')
+        demo_lhm(infer_impl=runner.infer)
+
+
+if __name__ == '__main__':
+    # launch_pretrained()
+    # launch_env_not_compile_with_cuda()
+    launch_gradio_app()
 
+# import gradio as gr
 
+# def greet(name):
+#     return "Hello " + name + "!!"
 
+# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+# demo.launch()
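On ZeroGPU Spaces, the spaces.GPU helper is normally applied as a decorator to the function that needs the GPU, so a device is attached for the duration of each call; core_fn above instead passes the already-evaluated infer_impl(...) result to spaces.GPU. A minimal sketch of the usual decorator pattern, with an illustrative wrapper name and infer_impl passed in explicitly:

# Sketch of the common ZeroGPU decorator usage; run_inference is an illustrative name.
import spaces

@spaces.GPU  # requests a ZeroGPU device for the duration of each call
def run_inference(infer_impl, image_path, motion_dir, masked_image_path, video_save_path):
    # Delegates to the runner's infer callable, mirroring the kwargs used in core_fn.
    return infer_impl(
        gradio_demo_image=image_path,
        gradio_motion_file=motion_dir,
        gradio_masked_image=masked_image_path,
        gradio_video_save_path=video_save_path,
    )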
engine/pose_estimation/pose_estimator.py CHANGED

@@ -92,8 +92,9 @@ def inverse_perspective_projection(points, K, distance):
     return points


-class PoseEstimator:
+class PoseEstimator(torch.nn.Module):
     def __init__(self, model_path, device="cuda"):
+        super().__init__()
         self.device = torch.device(device)
         self.mhmr_model = load_model(
             os.path.join(model_path, "pose_estimate", "multiHMR_896_L.pt"),
@@ -170,8 +171,7 @@ class PoseEstimator:

        return resize_img, annotation

-
-    def __call__(self, img_path):
+    def forward(self, img_path):
        # image_tensor H W C

        # self.device = torch.device('cuda')
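Deriving PoseEstimator from torch.nn.Module (and renaming __call__ to forward) is what makes the runner-level to(device) above actually move the estimator: nn.Module.to relocates every registered parameter and buffer, and calling the instance dispatches through nn.Module.__call__ to forward. A minimal sketch with a toy module, not the real PoseEstimator:

# Toy module illustrating the two behaviours gained by subclassing nn.Module.
import torch

class TinyEstimator(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Linear(4, 4)  # registered submodule, follows .to(device)

    def forward(self, x):
        return self.net(x)

est = TinyEstimator()
est.to("cpu")                   # nn.Module.to moves all registered parameters
out = est(torch.randn(1, 4))    # calling the instance dispatches to forward()
print(out.shape)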
requirements_lhm.txt CHANGED

@@ -47,6 +47,7 @@ trimesh==4.4.9
 typeguard==2.13.3
 xatlas==0.0.9
 imageio-ffmpeg
+rembg[cpu]

 ./wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl
 ./wheels/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl
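The new rembg[cpu] dependency presumably backs the background-removal step used during preprocessing (the rmbg flag in preprocess_fn). A minimal sketch of rembg's basic API, with illustrative file names:

# Illustrative rembg usage; file names are placeholders.
from PIL import Image
from rembg import remove

img = Image.open("raw.png")
cutout = remove(img)       # returns an RGBA image with the background removed
cutout.save("rembg.png")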