txing committed on
Commit e6728f2 · verified · 1 Parent(s): 4961a06

change app.py

Files changed (1):
  1. app.py +40 -40
app.py CHANGED
@@ -6,28 +6,28 @@ import cv2
 import gradio as gr
 import torch
 import math
-import spaces
+# import spaces
 from huggingface_hub import hf_hub_download
 try:
     import mmpose
 except:
-    os.system('pip install /home/user/app/main/transformer_utils')
-hf_hub_download(repo_id="caizhongang/SMPLer-X", filename="smpler_x_h32.pth.tar", local_dir="/home/user/app/pretrained_models")
-os.system('cp -rf /home/user/app/assets/conversions.py /home/user/.pyenv/versions/3.9.19/lib/python3.9/site-packages/torchgeometry/core/conversions.py')
-DEFAULT_MODEL='smpler_x_h32'
-OUT_FOLDER = '/home/user/app/demo_out'
+    os.system('pip install /Volumes/zzz/smplerx2/main/transformer_utils')
+# hf_hub_download(repo_id="caizhongang/SMPLer-X", filename="smpler_x_h32.pth.tar", local_dir="/home/user/app/pretrained_models")
+# os.system('cp -rf /home/user/app/assets/conversions.py /home/user/.pyenv/versions/3.9.19/lib/python3.9/site-packages/torchgeometry/core/conversions.py')
+DEFAULT_MODEL='smpler_x_s32'
+OUT_FOLDER = '/home/ztx/Downloads/smplerx2/output'
 os.makedirs(OUT_FOLDER, exist_ok=True)
 num_gpus = 1 if torch.cuda.is_available() else -1
-print("!!!", torch.cuda.is_available())
-print(torch.cuda.device_count())
-print(torch.version.cuda)
-index = torch.cuda.current_device()
-print(index)
-print(torch.cuda.get_device_name(index))
+print("!!!", torch.cuda.is_available())
+print(torch.cuda.device_count())
+print(torch.version.cuda)
+# index = torch.cuda.current_device()
+# print(index)
+# print(torch.cuda.get_device_name(index))
 from main.inference import Inferer
 inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
 
-@spaces.GPU(enable_queue=True, duration=300)
+# @spaces.GPU(enable_queue=True, duration=300)
 def infer(video_input, in_threshold=0.5, num_people="Single person", render_mesh=False):
     # from main.inference import Inferer
     # inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
@@ -68,24 +68,24 @@ def infer(video_input, in_threshold=0.5, num_people="Single person", render_mesh=False):
         yield img, video_path, save_mesh_file, save_smplx_file
 
 TITLE = '''<h1 align="center">SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation</h1>'''
-VIDEO = '''
-<center><iframe width="960" height="540"
-src="https://www.youtube.com/embed/DepTqbPpVzY?si=qSeQuX-bgm_rON7E" title="SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen>
-</iframe>
-</center><br>'''
-DESCRIPTION = '''
-<b>Official Gradio demo</b> for <a href="https://caizhongang.com/projects/SMPLer-X/"><b>SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation</b></a>.<br>
-<p>
-Note: You can drop a video at the panel (or select one of the examples)
-to obtain the 3D parametric reconstructions of the detected humans.
-</p>
-'''
+# VIDEO = '''
+# <center><iframe width="960" height="540"
+# src="https://www.youtube.com/embed/DepTqbPpVzY?si=qSeQuX-bgm_rON7E" title="SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen>
+# </iframe>
+# </center><br>'''
+# DESCRIPTION = '''
+# <b>Official Gradio demo</b> for <a href="https://caizhongang.com/projects/SMPLer-X/"><b>SMPLer-X: Scaling Up Expressive Human Pose and Shape Estimation</b></a>.<br>
+# <p>
+# Note: You can drop a video at the panel (or select one of the examples)
+# to obtain the 3D parametric reconstructions of the detected humans.
+# </p>
+# '''
 
 with gr.Blocks(title="SMPLer-X", css=".gradio-container") as demo:
 
     gr.Markdown(TITLE)
-    gr.HTML(VIDEO)
-    gr.Markdown(DESCRIPTION)
+    # gr.HTML(VIDEO)
+    # gr.Markdown(DESCRIPTION)
 
     with gr.Row():
         with gr.Column():
@@ -101,7 +101,7 @@ with gr.Blocks(title="SMPLer-X", css=".gradio-container") as demo:
                     scale=1,)
                 gr.HTML("""<br/>""")
                 mesh_as_vertices = gr.Checkbox(
-                    label="Render as mesh",
+                    label="Render as mesh",
                     info="By default, the estimated SMPL-X parameters are rendered as vertices for faster visualization. Check this option if you want to visualize meshes instead.",
                     interactive=True,
                     scale=1,)
@@ -119,18 +119,18 @@ with gr.Blocks(title="SMPLer-X", css=".gradio-container") as demo:
     # example_images = gr.Examples([])
    send_button.click(fn=infer, inputs=[video_input, threshold, num_people, mesh_as_vertices], outputs=[processed_frames, video_output, meshes_output, smplx_output])
     # with gr.Row():
-    example_videos = gr.Examples([
-        ['/home/user/app/assets/01.mp4'],
-        ['/home/user/app/assets/02.mp4'],
-        ['/home/user/app/assets/03.mp4'],
-        ['/home/user/app/assets/04.mp4'],
-        ['/home/user/app/assets/05.mp4'],
-        ['/home/user/app/assets/06.mp4'],
-        ['/home/user/app/assets/07.mp4'],
-        ['/home/user/app/assets/08.mp4'],
-        ['/home/user/app/assets/09.mp4'],
-        ],
-        inputs=[video_input, 0.5])
+    # example_videos = gr.Examples([
+    #     ['/home/user/app/assets/01.mp4'],
+    #     ['/home/user/app/assets/02.mp4'],
+    #     ['/home/user/app/assets/03.mp4'],
+    #     ['/home/user/app/assets/04.mp4'],
+    #     ['/home/user/app/assets/05.mp4'],
+    #     ['/home/user/app/assets/06.mp4'],
+    #     ['/home/user/app/assets/07.mp4'],
+    #     ['/home/user/app/assets/08.mp4'],
+    #     ['/home/user/app/assets/09.mp4'],
+    #     ],
+    #     inputs=[video_input, 0.5])
 
 #demo.queue()
 demo.queue().launch(debug=True)
 
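In summary, the commit repoints the app at machine-specific paths (a pip install from /Volumes/zzz/smplerx2, an output folder under /home/ztx/Downloads/smplerx2), switches the checkpoint from smpler_x_h32 to the smaller smpler_x_s32, and comments out everything that only applies on the Hugging Face Space: the spaces import, the @spaces.GPU decorator, the checkpoint download, the promo video and description, and the bundled example videos.

A minimal sketch of a more portable variant of the setup block, assuming invented environment-variable names SMPLERX_MODEL and SMPLERX_OUT (they are not part of this repo) instead of hard-coded per-machine values:

import os

import torch

# Hypothetical configuration knobs: SMPLERX_MODEL and SMPLERX_OUT are invented
# names for this sketch; fall back to values equivalent to this commit's.
DEFAULT_MODEL = os.environ.get('SMPLERX_MODEL', 'smpler_x_s32')
OUT_FOLDER = os.environ.get('SMPLERX_OUT', os.path.join(os.getcwd(), 'demo_out'))
os.makedirs(OUT_FOLDER, exist_ok=True)

num_gpus = 1 if torch.cuda.is_available() else -1

# Guard the CUDA-only probes so they cannot raise on a CPU-only machine,
# which is the failure the commented-out torch.cuda.current_device() lines risked.
if torch.cuda.is_available():
    index = torch.cuda.current_device()
    print(torch.cuda.device_count(), torch.version.cuda,
          torch.cuda.get_device_name(index))

Under that assumption the same app.py could serve both deployments: the Space would export SMPLERX_MODEL=smpler_x_h32 and its own output path, while a local checkout relies on the fallbacks rather than source edits like this commit.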