mingyuan committed on
Commit 4c803c6 · 1 Parent(s): 0d91c82

change_loading_options

Files changed (1): app.py (+13, -7)
app.py CHANGED

@@ -105,7 +105,10 @@ if type == "local":
         "std": "../data_hf/data/motionverse/statistics/std.npy",
         "skeleton": "../data_hf/data/motionverse/statistics/skeleton.npy",
         "lmm": "../data_hf/data/motionverse/pretrained/lmm_small_demo.pth",
-        "imagebind": "../data_hf/data/motionverse/pretrained/imagebind_huge.pth"
+        "imagebind": "../data_hf/data/motionverse/pretrained/imagebind_huge.pth",
+        "audio_placeholder": "../data_hf/examples/placeholder.m4a",
+        "audio_surprise": "../data_hf/examples/surprise.m4a",
+        "audio_angry": "../data_hf/examples/angry.m4a"
     }
     os.environ["NO_PROXY"] = os.environ["no_proxy"] = "localhost, 127.0.0.1:7860"
 else:
@@ -114,7 +117,10 @@ else:
         "std": "data/motionverse/statistics/std.npy",
         "skeleton": "data/motionverse/statistics/skeleton.npy",
         "lmm": "data/motionverse/pretrained/lmm_small_demo.pth",
-        "imagebind": "data/motionverse/pretrained/imagebind_huge.pth"
+        "imagebind": "data/motionverse/pretrained/imagebind_huge.pth",
+        "audio_placeholder": "examples/placeholder.m4a",
+        "audio_surprise": "examples/surprise.m4a",
+        "audio_angry": "examples/angry.m4a"
     }
     load_file_list = {}
     for key in src_file_list.keys():
@@ -238,11 +244,11 @@ demo = gr.Interface(
     inputs=[input_text, input_audio, gr.Slider(20, 200, value=60, label="Motion length (fps 20):")],
     outputs=gr.Video(label="Video:"),
     examples=[
-        ["A person walks in a circle.", "examples/placeholder.m4a", 120],
-        ["A person jumps forward.", "examples/placeholder.m4a", 100],
-        ["A person is stretching arms.", "examples/placeholder.m4a", 80],
-        ["", "examples/surprise.m4a", 200],
-        ["", "examples/angry.m4a", 200],
+        ["A person walks in a circle.", load_file_list["audio_placeholder"], 120],
+        ["A person jumps forward.", load_file_list["audio_placeholder"], 100],
+        ["A person is stretching arms.", load_file_list["audio_placeholder"], 80],
+        ["", load_file_list["audio_surprise"], 200],
+        ["", load_file_list["audio_angry"], 200],
     ],
     title="LMM: Large Motion Model for Unified Multi-Modal Motion Generation",
     description="\nThis is an interactive demo for LMM. For more information, feel free to visit our project page(https://github.com/mingyuan-zhang/LMM).")
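
For context on why the examples can now reference load_file_list: the second hunk cuts off at the top of the loop that turns src_file_list into load_file_list, so the mapping is not visible in this commit. Below is a minimal sketch of the presumed shape of that loop; the hf_hub_download call and the repo id are illustrative assumptions, not code from this diff.

import os
from huggingface_hub import hf_hub_download

# src_file_list is the dict defined in the hunks above.
load_file_list = {}
for key in src_file_list.keys():
    src = src_file_list[key]
    if os.path.exists(src):
        # "local" mode: the entry already points at a file on disk.
        load_file_list[key] = src
    else:
        # Hosted demo: fetch the file from the Hub and keep the cached path.
        load_file_list[key] = hf_hub_download(
            repo_id="mingyuan/LMM-demo-data",  # hypothetical repo id, not from the commit
            filename=src,
        )

Under that assumption, the change to examples is straightforward: instead of hard-coding relative paths like "examples/placeholder.m4a", which only resolve when the working directory happens to contain them, the demo hands Gradio whatever local path the loader produced for each audio key.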