fantaxy committed on
Commit
a9f15ef
·
verified ·
1 Parent(s): ace8bc3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -91
app.py CHANGED
@@ -13,15 +13,7 @@ from subprocess import getoutput
13
  is_shared_ui = True if "fffiloni/MimicMotion" in os.environ['SPACE_ID'] else False
14
  available_property = False if is_shared_ui else True
15
 
16
- is_gpu_associated = torch.cuda.is_available()
17
- if is_gpu_associated:
18
- gpu_info = getoutput('nvidia-smi')
19
- if("A10G" in gpu_info):
20
- which_gpu = "A10G"
21
- elif("T4" in gpu_info):
22
- which_gpu = "T4"
23
- else:
24
- which_gpu = "CPU"
25
 
26
  def stream_output(pipe):
27
  for line in iter(pipe.readline, ''):
@@ -248,88 +240,21 @@ div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
248
  color: #030303 !important;
249
  }
250
  """
251
- with gr.Blocks(css=css) as demo:
252
- with gr.Column():
253
- gr.Markdown("# MimicMotion")
254
- gr.Markdown("High-quality human motion video generation with pose-guided control")
255
- gr.HTML("""
256
- <div style="display:flex;column-gap:4px;">
257
- <a href='http://tencent.github.io/MimicMotion'>
258
- <img src='https://img.shields.io/badge/Project-Page-Green'>
259
- </a>
260
- <a href='https://arxiv.org/abs/2406.19680'>
261
- <img src='https://img.shields.io/badge/Paper-Arxiv-red'>
262
- </a>
263
- </div>
264
- """)
265
- with gr.Row():
266
- with gr.Column():
267
- if is_shared_ui:
268
- top_description = gr.HTML(f'''
269
- <div class="gr-prose">
270
- <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
271
- Attention: this Space need to be duplicated to work</h2>
272
- <p class="main-message custom-color">
273
- To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (A10G-large recommended).<br />
274
- A A10G-large costs <strong>US$1.50/h</strong>. You'll also need to set your own secret hf_token to access gated stabilityai/stable-video-diffusion-img2vid-xt-1-1 repo.
275
- </p>
276
- <p class="actions custom-color">
277
- <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
278
- <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
279
- </a>
280
- to start experimenting with this demo
281
- </p>
282
- </div>
283
- ''', elem_id="warning-duplicate")
284
- else:
285
- if(is_gpu_associated):
286
- top_description = gr.HTML(f'''
287
- <div class="gr-prose">
288
- <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
289
- You have successfully associated a {which_gpu} GPU to this Space 🎉</h2>
290
- <p class="custom-color">
291
- You will be billed by the minute from when you activated the GPU until when it is turned off.
292
- </p>
293
- </div>
294
- ''', elem_id="warning-ready")
295
- else:
296
- top_description = gr.HTML(f'''
297
- <div class="gr-prose">
298
- <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
299
- You have successfully duplicated the MimicMotion Space 🎉</h2>
300
- <p class="custom-color">There's only one step left before you can properly play with this demo: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a GPU</b> to it (via the Settings tab)</a> and run the app below.
301
- You will be billed by the minute from when you activate the GPU until when it is turned off.</p>
302
- <p class="actions custom-color">
303
- <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">🔥 &nbsp; Set recommended GPU</a>
304
- </p>
305
- </div>
306
- ''', elem_id="warning-setgpu")
307
- with gr.Row():
308
- ref_image_in = gr.Image(label="Person Image Reference", type="filepath")
309
- ref_video_in = gr.Video(label="Person Video Reference")
310
- with gr.Accordion("Advanced Settings", open=False):
311
- num_inference_steps = gr.Slider(label="num inference steps", minimum=12, maximum=50, value=25, step=1, interactive=available_property)
312
- guidance_scale = gr.Slider(label="guidance scale", minimum=0.1, maximum=10, value=2, step=0.1, interactive=available_property)
313
- with gr.Row():
314
- output_frames_per_second = gr.Slider(label="fps", minimum=1, maximum=60, value=16, step=1, interactive=available_property)
315
- seed = gr.Number(label="Seed", value=42, interactive=available_property)
316
- checkpoint_version = gr.Dropdown(label="Checkpoint Version", choices=["MimicMotion_1.pth", "MimicMotion_1-1.pth"], value="MimicMotion_1.pth", interactive=available_property, filterable=False)
317
- submit_btn = gr.Button("Submit", interactive=available_property)
318
- gr.Examples(
319
- examples = [
320
- ["./examples/demo1.jpg", "./examples/preview_1.mp4"]
321
- ],
322
- fn = load_examples,
323
- inputs = [ref_image_in, ref_video_in],
324
- outputs = [output_video],
325
- run_on_click = True,
326
- cache_examples = False
327
- )
328
- output_video.render()
329
  submit_btn.click(
330
- fn = infer,
331
- inputs = [ref_image_in, ref_video_in, num_inference_steps, guidance_scale, output_frames_per_second, seed, checkpoint_version],
332
- outputs = [output_video]
333
  )
334
 
335
- demo.launch(show_api=False, show_error=False)
 
13
  is_shared_ui = True if "fffiloni/MimicMotion" in os.environ['SPACE_ID'] else False
14
  available_property = False if is_shared_ui else True
15
 
16
+
 
 
 
 
 
 
 
 
17
 
18
  def stream_output(pipe):
19
  for line in iter(pipe.readline, ''):
 
240
  color: #030303 !important;
241
  }
242
  """
243
+ with gr.Blocks() as demo:
244
+ with gr.Row():
245
+ ref_image_in = gr.Image()
246
+ ref_video_in = gr.Video()
247
+ num_inference_steps = gr.Slider(minimum=12, maximum=50, value=25, step=1)
248
+ guidance_scale = gr.Slider(minimum=0.1, maximum=10, value=2, step=0.1)
249
+ output_frames_per_second = gr.Slider(minimum=1, maximum=60, value=16, step=1)
250
+ seed = gr.Number(value=42)
251
+ checkpoint_version = gr.Dropdown(choices=["MimicMotion_1.pth", "MimicMotion_1-1.pth"], value="MimicMotion_1.pth")
252
+ submit_btn = gr.Button("실행")
253
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
254
  submit_btn.click(
255
+ fn=infer,
256
+ inputs=[ref_image_in, ref_video_in, num_inference_steps, guidance_scale, output_frames_per_second, seed, checkpoint_version],
257
+ outputs=[gr.Video()]
258
  )
259
 
260
+ demo.launch()