fffiloni committed on
Commit
2eced4f
·
verified ·
1 Parent(s): e972d51

Set prerendered examples, set max_size queue

Browse files
Files changed (1) hide show
  1. app.py +11 -2
app.py CHANGED
@@ -229,6 +229,15 @@ def infer(prompt, cond_image_path, cond_audio_path_spk1, cond_audio_path_spk2, s
229
  except Exception as e:
230
  print(f"[WARNING] Could not remove {f}: {e}")
231
 
 
 
 
 
 
 
 
 
 
232
 
233
  with gr.Blocks(title="MultiTalk Inference") as demo:
234
  gr.Markdown("## 🎤 Meigen MultiTalk Inference Demo")
@@ -290,7 +299,7 @@ with gr.Blocks(title="MultiTalk Inference") as demo:
290
  ["A woman sings passionately in a dimly lit studio.", "examples/single/single1.png", "examples/single/1.wav", None, 12],
291
  ["In a cozy recording studio, a man and a woman are singing together. The man, with tousled brown hair, stands to the left, wearing a light green button-down shirt. His gaze is directed towards the woman, who is smiling warmly. She, with wavy dark hair, is dressed in a black floral dress and stands to the right, her eyes closed in enjoyment. Between them is a professional microphone, capturing their harmonious voices. The background features wooden panels and various audio equipment, creating an intimate and focused atmosphere. The lighting is soft and warm, highlighting their expressions and the intimate setting. A medium shot captures their interaction closely.", "examples/multi/3/multi3.png", "examples/multi/3/1-man.WAV", "examples/multi/3/1-woman.WAV", 12],
292
  ],
293
- fn=infer,
294
  inputs = [prompt_input, image_input, audio_input_spk1, audio_input_spk2, sample_steps],
295
  outputs=output_video,
296
  cache_examples = True,
@@ -303,4 +312,4 @@ with gr.Blocks(title="MultiTalk Inference") as demo:
303
  outputs=output_video
304
  )
305
 
306
- demo.launch(ssr_mode=False, show_error=True, show_api=False)
 
229
  except Exception as e:
230
  print(f"[WARNING] Could not remove {f}: {e}")
231
 
232
def load_prerendered_examples(prompt, cond_image_path, cond_audio_path_spk1, cond_audio_path_spk2, sample_steps):
    """Return the pre-rendered result video for a known example input.

    Used as the Examples callback so cached demos play instantly instead of
    re-running inference. Only ``cond_image_path`` is consulted; the other
    arguments mirror the ``infer`` signature so the Examples component can
    pass every example field through unchanged.

    Returns the video path for a recognized example image, or ``None`` when
    the image is not one of the pre-rendered examples.
    """
    prerendered = {
        "examples/single/single1.png": "examples/results/multitalk_single_example_1.mp4",
        "examples/multi/3/multi3.png": "examples/results/multitalk_multi_example_2.mp4",
    }
    return prerendered.get(cond_image_path)
241
 
242
  with gr.Blocks(title="MultiTalk Inference") as demo:
243
  gr.Markdown("## 🎤 Meigen MultiTalk Inference Demo")
 
299
  ["A woman sings passionately in a dimly lit studio.", "examples/single/single1.png", "examples/single/1.wav", None, 12],
300
  ["In a cozy recording studio, a man and a woman are singing together. The man, with tousled brown hair, stands to the left, wearing a light green button-down shirt. His gaze is directed towards the woman, who is smiling warmly. She, with wavy dark hair, is dressed in a black floral dress and stands to the right, her eyes closed in enjoyment. Between them is a professional microphone, capturing their harmonious voices. The background features wooden panels and various audio equipment, creating an intimate and focused atmosphere. The lighting is soft and warm, highlighting their expressions and the intimate setting. A medium shot captures their interaction closely.", "examples/multi/3/multi3.png", "examples/multi/3/1-man.WAV", "examples/multi/3/1-woman.WAV", 12],
301
  ],
302
+ fn=load_prerendered_examples,
303
  inputs = [prompt_input, image_input, audio_input_spk1, audio_input_spk2, sample_steps],
304
  outputs=output_video,
305
  cache_examples = True,
 
312
  outputs=output_video
313
  )
314
 
315
+ demo.queue(max_size=2).launch(ssr_mode=False, show_error=True, show_api=False)