pragnakalp committed
Commit c3bba51 · 1 Parent(s): 1958ca3

Update app.py

Files changed (1): app.py (+25 -4)
app.py CHANGED
@@ -23,12 +23,33 @@ import ffmpeg
 
 block = gr.Blocks()
 
-def cal(gender,input_text):
-    return gender+input_text
+def calculate(image_in, audio_in):
+    waveform, sample_rate = torchaudio.load(audio_in)
+    waveform = torch.mean(waveform, dim=0, keepdim=True)
+    torchaudio.save("/content/audio.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
+    image = Image.open(image_in)
+    image = pad_image(image)
+    image.save("image.png")
+
+    pocketsphinx_run = subprocess.run(['pocketsphinx', '-phone_align', 'yes', 'single', '/content/audio.wav'], check=True, capture_output=True)
+    jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
+    with open("test.json", "w") as f:
+        f.write(jq_run.stdout.decode('utf-8').strip())
+    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
+    return "/content/train/image_audio.mp4"
+
 
 def one_shot(image,input_text,gender):
     if gender == 'Female' or gender == 'female':
-        return cal(gender,input_text)
+        tts = gTTS(input_text)
+        with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
+            tts.write_to_fp(f)
+            f.seek(0)
+            sound = AudioSegment.from_file(f.name, format="mp3")
+            sound.export("/content/audio.wav", format="wav")
+            audio_in = "/content/audio.wav"
+            return calculate(image, audio_in)
 
 
 
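Note on the new alignment step: the long jq filter above reshapes the output of pocketsphinx's -phone_align mode into the word/phone timing list that test_script.py reads from test.json. Below is a minimal Python sketch of the same reshaping, assuming pocketsphinx emits the top-level {"w": [...]} word list the jq filter reads (token in .t, begin time in .b, duration in .d, nested phones in .w); the helper name reshape_alignment is hypothetical, not part of this commit.

import json
import math

def reshape_alignment(pocketsphinx_stdout: bytes) -> str:
    # Mirrors the jq filter in calculate(): normalize word tokens, then
    # convert each phone's begin/duration (seconds) to centisecond bounds.
    words_out = []
    for w in json.loads(pocketsphinx_stdout)["w"]:
        word = w["t"].upper()  # ascii_upcase
        for old, new in (("<S>", "sil"), ("<SIL>", "sil"),
                         ("(2)", ""), ("(3)", ""), ("(4)", ""),
                         ("[SPEECH]", "SIL"), ("[NOISE]", "SIL")):
            word = word.replace(old, new)
        phones = [{
            "ph": p["t"].replace("+SPN+", "SIL").replace("+NSN+", "SIL"),
            "bg": math.floor(p["b"] * 100),                 # begin, centiseconds
            "ed": math.floor(p["b"] * 100 + p["d"] * 100),  # end, centiseconds
        } for p in w.get("w", [])]
        words_out.append({"word": word, "phones": phones})
    return json.dumps(words_out)

This would drop the runtime dependency on the jq binary. One difference worth noting: jq's sub() replaces only the first match, but these tokens contain at most one occurrence each, so str.replace() behaves the same in practice.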
 
 
@@ -46,7 +67,7 @@ def run():
     # audio_in = gr.Audio(show_label=False, type='filepath')
     input_text=gr.Textbox(lines=3, value="Hello How are you?", label="Input Text")
     gender = gr.Radio(["Female","Male"],value="Female",label="Gender")
-    video_out = gr.Textbox(label="output")
+    video_out = gr.Video(label="output")
     # video_out = gr.Video(show_label=False)
     with gr.Row().style(equal_height=True):
         btn = gr.Button("Generate")
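The output component switches from gr.Textbox to gr.Video, which renders the .mp4 filepath that calculate() returns. The Generate button's event wiring sits outside this diff; a sketch of how the pieces would connect, assuming an image_in = gr.Image(type="filepath") component defined earlier in run() (not shown here):

# Hypothetical wiring, not part of this commit: send the Generate click
# through one_shot and display the returned video path in video_out.
btn.click(one_shot, inputs=[image_in, input_text, gender], outputs=video_out)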