File size: 2,248 Bytes
4d909dd
d35ea54
 
 
c79a608
6abe9d1
 
 
c79a608
6abe9d1
 
d35ea54
 
 
 
4d909dd
d35ea54
 
 
 
 
 
 
 
 
 
 
 
 
 
e17cc19
 
 
 
 
 
 
ae516d2
 
e17cc19
d35ea54
 
 
 
 
 
ae516d2
 
 
 
 
bb224dc
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import streamlit as st
import os
import sys
import torch

# Debug output: confirm which numpy/librosa versions are active in this
# environment (version mismatches between the two are a common failure mode).
import numpy
print(numpy.__version__)
import librosa
print(librosa.__version__)
# NOTE: a second, identical `import numpy` + version print was removed here.

# Make the bundled Wav2Lip package importable before importing Avatar,
# which depends on it.
path_to_add = os.path.join(os.path.dirname(__file__), "Wav2Lip")
if path_to_add not in sys.path:
    sys.path.insert(0, path_to_add)
from avatar import Avatar

if 'is_initialized' not in st.session_state:
    # One-time setup for this Streamlit session: build the Avatar, load the
    # Wav2Lip weights, point it at its working directories, and warm it up
    # with a sample utterance so later requests respond faster.
    avatar = Avatar()
    st.session_state.avatar = avatar
    avatar.export_video = False
    avatar.load_model("checkpoint/wav2lip_gan.pth")
    print("load model finished")
    # Prefer GPU when available; inference falls back to CPU otherwise.
    avatar.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(avatar.device)
    # Input/output locations used by the lip-sync pipeline.
    avatar.output_audio_path = "audio/"
    avatar.output_audio_filename = "result.wav"
    avatar.temp_lip_video_no_voice_path = "temp/"
    avatar.temp_lip_video_no_voice_filename = "result.avi"
    avatar.output_video_path = "results/"
    avatar.output_video_name = "result_voice.mp4"
    avatar.ref_video_path_and_filename = "ref_videos/Liv.mp4"
    avatar.face_det_results_path_and_name = 'ref_videos/Liv_face_det_result.pkl'
    # Decode the reference video frames, then reuse precomputed face-detection
    # results instead of re-running detection (commented call below).
    avatar.get_video_full_frames(avatar.ref_video_path_and_filename)
    avatar.face_detect_batch_size = 16
    # avatar.create_face_detection_results(avatar.video_full_frames,True)
    print("load face detection result")
    avatar.load_face_detection_results()
    # Warm-up run with a fixed sample sentence.
    input_text = "Hi How are you?"
    avatar.text_to_lip_video(input_text)
    print("load face detection result done")
    st.session_state['is_initialized'] = True


# Text input: generate and display a lip-synced video for the entered text.
# (Avatar is already imported at the top of the file; the redundant duplicate
# import that previously sat here was removed.)
user_input = st.text_input("Enter your text:")
if user_input:
    # Clear artifacts from the previous run before generating new output.
    st.session_state.avatar.dir_clean_up()
    # Echo the entered text back to the user.
    st.write("You entered:", user_input)
    # Unlike the warm-up run, the interactive run exports the final video.
    st.session_state.avatar.export_video = True
    st.session_state.avatar.text_to_lip_video(user_input)
    # Play the rendered result from output_video_path + output_video_name.
    st.video(st.session_state.avatar.output_video_path + st.session_state.avatar.output_video_name)