import os
import sys

import streamlit as st
import torch

# Sanity-check the audio/numeric library versions at startup.
import numpy
print(numpy.__version__)
import librosa
print(librosa.__version__)

# Make the bundled Wav2Lip package importable before importing Avatar.
path_to_add = os.path.join(os.path.dirname(__file__), "Wav2Lip")
if path_to_add not in sys.path:
    sys.path.insert(0, path_to_add)

from avatar import Avatar

# One-time initialization, persisted across Streamlit reruns via session state.
if 'is_initialized' not in st.session_state:
    st.session_state.avatar = Avatar()
    st.session_state.avatar.export_video = False
    st.session_state.avatar.load_model("checkpoint/wav2lip_gan.pth")
    print("load model finished")

    # Run inference on GPU when available, otherwise fall back to CPU.
    st.session_state.avatar.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(st.session_state.avatar.device)

    # Output locations for the synthesized audio, the intermediate
    # (voiceless) lip video, and the final muxed video.
    st.session_state.avatar.output_audio_path = "audio/"
    st.session_state.avatar.output_audio_filename = "result.wav"
    st.session_state.avatar.temp_lip_video_no_voice_path = "temp/"
    st.session_state.avatar.temp_lip_video_no_voice_filename = "result.avi"
    st.session_state.avatar.output_video_path = "results/"
    st.session_state.avatar.output_video_name = "result_voice.mp4"

    # Reference avatar video and its cached face-detection results.
    st.session_state.avatar.ref_video_path_and_filename = "ref_videos/Liv.mp4"
    st.session_state.avatar.face_det_results_path_and_name = 'ref_videos/Liv_face_det_result.pkl'

    # Read all frames of the reference video into memory.
    st.session_state.avatar.get_video_full_frames(st.session_state.avatar.ref_video_path_and_filename)

    st.session_state.avatar.face_detect_batch_size = 16
    # To regenerate the cached face-detection results instead of loading them:
    # st.session_state.avatar.create_face_detection_results(st.session_state.avatar.video_full_frames, True)
    print("load face detection result")
    st.session_state.avatar.load_face_detection_results()

    # Warm-up run so the first user request doesn't pay cold-start cost.
    input_text = "Hi How are you?"
    st.session_state.avatar.text_to_lip_video(input_text)
    print("warm-up lip-sync run done")

    st.session_state['is_initialized'] = True

# Text input box; the entered text drives the lip-sync generation.
user_input = st.text_input("Enter your text:")

if user_input:
    # Clear leftover files from the previous run.
    st.session_state.avatar.dir_clean_up()

    # Display the entered text.
    st.write("You entered:", user_input)

    # Generate the lip-synced video for the entered text and play it.
    st.session_state.avatar.export_video = True
    st.session_state.avatar.text_to_lip_video(user_input)
    st.video(st.session_state.avatar.output_video_path + st.session_state.avatar.output_video_name)