import gradio as gr
import spaces
import torch
import subprocess
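# On a ZeroGPU Space, CUDA is only attached inside functions decorated with
# @spaces.GPU, so the tensor created at import time still reports 'cpu'.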
zero = torch.Tensor([0]).cuda()
print(zero.device)  # <-- 'cpu' 🤗
@spaces.GPU
def greet(n):
    print(zero.device)  # <-- 'cuda:0' 🤗
    return f"Hello {zero + n} Tensor"
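# greet() above is the stock ZeroGPU smoke test and is not wired into the UI.
# run_inference() below shells out to Wav2Lip's inference.py; the checkpoint
# and sample files are assumed to be bundled with the Space.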
@spaces.GPU  # request a GPU slice for the call (assumes the subprocess inherits it)
def run_inference(input_video, input_audio):
    # Fall back to the bundled samples when no inputs are provided.
    audio = input_audio or "sample_data/sir.mp3"
    video = input_video or "sample_data/spark_input.mp4"
    command = f'python3 inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face "{video}" --audio "{audio}"'
    print("running", command)
    # Execute the command and wait for it to finish
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    output, error = process.communicate()
    # Wav2Lip writes its result to results/result_voice.mp4 by default;
    # return that path so the gr.Video output component can display it.
    return "results/result_voice.mp4"
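# Builds the Gradio Blocks UI: video and audio inputs plus an output pane,
# with a Generate button wired to run_inference.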
def run():
    with gr.Blocks(css=".gradio-container {background-color: lightgray} #radio_div {background-color: #FFD8B4; font-size: 40px;}") as demo:
        gr.Markdown("<h1 style='text-align: center;'>One Shot Talking Face from Text</h1><br/><br/>")
        with gr.Group():
            with gr.Row():
                input_video = gr.Video(label="Input Video")
                # type="filepath" so the handler receives a path it can pass
                # on the inference command line.
                input_audio = gr.Audio(label="Input Audio", type="filepath")
                video_out = gr.Video(show_label=True, label="Output")
            with gr.Row():
                btn = gr.Button("Generate")
        btn.click(run_inference, inputs=[input_video, input_audio], outputs=[video_out])
    demo.queue()
    demo.launch(server_name="0.0.0.0", server_port=7860)
if __name__ == "__main__":
    run()
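# A minimal client-side sketch for calling the running app (assumptions:
# gradio_client is installed, the Space is reachable at localhost:7860, and
# the endpoint name follows the handler name; check `demo.view_api()` for
# the actual api_name):
#
#   from gradio_client import Client, handle_file
#   client = Client("http://localhost:7860/")
#   result = client.predict(
#       handle_file("sample_data/spark_input.mp4"),
#       handle_file("sample_data/sir.mp3"),
#       api_name="/run_inference",
#   )
#   print(result)  # path to the generated video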