pratikshahp commited on
Commit
7f90cea
·
verified ·
1 Parent(s): 7e0e287

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -49
app.py CHANGED
@@ -1,59 +1,27 @@
1
- import gradio as gr
2
- import subprocess, os
3
 
4
- # Ensure checkpoint is in the repo
5
- assert os.path.exists("Wav2Lip/checkpoints/wav2lip.pth"), "Checkpoint not found!"
 
6
 
7
- def generate(script: str, mic_path: str, video_path: str):
8
- # Validate inputs
9
- if not (script and mic_path and video_path):
10
- return None, "⚠️ Please provide script, audio, and video."
11
-
12
- # Save the script (optional—useful for logs or TTS extension)
13
- with open("Wav2Lip/script.txt", "w") as f:
14
- f.write(script)
15
 
 
 
 
16
  os.makedirs("Wav2Lip/results", exist_ok=True)
17
- output_path = "Wav2Lip/results/output.mp4"
18
-
19
- # Run Wav2Lip
20
  subprocess.call([
21
  "python", "Wav2Lip/inference.py",
22
- "--checkpoint_path", "Wav2Lip/checkpoints/wav2lip.pth",
23
  "--face", video_path,
24
  "--audio", mic_path,
25
- "--outfile", output_path
26
  ])
 
 
 
27
 
28
- # Return path to video and success status
29
- return output_path, "✅ Your video is ready!"
30
-
31
- # Build Gradio UI
32
- with gr.Blocks() as demo:
33
- gr.Markdown("# 🎬 AI Lip‑Sync Video Generator (Wav2Lip)")
34
-
35
- script_input = gr.Textbox(
36
- lines=3, placeholder="Paste the script here...", label="1) Script Input"
37
- )
38
- mic = gr.Audio(
39
- sources=["microphone"], type="filepath", label="2) Record Audio"
40
- )
41
- webcam = gr.Video(
42
- sources=["webcam", "upload"], format="mp4", label="3) Record / Upload Video",
43
- autoplay=False
44
- )
45
-
46
- btn = gr.Button("4) Generate Video")
47
- output_video = gr.Video(
48
- label="Generated Video", format="mp4", autoplay=True, show_download_button=True
49
- )
50
- status = gr.Textbox(label="Status", interactive=False)
51
-
52
- btn.click(
53
- fn=generate,
54
- inputs=[script_input, mic, webcam],
55
- outputs=[output_video, status]
56
- )
57
-
58
- # Launch app
59
- demo.launch()
 
1
import os
import subprocess

import gradio as gr
from huggingface_hub import hf_hub_download

# Space repo that hosts the Wav2Lip checkpoint, and the checkpoint's
# path inside that repo (also the relative path inference.py expects).
REPO_ID = "pratikshahp/AI-Powered-Video-Generator"
CHECKPOINT = "Wav2Lip/checkpoints/wav2lip.pth"

# Download the checkpoint once at startup.
# BUG FIX: by default hf_hub_download places the file inside the HF cache
# directory and returns that cache path — which was discarded here — so the
# checkpoint would NOT exist at the relative path CHECKPOINT that is later
# passed to inference.py.  local_dir="." materializes the file at exactly
# ./Wav2Lip/checkpoints/wav2lip.pth, matching CHECKPOINT.
hf_hub_download(
    repo_id=REPO_ID,
    filename=CHECKPOINT,
    repo_type="space",
    local_dir=".",
)
 
 
 
 
 
 
10
 
11
def generate(script, mic_path, video_path):
    """Run Wav2Lip lip-sync inference and return the resulting video.

    Parameters
    ----------
    script : str
        Text pasted by the user (not consumed by inference itself;
        presumably kept for a future TTS extension — confirm).
    mic_path : str
        Filesystem path of the recorded audio clip.
    video_path : str
        Filesystem path of the face video.

    Returns
    -------
    tuple
        (video_path_or_None, status_message) for the two Gradio outputs.
    """
    # Guard clause: all three inputs are required.
    if not (script and mic_path and video_path):
        return None, "⚠️ Provide script, audio, and video."

    os.makedirs("Wav2Lip/results", exist_ok=True)
    output = "Wav2Lip/results/output.mp4"

    # Run Wav2Lip in a subprocess (argument list, shell=False — no
    # shell-injection risk from user-supplied paths).
    subprocess.call([
        "python", "Wav2Lip/inference.py",
        "--checkpoint_path", CHECKPOINT,
        "--face", video_path,
        "--audio", mic_path,
        "--outfile", output
    ])

    # BUG FIX: the generated file exists only on the local filesystem of the
    # running Space — it is never committed to the repo, so the previous
    # hf_hub_download(repo_id=REPO_ID, filename=output, repo_type="space")
    # round-trip raised EntryNotFoundError on every successful run.
    # Return the local path directly, and surface a clear error instead of
    # crashing when inference produced no output.
    if not os.path.exists(output):
        return None, "❌ Inference failed — no output video was produced."
    return output, "✅ Done!"
26
 
27
+ # Gradio UI setup...