mgbam committed on
Commit
21479b0
·
verified ·
1 Parent(s): a5c1c1b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -27
app.py CHANGED
@@ -1,51 +1,102 @@
1
  import streamlit as st
2
  import os
3
- from pipelines.video_process import extract_audio_ffmpeg, apply_edits
4
- from pipelines.ai_inference import transcribe_audio, generate_edit_instructions
5
- # from pipelines.auth_utils import check_auth_status # If using custom
6
- import openai
7
 
8
def main():
    """Streamlit entry point: upload a video, transcribe it, ask GPT for
    edit instructions, apply them, and offer the result for download.
    """
    st.title("Smart Edit Assistant 🎬")
    # Check if user is logged in if using custom auth
    # user_info = check_auth_status()
    # if not user_info:
    #     st.stop()

    # Configure the OpenAI client from the environment; the app still renders
    # without a key, but GPT-backed steps will presumably fail downstream.
    openai_api_key = os.getenv("OPENAI_API_KEY", "")
    if openai_api_key:
        openai.api_key = openai_api_key
    else:
        st.warning("No OpenAI API key found in environment.")

    uploaded_file = st.file_uploader("Upload your video", type=["mp4", "mov", "mkv"])
    if uploaded_file is not None:
        # Persist the upload to disk so ffmpeg (a separate process) can read it.
        with open("temp_input.mp4", "wb") as f:
            f.write(uploaded_file.getbuffer())

        st.video("temp_input.mp4")

        if st.button("Process Video"):
            with st.spinner("Extracting audio..."):
                audio_path = extract_audio_ffmpeg("temp_input.mp4", "temp_audio.wav")

            with st.spinner("Transcribing..."):
                transcript_text = transcribe_audio(audio_path)  # calls either local or API-based Whisper
            st.text_area("Transcript", transcript_text, height=200)

            with st.spinner("Generating edit instructions with GPT..."):
                edit_instructions = generate_edit_instructions(transcript_text)
            st.write(edit_instructions)

            with st.spinner("Applying edits..."):
                edited_video_path = apply_edits("temp_input.mp4", edit_instructions)

            st.success("Done!")
            st.video(edited_video_path)
            with open(edited_video_path, "rb") as f_out:
                st.download_button("Download Edited Video", data=f_out, file_name="edited_result.mp4", mime="video/mp4")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
# Script entry point: launch the Streamlit UI when run directly.
if __name__ == "__main__":
    main()
 
1
  import streamlit as st
2
  import os
3
+ import subprocess
 
 
 
4
 
5
def main():
    """Streamlit UI: upload a video, transcribe its audio, generate AI edit
    instructions, apply them with ffmpeg, and offer the result for download.
    """
    st.title("Smart Edit Assistant")

    uploaded_file = st.file_uploader("Upload your video", type=["mp4", "mov", "mkv"])

    if uploaded_file:
        # Persist the upload to disk so ffmpeg (a separate process) can read it.
        with open("temp_input.mp4", "wb") as f:
            f.write(uploaded_file.getbuffer())

        st.video("temp_input.mp4")

        if st.button("Process Video"):
            # 1. Extract audio using FFmpeg (example)
            with st.spinner("Extracting audio..."):
                audio_path = extract_audio_ffmpeg("temp_input.mp4", "temp_audio.wav")

            # 2. Transcribe audio (placeholder function; use openai-whisper or local)
            with st.spinner("Transcribing..."):
                transcript_text = transcribe_audio(audio_path)
            st.text_area("Transcript", transcript_text, height=200)

            # 3. Generate instructions (placeholder function; calls GPT or open-source LLM)
            with st.spinner("Generating edit instructions..."):
                edit_instructions = generate_edit_instructions(transcript_text)
            st.write("AI Edit Instructions:", edit_instructions)

            # 4. Apply Edits with FFmpeg
            with st.spinner("Applying edits..."):
                edited_video_path = apply_edits("temp_input.mp4", edit_instructions)

            # 5. Verify the output before previewing: a missing or 0-byte file
            # means ffmpeg failed even though apply_edits returned.
            abs_edited_path = os.path.join(os.getcwd(), edited_video_path)
            if not os.path.exists(abs_edited_path):
                st.error(f"Edited video file not found at '{abs_edited_path}'. Check logs.")
                return

            # Plain string (no placeholders) — the original's f-prefix was spurious.
            if os.path.getsize(abs_edited_path) == 0:
                st.error("Edited video file is empty (0 bytes). Check ffmpeg or editing logic.")
                return

            st.success("Edit complete! Now previewing the edited video.")
            st.video(abs_edited_path)  # pass the absolute path

            with open(abs_edited_path, "rb") as f_out:
                st.download_button(
                    label="Download Edited Video",
                    data=f_out,
                    file_name="edited_result.mp4",
                    mime="video/mp4",
                )
56
+
57
def extract_audio_ffmpeg(input_video, output_audio):
    """Extract a mono, 16 kHz, 16-bit PCM WAV track from *input_video*.

    Args:
        input_video: Path of the source video file.
        output_audio: Path the extracted WAV file will be written to.

    Returns:
        ``output_audio``, once ffmpeg has exited successfully.

    Raises:
        RuntimeError: if ffmpeg exits with a non-zero status.
    """
    cmd = [
        "ffmpeg", "-y",          # -y: overwrite the output without prompting
        "-i", input_video,
        "-vn",                   # drop the video stream
        "-acodec", "pcm_s16le",  # raw 16-bit PCM
        "-ar", "16000",          # 16 kHz sample rate
        "-ac", "1",              # mono
        output_audio,
    ]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        # errors="replace" so a non-UTF-8 ffmpeg message cannot raise
        # UnicodeDecodeError here and mask the real failure.
        raise RuntimeError(f"ffmpeg error: {result.stderr.decode(errors='replace')}")
    return output_audio
74
+
75
def transcribe_audio(audio_path):
    """Return a transcript for the audio file at *audio_path*.

    Placeholder: swap in real transcription logic (whisper / openai-whisper)
    here. The path argument is currently ignored and a canned transcript is
    returned so the rest of the pipeline can be exercised.
    """
    return "This is a mock transcript."
80
+
81
def generate_edit_instructions(transcript_text):
    """Derive edit instructions from *transcript_text*.

    Placeholder: a real implementation would call GPT or an open-source LLM
    and could return a string or structured instructions (e.g. JSON). For
    now the transcript is ignored and a fixed no-op instruction is returned.
    """
    return "Keep everything; no major edits."
87
+
88
def apply_edits(input_video, edit_instructions):
    """Produce an edited copy of *input_video* per *edit_instructions*.

    Demo implementation: the instructions are ignored and the video is
    stream-copied to a new file so the rest of the pipeline has a valid
    output to work with. In practice you'd parse the instructions and cut
    the video with ffmpeg or moviepy.

    Args:
        input_video: Path of the video to edit.
        edit_instructions: Edit plan (currently unused).

    Returns:
        Path of the edited video file.

    Raises:
        RuntimeError: if ffmpeg exits with a non-zero status.
    """
    output_video = "edited_video.mp4"
    # "-c copy" remuxes without re-encoding: fast, lossless, and sufficient
    # for demonstrating the flow.
    cmd = ["ffmpeg", "-y", "-i", input_video, "-c", "copy", output_video]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        # errors="replace" so a non-UTF-8 ffmpeg message cannot raise
        # UnicodeDecodeError here and mask the real failure.
        raise RuntimeError(f"ffmpeg editing error: {result.stderr.decode(errors='replace')}")
    return output_video
100
 
101
# Script entry point: launch the Streamlit UI when run directly.
if __name__ == "__main__":
    main()