# Smart Edit Assistant — Streamlit app (Hugging Face Space, commit cffc4f1).
import streamlit as st
import os
from pipelines.video_process import extract_audio_ffmpeg, apply_edits
from pipelines.ai_inference import transcribe_audio, generate_edit_instructions
# from pipelines.auth_utils import check_auth_status # If using custom
import openai
def main():
    """Streamlit entry point: upload a video, transcribe it, and apply AI-generated edits.

    Flow: save the upload to a temp file -> extract audio with ffmpeg ->
    transcribe -> ask GPT for edit instructions -> apply edits -> offer the
    result for download. All heavy lifting lives in the ``pipelines`` package.
    """
    st.title("Smart Edit Assistant 🎬")

    # Check if user is logged in if using custom auth
    # user_info = check_auth_status()
    # if not user_info:
    #     st.stop()

    openai_api_key = os.getenv("OPENAI_API_KEY", "")
    if openai_api_key:
        openai.api_key = openai_api_key
    else:
        st.warning("No OpenAI API key found in environment.")

    uploaded_file = st.file_uploader("Upload your video", type=["mp4", "mov", "mkv"])
    if uploaded_file is None:
        # Nothing uploaded yet — wait for the user.
        return

    # Preserve the upload's real extension: previously every file was saved as
    # ".mp4", mislabeling .mov/.mkv containers handed to ffmpeg downstream.
    suffix = os.path.splitext(uploaded_file.name)[1] or ".mp4"
    input_path = f"temp_input{suffix}"
    with open(input_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    st.video(input_path)

    if not st.button("Process Video"):
        return

    with st.spinner("Extracting audio..."):
        audio_path = extract_audio_ffmpeg(input_path, "temp_audio.wav")

    with st.spinner("Transcribing..."):
        # Calls either local or API-based Whisper depending on pipeline config.
        transcript_text = transcribe_audio(audio_path)
    st.text_area("Transcript", transcript_text, height=200)

    with st.spinner("Generating edit instructions with GPT..."):
        edit_instructions = generate_edit_instructions(transcript_text)
    st.write(edit_instructions)

    with st.spinner("Applying edits..."):
        edited_video_path = apply_edits(input_path, edit_instructions)
    st.success("Done!")
    st.video(edited_video_path)

    with open(edited_video_path, "rb") as f_out:
        st.download_button(
            "Download Edited Video",
            data=f_out,
            file_name="edited_result.mp4",
            mime="video/mp4",
        )
# Run the app when executed directly (e.g. `streamlit run <file>`).
if __name__ == "__main__":
    main()