import streamlit as st from transformers import BartForConditionalGeneration, BartTokenizer from youtube_transcript_api import YouTubeTranscriptApi # Load BART model and tokenizer model_name = 'facebook/bart-large-cnn' tokenizer = BartTokenizer.from_pretrained(model_name) model = BartForConditionalGeneration.from_pretrained(model_name) @st.cache def get_transcript(url): try: video_id = url.split('=')[1] transcript_list = YouTubeTranscriptApi.get_transcript(video_id) transcript_text = "" for item in transcript_list: transcript_text += item['text'] + "\n" return transcript_text except Exception as e: return "Error fetching transcript: " + str(e) @st.cache def summarize_transcript(transcript): input_ids = tokenizer.encode("summarize: " + transcript, return_tensors="pt", max_length=1024, truncation=True) summary_ids = model.generate(input_ids, num_beams=4, min_length=30, max_length=200, early_stopping=True) summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True) return summary def main(): st.title("YouTube Video Transcription Summarizer") video_url = st.text_input("Enter YouTube Video URL:") if st.button("Summarize Transcript"): transcript = get_transcript(video_url) if not transcript: st.error("Error fetching transcript.") else: summary = summarize_transcript(transcript) st.subheader("Summary:") st.write(summary) if __name__ == "__main__": main()