import streamlit as st
from transformers import BartForConditionalGeneration, BartTokenizer
from youtube_transcript_api import YouTubeTranscriptApi
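# Note (assumed setup, not part of the original file): to try this app, save it
# as e.g. app.py and run it with Streamlit after installing the dependencies:
#   pip install streamlit transformers torch youtube-transcript-api
#   streamlit run app.py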

# Load the BART model and tokenizer once, cached across Streamlit reruns
@st.cache_resource
def load_model(name="facebook/bart-large-cnn"):
    return (BartTokenizer.from_pretrained(name),
            BartForConditionalGeneration.from_pretrained(name))

tokenizer, model = load_model()

@st.cache_data
def get_transcript(url):
    """Fetch the full transcript text for a YouTube video, or None if unavailable."""
    try:
        # Extract the video ID from a standard watch URL (e.g. ...watch?v=<id>&t=30s)
        video_id = url.split("v=")[1].split("&")[0]
        transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
        # Join the timed caption segments into one block of text
        return "\n".join(item["text"] for item in transcript_list)
    except Exception:
        # Covers malformed URLs and videos with no available transcript
        return None

@st.cache_data
def summarize_transcript(transcript):
    """Summarize the transcript with BART; input is truncated to the 1024-token limit."""
    # Unlike T5, BART does not need a "summarize: " task prefix
    input_ids = tokenizer.encode(transcript, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = model.generate(input_ids, num_beams=4, min_length=30, max_length=200, early_stopping=True)
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)

def main():
    st.title("YouTube Video Transcription Summarizer")

    video_url = st.text_input("Enter YouTube Video URL:")
    
    if st.button("Summarize Transcript"):
        transcript = get_transcript(video_url)
        if not transcript:
            st.error("Error fetching transcript.")
        else:
            summary = summarize_transcript(transcript)
            st.subheader("Summary:")
            st.write(summary)

if __name__ == "__main__":
    main()