import gradio as gr
from transformers import BartForConditionalGeneration, BartTokenizer
from youtube_transcript_api import YouTubeTranscriptApi

# Load the BART summarization model and its tokenizer once at startup.
model_name = 'facebook/bart-large-cnn'
tokenizer = BartTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)


def get_transcript(url):
    """Fetch the transcript for a YouTube video URL and join it into plain text."""
    try:
        # Extract the video ID from a standard watch URL, dropping any extra query parameters.
        video_id = url.split('v=')[-1].split('&')[0]
        transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
        transcript_text = ""
        for item in transcript_list:
            transcript_text += item['text'] + "\n"
        return transcript_text
    except Exception as e:
        return "Error fetching transcript: " + str(e)


def summarize_transcript(transcript):
    """Summarize a transcript with BART; input is truncated to the model's 1024-token limit."""
    # Note: BART does not use a "summarize:" task prefix (that convention belongs to T5),
    # so the raw transcript is encoded directly.
    input_ids = tokenizer.encode(transcript, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = model.generate(input_ids, num_beams=4, min_length=30, max_length=200, early_stopping=True)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary


def summarize_video_transcript(url):
    """Gradio handler: fetch the transcript, then return its summary (or the fetch error)."""
    transcript = get_transcript(url)
    if not transcript or transcript.startswith("Error"):
        return transcript
    summary = summarize_transcript(transcript)
    return summary


iface = gr.Interface(fn=summarize_video_transcript,
                     inputs="text",
                     outputs="text",
                     title="YouTube Video Transcript Summarizer",
                     description="Enter a YouTube video URL to get a summary of its transcript")

if __name__ == "__main__":
    iface.launch()
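

# Optional extension (a sketch, not part of the original app): facebook/bart-large-cnn can only
# attend to 1024 tokens, so anything past that point of a long transcript is silently dropped by
# the truncation in summarize_transcript above. A common workaround is to split the transcript
# into chunks that fit the model, summarize each chunk, and join the partial summaries. The
# chunk size of 900 tokens and the function name summarize_long_transcript are assumptions for
# illustration only; to wire this into the Gradio handler it would need to be defined before the
# Interface is created.
def summarize_long_transcript(transcript, chunk_tokens=900):
    """Summarize a transcript of arbitrary length by chunking it to fit BART's context window."""
    token_ids = tokenizer.encode(transcript)
    chunks = [token_ids[i:i + chunk_tokens] for i in range(0, len(token_ids), chunk_tokens)]
    partial_summaries = []
    for chunk in chunks:
        # Decode each chunk back to text and reuse the single-chunk summarizer on it.
        chunk_text = tokenizer.decode(chunk, skip_special_tokens=True)
        partial_summaries.append(summarize_transcript(chunk_text))
    return " ".join(partial_summaries)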