# Study-Helper / app.py
import os
import whisper
import torch
from transformers import pipeline
from transformers.utils import logging
from langdetect import detect
import gradio as gr
from gtts import gTTS
from moviepy.editor import VideoFileClip
import yt_dlp
# Set logging verbosity
logging.set_verbosity_error()
# Load the pre-trained Whisper model
whispermodel = whisper.load_model("medium")
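# Note: "medium" downloads ~1.5 GB of weights and wants roughly 5 GB of VRAM;
# swapping in "small" or "base" here is a reasonable trade-off on constrained hardware.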
# Load the summarizer pipeline
summarizer = pipeline(task="summarization", model="facebook/bart-large-cnn", torch_dtype=torch.bfloat16)
# Load the translator pipeline
translator = pipeline(task="translation", model="facebook/nllb-200-distilled-600M")
# Define language mappings
languages = {
    "English": "eng_Latn",
    "Arabic": "arb_Arab",
}
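# NLLB expects FLORES-200 codes like the above; add entries here to support more languages.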
# Load QA pipeline
qa_pipeline = pipeline(task="question-answering", model="deepset/roberta-base-squad2")
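# Extractive QA: answers are spans copied out of the provided context, not generated text.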
# Function to download the audio track of a YouTube video as an mp3.
# (The original mixed an unused yt_dlp options dict with a subprocess call whose
# output template disagreed with it; this version uses the yt_dlp API throughout.)
def download_audio_from_youtube(youtube_url, output_path="downloaded_audio.mp3"):
    ydl_opts = {
        'format': 'bestaudio/best',
        # yt-dlp appends the codec's extension itself, so template on the stem only
        'outtmpl': os.path.splitext(output_path)[0] + '.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
        'quiet': True,
        'no_warnings': True,
    }
    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([youtube_url])
        return output_path
    except Exception as e:
        return f"Error downloading audio: {e}"
# Function to extract audio from video
def extract_audio_from_video(video_file, output_audio="extracted_audio.mp3"):
    try:
        with VideoFileClip(video_file) as video_clip:
            video_clip.audio.write_audiofile(output_audio)
        return output_audio
    except Exception as e:
        return f"Error extracting audio: {e}"
# Module-level state shared between main() and the Q&A handler
transcription = None
languageG = None
def content_input_update(content_type):
    # Map the selected content type to the visibility of the three input widgets
    visibility_map = {
        "Audio Upload": (True, False, False),
        "Video Upload": (False, False, True),
        "YouTube Link": (False, True, False),
    }
    visible_audio, visible_youtube, visible_video = visibility_map.get(content_type, (False, False, False))
    return (
        gr.update(visible=visible_audio),
        gr.update(visible=visible_youtube),
        gr.update(visible=visible_video)
    )
def transcribe_content(content_type, audio_path, youtube_link, video):
    if content_type == "Audio Upload" and audio_path:
        return whispermodel.transcribe(audio_path)["text"]
    elif content_type == "YouTube Link" and youtube_link:
        audio_file = download_audio_from_youtube(youtube_link)
        # The download helper returns an error string on failure, not a path
        if not os.path.exists(audio_file):
            return None
        return whispermodel.transcribe(audio_file)["text"]
    elif content_type == "Video Upload" and video:
        # gr.File(type="filepath") passes a path string, not a file object with .name
        audio_file = extract_audio_from_video(video)
        if not os.path.exists(audio_file):
            return None
        return whispermodel.transcribe(audio_file)["text"]
    return None
def generate_summary(summarize):
    # Summarize the globally stored transcription when requested.
    # bart-large-cnn accepts at most 1024 tokens; very long transcripts may need chunking.
    if not summarize:
        return None
    summary = summarizer(transcription, min_length=10, max_length=150)
    return summary[0]['summary_text']
def translator_text(summary, language):
    if language == 'English':
        return summary
    if summary is None:
        return "No summary requested."
    return translator(summary, src_lang=languages["English"], tgt_lang=languages[language])[0]['translation_text']
def create_audio_summary(summary, language):
    if summary and summary != 'No summary requested.':
        tts = gTTS(text=summary, lang='ar' if language == 'Arabic' else 'en')
        audio_path = "output_audio.mp3"
        tts.save(audio_path)
        return audio_path
    return None
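# interactive_qa is wired to the Q&A tab below but was missing from this file.
# A minimal sketch: extractive QA over the global transcription, with the answer
# translated when the user picked Arabic. Anything beyond "answer from the
# transcript" is an assumption about the intended behavior.
def interactive_qa(question):
    if not transcription:
        return "Please transcribe some content first."
    if not question or not question.strip():
        return "Please enter a question."
    result = qa_pipeline(question=question, context=transcription)
    answer = result['answer']
    if languageG == 'Arabic':
        answer = translator(answer, src_lang=languages["English"],
                            tgt_lang=languages["Arabic"])[0]['translation_text']
    return f"**Answer:** {answer}"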
def main(content_type, audio_path, youtube_link, video, language, summarize):
    global transcription, languageG
    languageG = language
    transcription = transcribe_content(content_type, audio_path, youtube_link, video)
    if not transcription:
        # Two outputs are wired up (summary textbox and audio), so return two values
        return "No transcription available.", None
    # Normalize everything to English before summarizing
    input_language = detect(transcription)
    input_language = 'Arabic' if input_language == 'ar' else 'English'
    if input_language != 'English':
        transcription = translator(transcription, src_lang=languages[input_language], tgt_lang=languages['English'])[0]['translation_text']
    summary_text = generate_summary(summarize)
    summary = translator_text(summary_text, language)
    audio_path = create_audio_summary(summary, language)
    return summary, audio_path
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Student Helper App
        This app lets students upload audio, video, or YouTube links for automatic transcription.
        It can translate the content, summarize it, and answer questions about it interactively,
        making it easier to review lectures, study materials, or any other educational content.
        """
    )
    content_type = gr.Radio(
        choices=["Audio Upload", "Video Upload", "YouTube Link"],
        label="Select Content Type",
        value="Audio Upload"
    )
    file_input = gr.Audio(label="Upload an Audio File", visible=True, type="filepath")
    youtube_input = gr.Textbox(label="Enter YouTube Link", visible=False, placeholder="https://www.youtube.com/watch?v=example")
    video_input = gr.File(label="Upload a Video", visible=False, type="filepath")
    language = gr.Radio(choices=["Arabic", "English"], label="Preferred Language", value="English")
    summarize = gr.Checkbox(label="Summarize the content?")
    # Each example row must supply exactly one value per component listed in `inputs`
    examples = [
        ["Audio Upload", "audio-example.mp3", None, None, "English", True],
        ["Video Upload", None, None, "video-example.mp4", "Arabic", True],
        ["YouTube Link", None, "https://www.youtube.com/watch?v=J4RqCSD--Dg&ab_channel=LearnFree", None, "English", False]
    ]
    gr.Examples(
        examples=examples,
        inputs=[content_type, file_input, youtube_input, video_input, language, summarize],
        label="Try These Examples"
    )
    with gr.Tab("Summary"):
        summary_output = gr.Textbox(label="Summary", interactive=False)
        audio_output = gr.Audio(label="Audio Summary")
    with gr.Tab("Interactive Q&A"):
        user_question = gr.Textbox(label="Ask a Question", placeholder="Enter your question here...")
        qa_button = gr.Button("Get Answer")
        qa_response = gr.Markdown(label="Answer")
        qa_button.click(interactive_qa, inputs=[user_question], outputs=qa_response)
    content_type.change(content_input_update, inputs=[content_type], outputs=[file_input, youtube_input, video_input])
    submit_btn = gr.Button("Submit")
    submit_btn.click(main, inputs=[content_type, file_input, youtube_input, video_input, language, summarize],
                     outputs=[summary_output, audio_output])
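# share=True exposes a temporary public Gradio link; drop it to keep the app local-only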
demo.launch(share=True)