Shahadbal committed on
Commit
761458e
1 Parent(s): 6dc4c21

Create app.py

Files changed (1)
  1. app.py +230 -0
app.py ADDED
@@ -0,0 +1,230 @@
import whisper
import torch
from transformers import pipeline
from transformers.utils import logging
from langdetect import detect
import gradio as gr
import os
from gtts import gTTS
from moviepy.editor import VideoFileClip
import yt_dlp

# Set logging verbosity
logging.set_verbosity_error()

# Load the pre-trained Whisper model
whispermodel = whisper.load_model("medium")

# Load the summarizer pipeline
summarizer = pipeline(task="summarization", model="facebook/bart-large-cnn", torch_dtype=torch.bfloat16)

# Load the translator pipeline
translator = pipeline(task="translation", model="facebook/nllb-200-distilled-600M")

# Define language mappings
languages = {
    "English": "eng_Latn",
    "Arabic": "arb_Arab",
}

# Load QA pipeline
qa_pipeline = pipeline(task="question-answering", model="deepset/roberta-base-squad2")

# Load question generator.
# The `pipelines` module comes from the question_generation package; note that this
# import shadows the transformers `pipeline` imported above, so it must stay below
# the transformers pipelines created earlier.
from pipelines import pipeline
question_generator = pipeline("question-generation", model="valhalla/t5-small-qg-prepend", qg_format="prepend")

# Function to download audio from YouTube
def download_audio_from_youtube(youtube_url, output_path="downloaded_audio.mp3"):
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': 'temp_audio.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
        'quiet': True,
        'no_warnings': True,
    }

    try:
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([youtube_url])
        os.rename('temp_audio.mp3', output_path)
        return output_path
    except Exception as e:
        return f"Error downloading audio: {e}"

# Function to extract audio from video
def extract_audio_from_video(video_file, output_audio="extracted_audio.mp3"):
    try:
        with VideoFileClip(video_file) as video_clip:
            video_clip.audio.write_audiofile(output_audio)
        return output_audio
    except Exception as e:
        return f"Error extracting audio: {e}"

# Define global variables
transcription = None
languageG = None

def content_input_update(content_type):
    visibility_map = {
        "Audio Upload": (True, False, False),
        "Video Upload": (False, False, True),
        "YouTube Link": (False, True, False),
    }
    visible_audio, visible_youtube, visible_video = visibility_map.get(content_type, (False, False, False))
    return (
        gr.update(visible=visible_audio),
        gr.update(visible=visible_youtube),
        gr.update(visible=visible_video)
    )

def transcribe_content(content_type, audio_path, youtube_link, video):
    if content_type == "Audio Upload" and audio_path:
        return whispermodel.transcribe(audio_path)["text"]
    elif content_type == "YouTube Link" and youtube_link:
        audio_file = download_audio_from_youtube(youtube_link)
        return whispermodel.transcribe(audio_file)["text"]
    elif content_type == "Video Upload" and video:
        # gr.File(type="filepath") returns a path string; fall back to .name for
        # Gradio versions that pass a file object instead.
        video_path = video if isinstance(video, str) else video.name
        audio_file = extract_audio_from_video(video_path)
        return whispermodel.transcribe(audio_file)["text"]
    return None

def generate_summary_and_qna(summarize, qna, number):
    summary_text = None
    extracted_data = None

    if summarize:
        summary = summarizer(transcription, min_length=10, max_length=150)
        summary_text = summary[0]['summary_text']

    if qna:
        questions = question_generator(transcription)
        extracted_data = [{'question': item['question'], 'answer': item['answer'].replace('<pad> ', '')} for item in questions]
        # gr.Number returns a float, so cast before slicing to the requested count
        extracted_data = extracted_data[:int(number)]

    return summary_text, extracted_data

def translator_text(summary, data, language):
    if language == 'English':
        return summary, data

    translated_summary = None
    translated_data = []

    if summary is not None:
        translated_summary = translator(summary, src_lang=languages["English"], tgt_lang=languages[language])[0]['translation_text']
    else:
        translated_summary = "No summary requested."

    if data is not None:
        for item in data:
            question = item.get('question', '')
            answer = item.get('answer', '')

            translated_question = translator(question, src_lang=languages["English"], tgt_lang=languages[language])[0]['translation_text'] if question else ''
            translated_answer = translator(answer, src_lang=languages["English"], tgt_lang=languages[language])[0]['translation_text'] if answer else ''

            translated_data.append({
                'question': translated_question,
                'answer': translated_answer
            })
    else:
        # Keep this falsy so main() formats it as "No Q&A requested."
        # (returning a string here would be iterated character by character below)
        translated_data = None

    return translated_summary, translated_data

def create_audio_summary(summary, language):
    if summary and summary != 'No summary requested.':
        tts = gTTS(text=summary, lang='ar' if language == 'Arabic' else 'en')
        audio_path = "output_audio.mp3"
        tts.save(audio_path)
        return audio_path
    return None

def main(content_type, audio_path, youtube_link, video, language, summarize, qna, number):
    global transcription, languageG
    languageG = language

    transcription = transcribe_content(content_type, audio_path, youtube_link, video)
    if not transcription:
        return "No transcription available.", "No Q&A requested.", None

    input_language = detect(transcription)
    input_language = 'Arabic' if input_language == 'ar' else 'English'
    if input_language != 'English':
        transcription = translator(transcription, src_lang=languages[input_language], tgt_lang=languages['English'])[0]['translation_text']

    summary_text, generated_qna = generate_summary_and_qna(summarize, qna, number)
    summary, qna = translator_text(summary_text, generated_qna, language)
    audio_path = create_audio_summary(summary, language)

    qna_output = (
        "\n\n".join(
            f"**Question:** {item['question']}\n**Answer:** {item['answer']}"
            for item in qna
        ) if qna else "No Q&A requested."
    )

    return summary, qna_output, audio_path

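# The "Interactive Q&A" tab below calls interactive_qa(), but the commit never
# defines it, so clicking "Get Answer" would raise a NameError. A minimal sketch
# (an assumption, not part of the original commit): answer the user's question
# with the extractive QA pipeline over the latest transcription.
def interactive_qa(question):
    if not transcription:
        return "Please transcribe some content first."
    if not question:
        return "Please enter a question."
    result = qa_pipeline(question=question, context=transcription)
    return f"**Answer:** {result['answer']}"
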
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Student Helper App
        This app assists students by allowing them to upload audio, video, or YouTube links for automatic transcription.
        It can translate content, summarize it, and generate Q&A questions to help with studying.
        The app is ideal for students who want to review lectures, study materials, or any educational content more efficiently.
        """
    )

    content_type = gr.Radio(
        choices=["Audio Upload", "Video Upload", "YouTube Link"],
        label="Select Content Type",
        value="Audio Upload"
    )

    file_input = gr.Audio(label="Upload an Audio File", visible=True, type="filepath")
    youtube_input = gr.Textbox(label="Enter YouTube Link", visible=False, placeholder="https://www.youtube.com/watch?v=example")
    video_input = gr.File(label="Upload a Video", visible=False, type="filepath")

    language = gr.Radio(choices=["Arabic", "English"], label="Preferred Language", value="English")
    summarize = gr.Checkbox(label="Summarize the content?")
    qna = gr.Checkbox(label="Generate Q&A about the content?")
    number = gr.Number(label="How many questions do you want at maximum?", value=5)

    examples = [
        ["Audio Upload", "audio-example.mp3", None, None, "English", True, True, 5],
        ["Video Upload", None, None, "video-example.mp4", "Arabic", True, False, 3],
        ["YouTube Link", None, "https://www.youtube.com/watch?v=J4RqCSD--Dg", None, "English", False, True, 2]
    ]
    gr.Examples(
        examples=examples,
        inputs=[content_type, file_input, youtube_input, video_input, language, summarize, qna, number],
        label="Try These Examples"
    )

    with gr.Tab("Summary"):
        summary_output = gr.Textbox(label="Summary", interactive=False)
        audio_output = gr.Audio(label="Audio Summary")

    with gr.Tab("Q&A"):
        qna_output = gr.Markdown(label="Q&A Request")

    with gr.Tab("Interactive Q&A"):
        user_question = gr.Textbox(label="Ask a Question", placeholder="Enter your question here...")
        qa_button = gr.Button("Get Answer")
        qa_response = gr.Markdown(label="Answer")

    qa_button.click(lambda question: interactive_qa(question), inputs=[user_question], outputs=qa_response)

    content_type.change(content_input_update, inputs=[content_type], outputs=[file_input, youtube_input, video_input])
    submit_btn = gr.Button("Submit")
    submit_btn.click(main, inputs=[content_type, file_input, youtube_input, video_input, language, summarize, qna, number],
                     outputs=[summary_output, qna_output, audio_output])

demo.launch()