Shahadbal committed on
Commit 0cc1853
1 Parent(s): 06ed0da

Update app.py

Files changed (1)
  1. app.py +8 -42
app.py CHANGED
@@ -33,33 +33,6 @@ languages = {
 qa_pipeline = pipeline(task="question-answering", model="deepset/roberta-base-squad2")
 
 
-# Function to download audio from YouTube
-def download_audio_from_youtube(youtube_url, output_path="downloaded_audio.mp3"):
-    ydl_opts = {
-        'format': 'bestaudio/best',
-        'outtmpl': 'temp_audio.%(ext)s',
-        'postprocessors': [{
-            'key': 'FFmpegExtractAudio',
-            'preferredcodec': 'mp3',
-            'preferredquality': '192',
-        }],
-        'quiet': True,
-        'no_warnings': True,
-    }
-
-    try:
-        command = [
-            "yt-dlp",
-            "-x",                     # extract audio only
-            "--audio-format", "mp3",  # specify mp3 format
-            "-o", output_path,        # specify output path
-            youtube_url               # YouTube URL
-        ]
-        subprocess.run(command, check=True, capture_output=True)
-        return output_path
-    except Exception as e:
-        return f"Error downloading audio: {e}"
-
 # Function to extract audio from video
 def extract_audio_from_video(video_file, output_audio="extracted_audio.mp3"):
     try:
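For context on the helper removed above: it built a ydl_opts dictionary in yt_dlp's options format but never used it, shelling out to the yt-dlp CLI through subprocess instead. A minimal sketch of driving those same options through the yt_dlp Python API, assuming the yt_dlp package and FFmpeg are installed (the helper name download_audio_with_ydl_opts is hypothetical, not part of app.py):

import yt_dlp

def download_audio_with_ydl_opts(youtube_url):
    # Same options the removed helper declared but never passed along.
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': 'temp_audio.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }],
        'quiet': True,
        'no_warnings': True,
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        ydl.download([youtube_url])   # FFmpegExtractAudio converts the download to mp3
    return "temp_audio.mp3"           # name produced by the outtmpl/postprocessor pair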
@@ -75,23 +48,18 @@ languageG = None
 
 def content_input_update(content_type):
     visibility_map = {
-        "Audio Upload": (True, False, False),
-        "Video Upload": (False, False, True),
-        "YouTube Link": (False, True, False),
+        "Audio Upload": (True, False),
+        "Video Upload": (False, True),
     }
-    visible_audio, visible_youtube, visible_video = visibility_map.get(content_type, (False, False, False))
+    visible_audio, visible_video = visibility_map.get(content_type, (False, False))
     return (
         gr.update(visible=visible_audio),
-        gr.update(visible=visible_youtube),
         gr.update(visible=visible_video)
     )
 
 def transcribe_content(content_type, audio_path, youtube_link, video):
     if content_type == "Audio Upload" and audio_path:
         return whispermodel.transcribe(audio_path)["text"]
-    elif content_type == "YouTube Link" and youtube_link:
-        audio_file = download_audio_from_youtube(youtube_link)
-        return whispermodel.transcribe(audio_file)["text"]
     elif content_type == "Video Upload" and video:
         audio_file = extract_audio_from_video(video.name)
         return whispermodel.transcribe(audio_file)["text"]
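The updated content_input_update returns exactly two gr.update objects, one per remaining upload widget. The event wiring itself is not shown in this commit; a hedged sketch of how the radio defined later in the file would typically drive it, inside the with gr.Blocks() as demo: block:

# Sketch only: the .change() hookup is an assumption, not a line from this diff.
# content_type, file_input and video_input are the components created below.
content_type.change(
    fn=content_input_update,            # returns (gr.update, gr.update)
    inputs=content_type,                # currently selected content type
    outputs=[file_input, video_input],  # one visibility update per widget
)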
@@ -128,7 +96,7 @@ def create_audio_summary(summary, language):
         return audio_path
     return None
 
-def main(content_type, audio_path, youtube_link, video, language, summarize):
+def main(content_type, audio_path, video, language, summarize):
     global transcription, languageG
     languageG = language
 
@@ -159,26 +127,24 @@ with gr.Blocks() as demo:
     )
 
     content_type = gr.Radio(
-        choices=["Audio Upload", "Video Upload", "YouTube Link"],
+        choices=["Audio Upload", "Video Upload"],
         label="Select Content Type",
         value="Audio Upload"
     )
 
     file_input = gr.Audio(label="Upload an Audio File", visible=True, type="filepath")
-    youtube_input = gr.Textbox(label="Enter YouTube Link", visible=False, placeholder="https://www.youtube.com/watch?v=example")
     video_input = gr.File(label="Upload a Video", visible=False, type="filepath")
 
     language = gr.Radio(choices=["Arabic", "English"], label="Preferred Language", value="English")
     summarize = gr.Checkbox(label="Summarize the content?")
 
     examples = [
-        ["Audio Upload", "audio-example.mp3", None, None, "English", True, True, 5],
-        ["Video Upload", None, None, "video-example.mp4", "Arabic", True, False, 3],
-        ["YouTube Link", None, "https://www.youtube.com/watch?v=J4RqCSD--Dg&ab_channel=LearnFree", None, "English", False, True, 2]
+        ["Audio Upload", "audio-example.mp3", None, "English", True, True, 5],
+        ["Video Upload", None, "video-example.mp4", "Arabic", True, False, 3],
     ]
     gr.Examples(
         examples=examples,
-        inputs=[content_type, file_input, youtube_input, video_input, language, summarize],
+        inputs=[content_type, file_input, video_input, language, summarize],
         label="Try These Examples"
     )
 
150