pritamdeka committed on
Commit
ec4c5f1
·
verified ·
1 Parent(s): ff627b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -7
app.py CHANGED
@@ -1,8 +1,6 @@
1
  import gradio as gr
2
  import whisper
3
  from transformers import pipeline
4
- import torch
5
- import numpy as np
6
  import librosa
7
 
8
  # Load Whisper model
@@ -20,15 +18,15 @@ def get_summarizer(model_name):
20
  return None
21
 
22
  # Function to transcribe audio file using Whisper
23
- def transcribe_audio(model_size, audio):
24
- if audio is None:
25
  return "No audio file provided."
26
 
27
  # Load the selected Whisper model
28
  model = whisper.load_model(model_size)
29
 
30
  # Load and convert audio using librosa
31
- audio_data, sample_rate = librosa.load(audio, sr=16000)
32
 
33
  # Transcribe the audio file
34
  result = model.transcribe(audio_data)
@@ -50,9 +48,9 @@ def summarize_text(transcription, model_name):
50
  return "Invalid summarization model selected."
51
 
52
  # Create a Gradio interface that combines transcription and summarization
53
- def combined_transcription_and_summarization(model_size, summarizer_model, audio):
54
  # Step 1: Transcribe the audio using Whisper
55
- transcription = transcribe_audio(model_size, audio)
56
 
57
  # Step 2: Summarize the transcribed text using the chosen summarizer model
58
  summary = summarize_text(transcription, summarizer_model)
 
1
  import gradio as gr
2
  import whisper
3
  from transformers import pipeline
 
 
4
  import librosa
5
 
6
  # Load Whisper model
 
18
  return None
19
 
20
  # Function to transcribe audio file using Whisper
21
+ def transcribe_audio(model_size, audio_path):
22
+ if audio_path is None:
23
  return "No audio file provided."
24
 
25
  # Load the selected Whisper model
26
  model = whisper.load_model(model_size)
27
 
28
  # Load and convert audio using librosa
29
+ audio_data, sample_rate = librosa.load(audio_path, sr=16000)
30
 
31
  # Transcribe the audio file
32
  result = model.transcribe(audio_data)
 
48
  return "Invalid summarization model selected."
49
 
50
  # Create a Gradio interface that combines transcription and summarization
51
+ def combined_transcription_and_summarization(model_size, summarizer_model, audio_path):
52
  # Step 1: Transcribe the audio using Whisper
53
+ transcription = transcribe_audio(model_size, audio_path)
54
 
55
  # Step 2: Summarize the transcribed text using the chosen summarizer model
56
  summary = summarize_text(transcription, summarizer_model)