Gradio ASR - first commit
- app.py +12 -0
- audio_processing.py +135 -0
- config.py +8 -0
- model_utils.py +39 -0
app.py
ADDED
@@ -0,0 +1,12 @@
import torch
from model_utils import load_models
from audio_processing import iface

# Clear GPU cache and load models at startup
torch.cuda.empty_cache()
load_models()

if __name__ == "__main__":
    iface.launch()
audio_processing.py
ADDED
@@ -0,0 +1,135 @@
import torch
import whisper
import numpy as np
import torchaudio as ta
import gradio as gr
from model_utils import get_processor, get_model, get_whisper_model_small, get_device
from config import SAMPLING_RATE, CHUNK_LENGTH_S
import subprocess

import subprocess
import torchaudio as ta

def resample_with_ffmpeg(input_file, output_file, target_sr=16000):
    command = [
        'ffmpeg', '-i', input_file, '-ar', str(target_sr), output_file
    ]
    subprocess.run(command, check=True)

def detect_language(audio):
    whisper_model = get_whisper_model_small()

    # Save the input audio to a temporary file
    ta.save("input_audio.wav", torch.tensor(audio[1]).unsqueeze(0), audio[0])

    # Resample if necessary using ffmpeg
    if audio[0] != SAMPLING_RATE:
        resample_with_ffmpeg("input_audio.wav", "resampled_audio.wav", target_sr=SAMPLING_RATE)
        audio_tensor, _ = ta.load("resampled_audio.wav")
    else:
        audio_tensor = torch.tensor(audio[1]).float()

    # Ensure the audio is in the correct shape (mono)
    if audio_tensor.dim() == 2:
        audio_tensor = audio_tensor.mean(dim=0)

    # Use Whisper's preprocessing
    audio_tensor = whisper.pad_or_trim(audio_tensor)
    print(f"Audio length after pad/trim: {audio_tensor.shape[-1] / SAMPLING_RATE} seconds")
    mel = whisper.log_mel_spectrogram(audio_tensor).to(whisper_model.device)

    # Detect language
    _, probs = whisper_model.detect_language(mel)
    detected_lang = max(probs, key=probs.get)

    print(f"Audio shape: {audio_tensor.shape}")
    print(f"Mel spectrogram shape: {mel.shape}")
    print(f"Detected language: {detected_lang}")
    print("Language probabilities:", probs)

    return detected_lang


def process_long_audio(audio, task="transcribe", language=None):
    if audio[0] != SAMPLING_RATE:
        # Save the input audio to a file for ffmpeg processing
        ta.save("input_audio_1.wav", torch.tensor(audio[1]).unsqueeze(0), audio[0])

        # Resample using ffmpeg
        try:
            resample_with_ffmpeg("input_audio_1.wav", "resampled_audio_2.wav", target_sr=SAMPLING_RATE)
        except subprocess.CalledProcessError as e:
            print(f"ffmpeg failed: {e.stderr}")
            raise e

        waveform, _ = ta.load("resampled_audio_2.wav")
    else:
        waveform = torch.tensor(audio[1]).float()

    # Ensure the audio is in the correct shape (mono)
    if waveform.dim() == 2:
        waveform = waveform.mean(dim=0)

    print(f"Waveform shape after processing: {waveform.shape}")

    if waveform.numel() == 0:
        raise ValueError("Waveform is empty. Please check the input audio file.")

    input_length = waveform.shape[0]  # Since waveform is 1D, access the length with shape[0]
    chunk_length = int(CHUNK_LENGTH_S * SAMPLING_RATE)

    # Corrected slicing for 1D tensor
    chunks = [waveform[i:i + chunk_length] for i in range(0, input_length, chunk_length)]

    # Initialize the processor
    processor = get_processor()
    model = get_model()
    device = get_device()

    results = []
    for chunk in chunks:
        input_features = processor(chunk, sampling_rate=SAMPLING_RATE, return_tensors="pt").input_features.to(device)

        with torch.no_grad():
            if task == "translate":
                forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task="translate")
                generated_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
            else:
                generated_ids = model.generate(input_features)

        transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
        results.extend(transcription)

    # Clear GPU cache
    torch.cuda.empty_cache()

    return " ".join(results)


def process_audio(audio):
    if audio is None:
        return "No file uploaded", "", ""

    detected_lang = detect_language(audio)
    transcription = process_long_audio(audio, task="transcribe")
    translation = process_long_audio(audio, task="translate", language=detected_lang)

    return detected_lang, transcription, translation

# Gradio interface
iface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(),
    outputs=[
        gr.Textbox(label="Detected Language"),
        gr.Textbox(label="Transcription", lines=5),
        gr.Textbox(label="Translation", lines=5)
    ],
    title="Audio Transcription and Translation",
    description="Upload an audio file to detect its language, transcribe, and translate it.",
    allow_flagging="never",
    css=".output-textbox { font-family: 'Noto Sans Devanagari', sans-serif; font-size: 18px; }"
)

if __name__ == "__main__":
    iface.launch()
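For reference, a minimal local smoke test of process_audio, assuming Gradio's default numpy audio format (a (sample_rate, np.ndarray) tuple, which is what the audio[0] / audio[1] indexing above relies on) and that the Whisper weights and the ffmpeg binary are available; the synthetic tone is only a placeholder input:

import numpy as np
from model_utils import load_models
from audio_processing import process_audio

load_models()

# One second of a 440 Hz tone at 16 kHz as int16, mimicking gr.Audio's (sr, samples) tuple
sr = 16000
t = np.linspace(0, 1.0, sr, endpoint=False)
samples = (0.3 * np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)

lang, transcription, translation = process_audio((sr, samples))
print(lang, transcription, translation)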
config.py
ADDED
@@ -0,0 +1,8 @@
# Audio processing constants
SAMPLING_RATE = 16000
CHUNK_LENGTH_S = 20  # 20 seconds per chunk

# Model constants
WHISPER_MODEL_SIZE = "small"

# Other constants can be added here as needed
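At these defaults, each chunk handled by process_long_audio covers CHUNK_LENGTH_S × SAMPLING_RATE = 20 × 16000 = 320,000 samples; the Whisper feature extractor then pads each chunk up to the model's fixed 30-second input window.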
model_utils.py
ADDED
@@ -0,0 +1,39 @@
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import whisper
from config import WHISPER_MODEL_SIZE

# Global variables to store models
whisper_processor = None
whisper_model = None
whisper_model_small = None

def load_models():
    global whisper_processor, whisper_model, whisper_model_small
    if whisper_processor is None:
        whisper_processor = WhisperProcessor.from_pretrained(f"openai/whisper-{WHISPER_MODEL_SIZE}")
    if whisper_model is None:
        whisper_model = WhisperForConditionalGeneration.from_pretrained(f"openai/whisper-{WHISPER_MODEL_SIZE}").to(get_device())
    if whisper_model_small is None:
        whisper_model_small = whisper.load_model(WHISPER_MODEL_SIZE)

def get_device():
    return "cuda:0" if torch.cuda.is_available() else "cpu"

def get_processor():
    global whisper_processor
    if whisper_processor is None:
        load_models()
    return whisper_processor

def get_model():
    global whisper_model
    if whisper_model is None:
        load_models()
    return whisper_model

def get_whisper_model_small():
    global whisper_model_small
    if whisper_model_small is None:
        load_models()
    return whisper_model_small
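The getters above implement a lazy-singleton pattern: the first accessor called triggers load_models(), and later calls return the cached module-level objects. A minimal sketch of that behavior (assuming the checkpoints can be downloaded):

from model_utils import get_model, get_processor

model_a = get_model()        # first call runs load_models() and loads the HF checkpoint
model_b = get_model()        # subsequent calls return the cached instance
processor = get_processor()  # already populated by the same load_models() call
assert model_a is model_b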