File size: 1,618 Bytes
ed9aac5
b6907f5
ed9aac5
f42dcac
ed9aac5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import gradio as gr
#import librosa
import numpy as np
import torch

from transformers import WhisperProcessor, WhisperForConditionalGeneration

# Load the Whisper processor and model once at module import so every
# request reuses the same weights.
processor = WhisperProcessor.from_pretrained("openai/whisper-large")
# BUG FIX: the original instantiated SpeechT5ForSpeechToText, a class that is
# never imported (NameError) and is the wrong architecture for this
# checkpoint; WhisperForConditionalGeneration (imported above) is correct.
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

# Force English transcription regardless of detected language.
# BUG FIX: get_decoder_prompt_ids is an instance method; calling it on the
# WhisperProcessor class without an instance raises a TypeError.
model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="english", task="transcribe")


def predict(audio, mic_audio=None):
    # audio = tuple (sample_rate, frames) or (sample_rate, (frames, channels))
    if mic_audio is not None:
        sampling_rate, waveform = mic_audio
    elif audio is not None:
        sampling_rate, waveform = audio
    else:
        return "(please provide audio)"

    waveform = process_audio(sampling_rate, waveform)
    input_features = processor(waveform, sampling_rate=16000, return_tensors="pt").input_features
    predicted_ids = model.generate(input_features, max_length=400)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]


# Text shown in the Gradio UI: page title and the HTML blurb rendered
# under it.
title = "Demo for Whisper -> Something -> XLS-R"

description = """
<b>How to use:</b> Upload an audio file or record using the microphone. The audio is converted to mono and resampled to 16 kHz before
being passed into the model. The output is the text transcription of the audio.
"""

# Build and launch the Gradio UI: two audio inputs (file upload and
# microphone) feeding predict(), one text output for the transcription.
gr.Interface(
    fn=predict,
    inputs=[
        gr.Audio(label="Upload Speech", source="upload", type="numpy"),
        gr.Audio(label="Record Speech", source="microphone", type="numpy"),
    ],
    outputs=[
        gr.Text(label="Transcription"),
    ],
    title=title,
    # BUG FIX: the original passed article=article, but no `article`
    # variable exists anywhere in the file (NameError at import time);
    # the unused `description` defined above was clearly intended here.
    description=description,
).launch()