File size: 1,509 Bytes
a009c84
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import gradio as gr
import pyaudioconvert as pac
from pydub import AudioSegment

import nemo
import nemo.collections.asr as nemo_asr

# Load the pre-trained model
# Restores a Conformer-CTC (BPE tokenizer) checkpoint from a local .nemo file.
# NOTE(review): hard-coded absolute path — this script only runs on a machine
# where the checkpoint lives at exactly this location.
model = nemo_asr.models.EncDecCTCModelBPE.restore_from(
    restore_path="/home/yonas/stt/demo/model/Kinyarwanda_nemo_stt_conformer_model.nemo"
)

# Sanity check: EncDecCTCModelBPE should subclass EncDecCTCModel, so this
# guards against restore_from returning an unexpected model class.
# NOTE(review): assert is stripped under `python -O`; a raise would be sturdier.
assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel)


def convert(file_name):
    """Normalize an audio file to 16-bit mono WAV, overwriting it in place.

    mp3/ogg inputs are first re-encoded to WAV via pydub (the original path
    is reused, extension and all); then pyaudioconvert downmixes the WAV
    data to 16-bit mono, which the ASR model expects.

    Args:
        file_name: Path to the audio file (from the Gradio upload/recording).

    Returns:
        True on success, False when the file extension is unsupported.
    """
    lower = file_name.lower()
    # Require the dot and ignore case: the original bare endswith("mp3")
    # would also match a name like "dump3" and rejected ".MP3"/".WAV".
    if not lower.endswith((".mp3", ".wav", ".ogg")):
        return False
    if lower.endswith(".mp3"):
        AudioSegment.from_mp3(file_name).export(file_name, format="wav")
    elif lower.endswith(".ogg"):
        AudioSegment.from_ogg(file_name).export(file_name, format="wav")
    # .wav inputs skip re-encoding and go straight to the downmix step.
    pac.convert_wav_to_16bit_mono(file_name, file_name)
    return True


def transcribe(audio):
    """Transcribe one uploaded/recorded clip with the Kinyarwanda ASR model.

    Args:
        audio: Filesystem path handed over by the Gradio Audio component,
            or None/empty when nothing was provided.

    Returns:
        The recognized text, or a human-readable error message.
    """
    # Guard clauses: no input, then unsupported format.
    if not audio:
        return "No audio provided"
    if not convert(audio):
        return "The format must be mp3, wav, or ogg"

    # model.transcribe expects a batch of paths; we submit a batch of one
    # and return its single result.
    return model.transcribe([audio])[0]


# Build the web UI: a single audio input (file upload or browser microphone)
# wired to transcribe(), with the recognized text shown in a textbox.
# type="filepath" makes Gradio hand transcribe() a path on disk;
# format="wav" asks Gradio to save recordings/uploads as WAV.
gradio_ui = gr.Interface(
    fn=transcribe,
    title="Kinyarwanda Speech Recognition",
    description="Upload an audio clip or record from browser using microphone.",
    inputs=[
        gr.Audio(label="Upload Audio File or Record from microphone", sources=["upload", "microphone"], type="filepath", format="wav"),
    ],
    outputs=gr.Text(label="Recognized speech")
)

# Launch the Gradio app
# share=True opens a public tunnel URL; debug=True blocks and streams errors
# to the console — both intended for demo use, not production.
gradio_ui.launch(share=True, debug=True)