import os

import gradio as gr


def build_interface():
    """Builds an enhanced Gradio interface for Bambara speech recognition."""
    example_files = get_example_files()

    custom_css = """
    .gr-button-primary {
        background-color: #2c5282 !important;
        color: white !important;
        border-radius: 8px !important;
        font-weight: bold !important;
    }
    .gr-button-secondary {
        background-color: #e2e8f0 !important;
        color: #2d3748 !important;
        border-radius: 8px !important;
    }
    .example-container {
        background-color: #f7fafc;
        padding: 16px;
        border-radius: 8px;
        margin-top: 16px;
    }
    .gr-textbox {
        border-radius: 8px !important;
        border: 1px solid #cbd5e0 !important;
    }
    .gr-audio {
        border-radius: 8px !important;
    }
    .header {
        text-align: center;
        color: #2d3748;
    }
    .info-section {
        background-color: #edf2f7;
        padding: 16px;
        border-radius: 8px;
        margin-top: 16px;
    }
    """

    # Callback helpers. They are defined before the UI is built so that the
    # example buttons below can be wired directly to transcribe_with_status.
    def update_audio_preview(audio_file):
        return gr.update(value=audio_file, visible=True), ""

    def clear_inputs():
        return None, "", gr.update(visible=False), gr.update(value="", visible=False)

    def transcribe_with_status(audio_file):
        # Returns the transcription plus a status update; the status Markdown
        # starts hidden, so each update also toggles its visibility on.
        if not audio_file:
            return "", gr.update(value="**Error**: Please provide an audio file.", visible=True)
        result = transcribe_audio(audio_file)
        if "Error" in result:
            return result, gr.update(value=f"**Error**: {result}", visible=True)
        return result, gr.update(value="**Success**: Transcription completed!", visible=True)

    with gr.Blocks(title="Bambara Speech Recognition", css=custom_css) as demo:
        # Header
        gr.Markdown(
            """

            <div class="header">
                <h1>🎤 Bambara Speech Recognition</h1>
                <p>Powered by MALIBA-AI | Convert Bambara speech to text effortlessly</p>
            </div>

""" ) # Main interaction section with gr.Row(): with gr.Column(scale=1): gr.Markdown("### đŸŽ™ī¸ Record or Upload Audio") audio_input = gr.Audio( label="Record or Upload Audio", type="filepath", sources=["microphone", "upload"], show_label=False ) audio_preview = gr.Audio( label="Preview Your Audio", interactive=False, visible=False ) with gr.Row(): transcribe_btn = gr.Button( "🔄 Transcribe Audio", variant="primary", size="lg" ) clear_btn = gr.Button( "đŸ—‘ī¸ Clear", variant="secondary", size="lg" ) with gr.Column(scale=1): gr.Markdown("### 📝 Transcription Output") output_text = gr.Textbox( label="Transcribed Text (Bambara)", lines=6, placeholder="Your transcribed Bambara text will appear here...", interactive=False, show_copy_button=True ) status_message = gr.Markdown( value="", visible=False ) # Example audio section if example_files: gr.Markdown("## đŸŽĩ Try Example Audio Files") with gr.Group(elem_classes="example-container"): gr.Markdown( """ Listen to these sample Bambara audio files and transcribe them with one click. """ ) for idx, file in enumerate(example_files): with gr.Row(): gr.Audio( value=file, label=f"Example {idx + 1}: {os.path.basename(file)}", interactive=False, show_label=True ) gr.Button( f"Transcribe Example {idx + 1}", variant="primary", size="sm" ).click( fn=transcribe_audio, inputs=gr.State(value=file), outputs=[output_text, status_message], show_progress=True, _js="() => {return {show_progress: true}}" ) gr.Markdown( """
## â„šī¸ How to Use 1. **Record**: Click the microphone to speak in Bambara. 2. **Upload**: Select an audio file (WAV, MP3, M4A, FLAC, OGG). 3. **Transcribe**: Click "Transcribe Audio" or try an example. 4. **View**: See the transcribed text in Bambara. ## 📊 Model Details - **Model**: [sudoping01/maliba-asr-v1](https://huggingface.co/sudoping01/maliba-asr-v1) - **Language**: Bambara (bm) - **Sample Rate**: 16kHz (auto-resampled) - **Best for**: Clear speech with minimal background noise
""" ) def update_audio_preview(audio_file): return gr.update(value=audio_file, visible=True), "" def clear_inputs(): return None, "", gr.update(visible=False), "" def transcribe_with_status(audio_file): if not audio_file: return "", "**Error**: Please provide an audio file." result = transcribe_audio(audio_file) if "Error" in result: return result, f"**Error**: {result}" return result, "**Success**: Transcription completed!" audio_input.change( fn=update_audio_preview, inputs=audio_input, outputs=[audio_preview, status_message] ).then( fn=transcribe_with_status, inputs=audio_input, outputs=[output_text, status_message], show_progress=True ) transcribe_btn.click( fn=transcribe_with_status, inputs=audio_input, outputs=[output_text, status_message], show_progress=True ) clear_btn.click( fn=clear_inputs, outputs=[audio_input, output_text, audio_preview, status_message] ) return demo