import gradio as gr
from transformers import pipeline
import numpy as np

# Load the pretrained Arabic dialect identification model
model_id = "badrex/mms-300m-arabic-dialect-identifier"
classifier = pipeline("audio-classification", model=model_id)

# Define dialect names for better display
dialect_mapping = {
    "MSA": "Modern Standard Arabic",
    "Egyptian": "Egyptian Arabic",
    "Gulf": "Gulf Arabic",
    "Levantine": "Levantine Arabic",
    "Maghrebi": "Maghrebi Arabic",
}

def predict_dialect(audio):
    """Classify the Arabic dialect spoken in the given audio clip."""
    # Gradio passes audio as a (sample_rate, audio_array) tuple
    if isinstance(audio, tuple) and len(audio) == 2:
        sr, audio_array = audio
    else:
        # No usable audio was provided
        return {"Error": 1.0}

    # Convert stereo to mono by averaging the channels
    if audio_array.ndim > 1:
        audio_array = audio_array.mean(axis=1)

    # Gradio yields integer PCM samples; the pipeline expects float audio in [-1, 1]
    if np.issubdtype(audio_array.dtype, np.integer):
        audio_array = audio_array.astype(np.float32) / np.iinfo(audio_array.dtype).max
    else:
        audio_array = audio_array.astype(np.float32)

    # Classify the dialect; the pipeline returns a list of {"label", "score"} dicts
    predictions = classifier({"sampling_rate": sr, "raw": audio_array})

    # Map raw labels to full dialect names for display
    results = {}
    for pred in predictions:
        dialect_name = dialect_mapping.get(pred["label"], pred["label"])
        results[dialect_name] = float(pred["score"])
    return results

# Create the Gradio interface
demo = gr.Interface(
    fn=predict_dialect,
    inputs=gr.Audio(),  # Simplified audio input
    outputs=gr.Label(num_top_classes=5, label="Predicted Dialect"),
    title="Arabic Dialect Identifier",
    description="""This demo identifies Arabic dialects from speech audio.
    Upload an audio file or record your voice speaking Arabic to see which dialect it matches.
    The model identifies: Modern Standard Arabic (MSA), Egyptian, Gulf, Levantine, and Maghrebi dialects.""",
    examples=[
        # Optional: Add example audio files here if you have them
        # ["examples/msa_example.wav"],
        # ["examples/egyptian_example.wav"],
    ],
    allow_flagging="never",
)

# Launch the app
demo.launch()
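
# To try the app outside the Space (assuming gradio, transformers, and a torch
# backend are installed in the environment), run:
#   python app.py
# and open the local URL that Gradio prints.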