import gradio as gr
from transformers import pipeline
import numpy as np
# Load the model
model_id = "badrex/mms-300m-arabic-dialect-identifier" # Replace with your model ID
classifier = pipeline("audio-classification", model=model_id)
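# Note: pipeline() downloads the model from the Hugging Face Hub on first run
# and requires a backend such as PyTorch to be installed (assumed to be listed
# in requirements.txt alongside gradio and transformers)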
# Define dialect names for better display
dialect_mapping = {
    "MSA": "Modern Standard Arabic",
    "Egyptian": "Egyptian Arabic",
    "Gulf": "Gulf Arabic",
    "Levantine": "Levantine Arabic",
    "Maghrebi": "Maghrebi Arabic"
}
def predict_dialect(audio):
    # The audio input from Gradio is a tuple of (sample_rate, audio_array)
    if isinstance(audio, tuple) and len(audio) == 2:
        sr, audio_array = audio
    else:
        # Handle the error case
        return {"Error": 1.0}

    # Process the audio input
    if len(audio_array.shape) > 1:
        audio_array = audio_array.mean(axis=1)  # Convert stereo to mono

    # Gradio delivers int16 PCM samples by default; convert to float32 in
    # [-1, 1] so the feature extractor (and any resampling) behaves correctly
    audio_array = audio_array.astype(np.float32)
    if np.abs(audio_array).max() > 1.0:
        audio_array /= 32768.0

    # Classify the dialect
    predictions = classifier({"sampling_rate": sr, "raw": audio_array})

    # Format results for display
    results = {}
    for pred in predictions:
        dialect_name = dialect_mapping.get(pred["label"], pred["label"])
        results[dialect_name] = float(pred["score"])

    return results
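# A quick local sanity check, kept as a comment so it does not run when the
# app starts. This is a rough sketch that assumes soundfile is installed and
# that a WAV file exists at the hypothetical path "examples/msa_example.wav":
#
#   import soundfile as sf
#   audio_array, sr = sf.read("examples/msa_example.wav", dtype="float32")
#   print(predict_dialect((sr, audio_array)))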
# Create the Gradio interface
demo = gr.Interface(
    fn=predict_dialect,
    inputs=gr.Audio(),  # Accepts both microphone recordings and uploaded files
    outputs=gr.Label(num_top_classes=5, label="Predicted Dialect"),
    title="Arabic Dialect Identifier",
    description="""This demo identifies Arabic dialects from speech audio.
Upload an audio file or record your voice speaking Arabic to see which dialect it matches.
The model identifies: Modern Standard Arabic (MSA), Egyptian, Gulf, Levantine, and Maghrebi dialects.""",
    examples=[
        # Optional: add example audio files here if you have them
        # ["examples/msa_example.wav"],
        # ["examples/egyptian_example.wav"],
    ],
    allow_flagging="never"
)
# Launch the app
demo.launch()