|
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    pipeline,
)
|
|
|
# Whisper-tiny checkpoint fine-tuned as a binary end-of-speech (EOS) classifier.
model_id = "Ahmed107/whisper-tiny-finetuned-eos"

# Binary label mapping: 0 -> not end-of-speech, 1 -> end-of-speech.
id2label = {"0": "NOT_EOS", "1": "EOS"}
label2id = {"NOT_EOS": "0", "EOS": "1"}

# Inject the label mapping into the config so predictions carry readable names
# instead of generic LABEL_0 / LABEL_1.
config = AutoConfig.from_pretrained(model_id, label2id=label2id, id2label=id2label)
model = AutoModelForAudioClassification.from_pretrained(model_id, config=config)

# When a model *instance* (rather than a model identifier string) is handed to
# pipeline(), the matching feature extractor cannot be inferred automatically,
# so it must be supplied explicitly or pipeline construction fails.
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
pipe = pipeline(
    "audio-classification", model=model, feature_extractor=feature_extractor
)
|
|
|
def classify_audio(filepath):
    """Classify an audio file with the end-of-speech pipeline.

    Args:
        filepath: Path to the audio file to classify.

    Returns:
        dict mapping each predicted label name (e.g. "EOS", "NOT_EOS")
        to its confidence score.
    """
    predictions = pipe(filepath)
    return {pred["label"]: pred["score"] for pred in predictions}
|
|
|
import gradio as gr

# NOTE: the gr.outputs.* namespace was deprecated in Gradio 3.x and removed in
# 4.x; the top-level gr.Label component is the supported replacement.
demo = gr.Interface(
    fn=classify_audio,
    inputs=gr.Audio(type="filepath"),  # pass a temp-file path to classify_audio
    outputs=gr.Label(),  # renders the label -> score mapping as a bar chart
)

demo.launch(debug=True)