import gradio as gr
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    pipeline,
)

model_id = "Ahmed107/whisper-tiny-finetuned-eos"

# define the class-index <-> label mappings as dictionaries
id2label = {0: "NOT_EOS", 1: "EOS"}
label2id = {"NOT_EOS": 0, "EOS": 1}

# load the config with the label mappings, then the fine-tuned model
config = AutoConfig.from_pretrained(model_id, label2id=label2id, id2label=id2label)
model = AutoModelForAudioClassification.from_pretrained(model_id, config=config)

# the pipeline needs a feature extractor to turn raw audio into model inputs;
# it cannot be inferred automatically when the model is passed as an object
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
pipe = pipeline("audio-classification", model=model, feature_extractor=feature_extractor)


def classify_audio(filepath):
    # run the pipeline and convert its list of predictions into a
    # {label: score} dict, which gr.Label() expects
    preds = pipe(filepath)
    return {p["label"]: p["score"] for p in preds}


demo = gr.Interface(
    fn=classify_audio,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Label(),
)
demo.launch(debug=True)
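
# Quick sanity check (illustrative only): classify_audio() can also be called
# directly on a local recording and returns the label -> score mapping that
# gr.Label() renders, e.g.
#
#     classify_audio("example.wav")
#     # {"EOS": 0.93, "NOT_EOS": 0.07}
#
# The file name and scores above are placeholders, not actual model output.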