"""Gradio web app: sentiment classification of a recorded/uploaded audio clip.

The user records (or drops) an audio file and selects one of two wav2vec2
models fine-tuned on the MESD dataset; the app displays the predicted
sentiment labels with their confidence scores.
"""
import gradio as gr
from transformers import pipeline

# Display name shown in the UI -> Hugging Face Hub model id.
model_name2id = {
    "Model A": "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD",
    "Model B": "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd",
}


def classify_sentiment(audio, model_name):
    """Classify the sentiment of an audio clip with the selected model.

    Parameters
    ----------
    audio : str
        Filesystem path to the audio clip (Gradio passes a filepath
        because the Audio input uses ``type="filepath"``).
    model_name : str
        One of the keys of ``model_name2id`` (the dropdown choice).

    Returns
    -------
    dict
        Mapping of sentiment label -> confidence score, as expected by
        the ``Label`` output component.
    """
    # NOTE: the pipeline is rebuilt on every call (model re-loaded per
    # request); kept as-is to preserve the original behavior.
    pipe = pipeline("audio-classification", model=model_name2id[model_name])
    pred = pipe(audio)
    return {dic["label"]: dic["score"] for dic in pred}


# BUG FIX: the original evaluated ``model_name2id[model_name]`` at module
# level, where ``model_name`` is undefined -> NameError on import. The
# dropdown must offer the two display names (the dict's keys).
input_audio = [
    gr.inputs.Audio(source="microphone", type="filepath", label="Record/ Drop audio"),
    gr.inputs.Dropdown(list(model_name2id), label="Model Name"),
]
label = gr.outputs.Label(num_top_classes=5)

################### Gradio Web APP ################################
title = "Audio Sentiment Classifier"
description = """

This application classifies the sentiment of the audio input provided by the user. #
#

#
#logo #
"""

gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
    # BUG FIX: ``title`` and ``description`` were defined but never passed
    # to the Interface; wired in to match the obvious intent.
    title=title,
    description=description,
    # BUG FIX: each example previously used the undefined ``model_name``;
    # every example row now pins a valid dropdown choice.
    examples=[
        ["basta_neutral.wav", "Model A"],
        ["detras_disgust.wav", "Model A"],
        ["mortal_sadness.wav", "Model A"],
        ["respiracion_happiness.wav", "Model A"],
        ["robo_fear.wav", "Model A"],
    ],
    theme="grass",
).launch()