import gradio as gr
from transformers import pipeline
# Map user-facing display names to Hugging Face model IDs (both are
# wav2vec2 checkpoints fine-tuned for sentiment classification on MESD).
model_name2id = {
    "Model A": "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD",
    "Model B": "hackathon-pln-es/wav2vec2-base-finetuned-sentiment-mesd",
}

def classify_sentiment(audio, model_name):
    # Build an audio-classification pipeline for the selected model and
    # return a {label: score} mapping for the recorded clip.
    pipe = pipeline("audio-classification", model=model_name2id[model_name])
    pred = pipe(audio)
    return {dic["label"]: dic["score"] for dic in pred}
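
# Hypothetical local check (not part of the app; assumes the bundled example
# clip "basta_neutral.wav" is in the working directory):
#   scores = classify_sentiment("basta_neutral.wav", "Model A")
#   scores is then a dict like {"neutral": ..., "sadness": ...} with one
#   confidence score per sentiment label.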

input_audio = [gr.inputs.Audio(source="microphone", type="filepath", label="Record / Drop audio"),
               gr.inputs.Dropdown(list(model_name2id.keys()), label="Model Name")]
label = gr.outputs.Label(num_top_classes=5)
################### Gradio Web APP ################################
title = "Audio Sentiment Classifier"
description = """
<p>
<center>
This application classifies the sentiment of the audio input provided by the user.
</center>
</p>
"""

gr.Interface(
    fn=classify_sentiment,
    inputs=input_audio,
    outputs=label,
    title=title,
    description=description,
    # Each example supplies an audio file plus the dropdown value to use.
    examples=[["basta_neutral.wav", "Model A"], ["detras_disgust.wav", "Model A"],
              ["mortal_sadness.wav", "Model A"], ["respiracion_happiness.wav", "Model A"],
              ["robo_fear.wav", "Model A"]],
    theme="grass",
).launch()