import gradio as gr
import whisper
from transformers import pipeline
import pandas as pd, numpy as np
import os
import torchaudio
import librosa
from scipy.io.wavfile import write
import shutil
import soundfile as sf
import noisereduce as nr
from scipy.stats import skew
from tqdm import tqdm
import requests
import pickle
import dash
import dash_bootstrap_components as dbc
from dash import html
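
# Target sample rate (Hz): audio is resampled to this rate before feature
# extraction, matching the 8 kHz model indicated by the pickle filename below.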
sr = 8000
url = "https://huggingface.co/spaces/aslanovaf/Sentiment_Analysis_Azerbaijani/resolve/main/sentiment_model_8000.pickle"
hf_token = os.environ.get("HF_TOKEN")
headers = {"Authorization": f"Bearer {hf_token}"}

response = requests.get(url, headers=headers)
if response.status_code == 200:
    model = pickle.loads(response.content)
else:
    raise RuntimeError(f"Failed to download the sentiment model from {url} (status code: {response.status_code})")
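
# `model` is expected to be a pickled classifier exposing a scikit-learn-style
# predict() method that consumes the 95 audio features produced by get_mfcc() below.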
def split_full_audio_15_sec(audio_file):
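    """Resample the input audio to `sr` Hz and split it into 15-second chunks.

    A trailing chunk shorter than 3 seconds is discarded. Returns a list of
    numpy arrays, one per chunk.
    """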
    audio, orig_sr = sf.read(audio_file)
    # soundfile returns (frames, channels) for stereo input; collapse to mono
    # before resampling, since the feature extraction expects a 1-D signal.
    if audio.ndim > 1:
        audio = audio.mean(axis=1)
    audio = librosa.resample(y=audio, orig_sr=orig_sr, target_sr=sr)
    chunk_length = 15 * sr
    total_length = len(audio)
    start_index = 0
    end_index = min(chunk_length, total_length)
    f = 0
    chunks = []
    while start_index < total_length:
        chunk = audio[start_index:end_index]
        chunk_name = f"example_{f}.wav"
        chunk_duration = len(chunk) / sr
        # Drop a trailing chunk shorter than 3 seconds.
        if chunk_duration < 3:
            break
        chunks.append(chunk)
        start_index = end_index
        end_index = min(end_index + chunk_length, total_length)
        f += 1
    return chunks
def get_mfcc(name):
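    """Compute a 95-dimensional feature vector for a single audio chunk.

    Applies noise reduction, then extracts MFCCs (16 coefficients), zero-crossing
    rate, spectral rolloff and spectral centroid, summarising each with mean,
    std, skew, max and min. Returns a zero vector if extraction fails.
    """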
    resampled_audio = name
    try:
        reduced_noise = nr.reduce_noise(resampled_audio, sr=sr)
        ft1 = librosa.feature.mfcc(y=reduced_noise, sr=sr, n_mfcc=16)
        ft2 = librosa.feature.zero_crossing_rate(reduced_noise)[0]
        ft3 = librosa.feature.spectral_rolloff(y=reduced_noise)[0]
        ft4 = librosa.feature.spectral_centroid(y=reduced_noise)[0]
        ft1_trunc = np.hstack((np.mean(ft1, axis=1), np.std(ft1, axis=1), skew(ft1, axis=1), np.max(ft1, axis=1), np.min(ft1, axis=1)))
        ft2_trunc = np.hstack((np.mean(ft2), np.std(ft2), skew(ft2), np.max(ft2), np.min(ft2)))
        ft3_trunc = np.hstack((np.mean(ft3), np.std(ft3), skew(ft3), np.max(ft3), np.min(ft3)))
        ft4_trunc = np.hstack((np.mean(ft4), np.std(ft4), skew(ft4), np.max(ft4), np.min(ft4)))
        return pd.Series(np.hstack((ft1_trunc, ft2_trunc, ft3_trunc, ft4_trunc)))
    except Exception as exc:
        print(f"Feature extraction failed: {exc}")
        return pd.Series([0] * 95)
def analyze_sentiment(audio):
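    """Run the full pipeline on one audio file.

    Splits the audio into 15-second chunks, extracts features per chunk, and
    predicts a label (Normal / Negative / Silence) for each. Returns the
    per-chunk predictions as a comma-separated string together with an overall
    label.
    """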
    chunks = split_full_audio_15_sec(audio)
    chunked_df = pd.DataFrame(data={'Chunk_order': [f'Chunk_{i+1}' for i in range(len(chunks))], 'Data': chunks})
    df_features = chunked_df['Data'].apply(get_mfcc)
    df = pd.concat([chunked_df, df_features], axis=1)
    df = df.drop(columns=['Data'])
    df.columns = ['Chunk_order'] + [f'Feature_{i+1}' for i in range(95)]
    df['Prediction'] = model.predict(df.drop(columns=['Chunk_order']))
    # Map the raw training labels to the labels shown in the UI.
    df['Prediction'] = df['Prediction'].map({
        'pozitive_normal': 'Normal',
        'scope': 'Silence',
        'neqativ': 'Negative'
    })
    clean_df = df[['Chunk_order', 'Prediction']]
    predictions = df['Prediction'].tolist()
    # Overall label: Negative wins over Normal, which wins over Silence.
    final_prediction = 'Negative' if 'Negative' in predictions else 'Normal' if 'Normal' in predictions else 'Silence'
    final_prediction_2x = 'Negative' if predictions.count('Negative') > 1 else 'Normal' if 'Normal' in predictions else 'Silence'
    color_map = {
        'Normal': 'success',
        'Silence': 'warning',
        'Negative': 'danger'
    }
    return (', '.join(predictions), final_prediction)
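
# Illustrative only (not part of the original app): the pipeline can also be
# exercised directly, assuming one of the bundled example recordings exists
# next to this script:
#   per_chunk, overall = analyze_sentiment("./Recording_1.wav")
#   print(per_chunk, overall)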
title = """<h1 align="center">π€ Azerbaijani Audio Speech Sentiment Analysis π¬</h1>""" | |
image_path = "thmbnail.jpg" | |
description = """ | |
π» This demo showcases a general-purpose sentiment analysis process. It is trained on a collection of audio calls from banking/fintech industries based on audio features. The main analysis predicts one of the categories (Normal/Negative/Silence) for each 15-second bucket in the audio. The final category for the whole audio is also estimated. | |
<br> | |
βοΈ Components of the tool:<br> | |
<br> | |
- Sentiment analysis directly of the audios.<br> | |
<br> | |
β Use the microphone for real-time audio recording.<br> | |
β Or upload an audio file.<br> | |
<br> | |
β‘οΈ The model will extract audio features and perform sentiment analysis on the audio.<br> | |
""" | |
custom_css = """
#banner-image {
    display: block;
    margin-left: auto;
    margin-right: auto;
}

#chat-message {
    font-size: 14px;
    min-height: 300px;
}
"""
block = gr.Blocks(css=custom_css)

with block:
    gr.HTML(title)
    with gr.Row():
        with gr.Column():
            gr.HTML(description)
        with gr.Column():
            gr.Image(image_path, elem_id="banner-image", show_label=False)

    gr.Interface(
        fn=analyze_sentiment,
        inputs=[
            gr.Audio(sources=["upload", "microphone"], type="filepath", label="Input Audio"),
        ],
        outputs=[
            gr.Textbox(label="Sentiment Analysis Results of 15-second buckets"),
            gr.Textbox(label="Final Prediction"),
        ],
        # layout="vertical",
        # theme="huggingface",
        examples=[
            ["./Recording_1.wav"],
            ["./Recording_2.wav"],
        ],
        cache_examples=False,
        allow_flagging="never",
    )
    # gr.TabbedInterface([mic, file], ["Audio from Microphone", "Audio from File"])

block.launch()