Ayushdavidkushwahaaaa committed on
Commit
3ad24cf
1 Parent(s): 3cc388e

Create app.py


Yo it's a tested version.....

Files changed (1)
  1. app.py +95 -0
app.py ADDED
@@ -0,0 +1,95 @@
+ # Dependencies: the openai-whisper package (installed from its GitHub repository)
+ # and transformers. Install them before running this script, e.g.:
+ #   pip install git+https://github.com/openai/whisper.git
+ #   pip install transformers
+
+ import gradio as gr
+ import torch
+ import whisper  # Now you should be able to import the whisper module
+ import warnings
+ import os
+ import librosa
+ import numpy as np
+ from transformers import pipeline
+
+ warnings.filterwarnings('ignore')
+
+ MODEL_NAME = "openai/whisper-small"
+ BATCH_SIZE = 8
+
+ device = 0 if torch.cuda.is_available() else "cpu"
+
+ # Whisper for transcription
+ pipe = pipeline(
+     task="automatic-speech-recognition",
+     model=MODEL_NAME,
+     chunk_length_s=30,
+     device=device
+ )
+
+ # Emotion classifier for text-based classification
+ emotion_classifier = pipeline("text-classification", model='MilaNLProc/xlm-emo-t', return_all_scores=True)
+
+ # Function to extract prosodic features using librosa
+ def extract_audio_features(audio_file):
+     y, sr = librosa.load(audio_file)
+
+     # Pitch (fundamental frequency): mean over bins/frames with non-zero magnitude
+     pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
+     voiced_pitches = pitches[magnitudes > 0]
+     pitch = float(np.mean(voiced_pitches)) if voiced_pitches.size > 0 else 0.0
+
+     # Intensity (RMS)
+     rms = float(np.mean(librosa.feature.rms(y=y)))
+
+     # Loudness (perceptual weighting of the power spectrogram; librosa defaults to A-weighting)
+     S = np.abs(librosa.stft(y)) ** 2
+     loudness = float(np.mean(librosa.perceptual_weighting(S, librosa.fft_frequencies(sr=sr))))
+
+     return {
+         "pitch": pitch,
+         "rms": rms,
+         "loudness": loudness
+     }
+
+ # Function to transcribe and classify emotions (dual-pipeline)
+ def translate_and_classify(audio):
+     # Step 1: Transcribe audio to text using Whisper
+     text_result = pipe(audio, batch_size=BATCH_SIZE)["text"]
+
+     # Step 2: Extract prosodic features from the audio using librosa
+     prosodic_features = extract_audio_features(audio)
+
+     # Step 3: Use the emotion classifier on the transcribed text
+     emotion = emotion_classifier(text_result)
+     detected_emotion = {}
+     for emotion_item in emotion[0]:
+         detected_emotion[emotion_item["label"]] = emotion_item["score"]
+
+     # Combine prosodic features and text-based emotion detection
+     combined_result = {
+         "transcription": text_result,
+         "text_based_emotion": detected_emotion,
+         "prosody": prosodic_features
+     }
+
+     return combined_result["transcription"], combined_result["text_based_emotion"], combined_result["prosody"]
+
+ # Gradio UI
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """# Emotion Detection from Speech
+
+ ##### Detection of anger, sadness, joy, fear in speech using OpenAI Whisper, XLM-RoBERTa, and prosodic features (pitch, loudness, intensity)
+ """)
+
+     with gr.Column():
+         with gr.Tab("Record Audio"):
+             audio_input_r = gr.Audio(label='Record Audio Input', sources=["microphone"], type="filepath")
+             transcribe_audio_r = gr.Button('Transcribe')
+
+         with gr.Tab("Upload Audio as File"):
+             audio_input_u = gr.Audio(label='Upload Audio', sources=["upload"], type="filepath")
+             transcribe_audio_u = gr.Button('Transcribe')
+
+     with gr.Row():
+         transcript_output = gr.Textbox(label="Transcription", lines=3)
+         emotion_output = gr.Label(label="Detected Emotion from Text")
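
As committed, the file ends here: neither Transcribe button is wired to `translate_and_classify`, and nothing displays the prosodic features returned as the third value. A minimal sketch of the missing glue, appended inside the `with gr.Blocks() as demo:` block, might look like the following. The `prosody_output` name and the `demo.launch()` call are assumptions, not part of this commit (Hugging Face Spaces can also launch a top-level `demo` on its own).

```python
    # Hypothetical wiring, not in this commit: a prosody display plus click handlers.
    with gr.Row():
        prosody_output = gr.JSON(label="Prosodic Features (pitch, RMS, loudness)")

    transcribe_audio_r.click(
        fn=translate_and_classify,
        inputs=audio_input_r,
        outputs=[transcript_output, emotion_output, prosody_output],
    )
    transcribe_audio_u.click(
        fn=translate_and_classify,
        inputs=audio_input_u,
        outputs=[transcript_output, emotion_output, prosody_output],
    )

# Needed when running the script directly; Spaces launches a top-level `demo` itself.
if __name__ == "__main__":
    demo.launch()
```

With wiring along these lines, either tab feeds its recorded or uploaded audio file path through the Whisper pipeline, the librosa feature extractor, and the text emotion classifier, and the three return values land in the transcription textbox, the emotion label, and the prosody display respectively.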