Upload app.py
app.py CHANGED
@@ -1,7 +1,6 @@
 import gradio as gr
 import numpy as np
 import matplotlib.pyplot as plt
-import librosa
 
 HOME_DIR = ""
 local_config_path = 'config.json'
@@ -109,7 +108,7 @@ saved_model = torch.load(model_path, map_location=torch.device('cpu'))
 model = Wav2Vec2ForSpeechClassification(config=config)
 
 # Load the state dictionary
-model.load_state_dict(saved_model
+model.load_state_dict(saved_model)
 
 print("Model initialized successfully.")
 
@@ -117,10 +116,10 @@ model.eval()
 
 
 def recognize_emotion(audio):
+    import librosa
     # Load the audio file using librosa
-
+
     sample_rate, audio_data = audio
-    print(audio_data)
 
     # Ensure audio data is in floating-point format
     if not np.issubdtype(audio_data.dtype, np.floating):
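
For context, a minimal sketch of how the patched recognize_emotion plausibly fits together, based only on the lines visible in this diff: the deferred librosa import, the (sample_rate, audio_data) tuple that Gradio's Audio component passes in, and the floating-point guard. The int-to-float normalization, mono downmix, and 16 kHz resampling target are illustrative assumptions, not part of this commit.

import numpy as np

def recognize_emotion(audio):
    import librosa  # deferred import, as introduced in this commit

    # Gradio's Audio component delivers a (sample_rate, numpy array) tuple
    sample_rate, audio_data = audio

    # Ensure audio data is in floating-point format (guard visible in the diff);
    # the peak-based normalization is an assumed choice, not shown in the diff
    if not np.issubdtype(audio_data.dtype, np.floating):
        peak = np.iinfo(audio_data.dtype).max
        audio_data = audio_data.astype(np.float32) / peak

    # Assumed: downmix stereo input to mono before feature extraction
    if audio_data.ndim > 1:
        audio_data = audio_data.mean(axis=1)

    # Assumed downstream step: resample to the rate a wav2vec2 model expects
    audio_data = librosa.resample(audio_data, orig_sr=sample_rate, target_sr=16000)
    ...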