Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,156 +1,82 @@
-import librosa as lb
 import soundfile as sf
 import numpy as np
-import os
-import glob
-import pickle
-import sounddevice as sd
-import time
 import requests
 import webbrowser
-import
- [... removed lines not rendered in the diff view ...]
-        stft = np.abs(lb.stft(audio))
-        chroma = np.mean(lb.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
-        result = np.hstack((result, chroma))
-    if mel:
-        mel = np.mean(lb.feature.melspectrogram(audio, sr=sample_rate).T, axis=0)
-        result = np.hstack((result, mel))
-    return result
-
-def get_emotion_from_file_name(file_name):
-    return emotion_labels[file_name.split("-")[2]]  # Adjust based on your actual filename structure
-
-def loading_audio_data():
-    x = []  # Input - features
-    y = []  # Output - emotion labels
-
-    # Go through all sound files
-    for file in glob.glob("data/Actor_*/*.wav"):
-        file_name = os.path.basename(file)
-        emotion = get_emotion_from_file_name(file_name)
-
-        # Use only focused emotions
-        if emotion in focused_emotion_labels:
-            try:
-                feature = audio_features(file, mfcc=True, chroma=True, mel=True)
-                x.append(feature)
-                y.append(emotion)
-            except Exception as e:
-                print(f"This file wasn't processed due to an error: {file} - {e}")
-
-    # Split the dataset into training and testing
-    return train_test_split(np.array(x), y, test_size=0.1, random_state=9)
-
-def record_sound():
-    fs = 44100  # Sample rate
-    seconds = 3  # Duration of recording
-
-    print("Recording in 3")
-    time.sleep(1)
-    print("Recording in 2")
-    time.sleep(1)
-    print("Recording in 1")
-    time.sleep(1)
-
-    # Record and save
-    my_recording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)
-    print("Recording: Started")
-    sd.wait()
-    print("Recording: Stopped")
-    write('output.wav', fs, my_recording)
-
-    return 'output.wav'

 def get_playlist(mood):
     url = "https://unsa-unofficial-spotify-api.p.rapidapi.com/search"
-    querystring = {"query": mood, "count":
-    headers = {
         'x-rapidapi-host': "unsa-unofficial-spotify-api.p.rapidapi.com"
     }

- [... removed lines not rendered in the diff view ...]
-        response.raise_for_status()  # Raises error for bad responses
-        playlist_id = response.json()["Results"][random.randint(0, 9)]["id"]
-        return playlist_id
-    except requests.exceptions.RequestException as e:
-        print(f"Error fetching playlist data: {e}")
-        return None
-
-def open_playlist_in_browser(playlist_id):
-    webbrowser.open('https://open.spotify.com/playlist/' + str(playlist_id))
-
-def train_model():
-    X_train, X_test, y_train, y_test = loading_audio_data()
- [... removed lines not rendered in the diff view ...]
-    model.fit(X_train, y_train)
- [... removed lines not rendered in the diff view ...]
-    feature = audio_features(my_sound_file, mfcc=True, chroma=True, mel=True)
-    mood_prediction = model.predict([feature])[0]
- [... removed lines not rendered in the diff view ...]
-    return mood_prediction
-
- [... removed lines not rendered in the diff view ...]
-    if os.path.exists('emotion_model.pkl'):
-        with open('emotion_model.pkl', 'rb') as model_file:
-            model = pickle.load(model_file)
-        print("Loaded existing model.")
-    else:
-        model, accuracy = train_model()
-        if accuracy > 60:  # You can adjust this threshold as needed
-            mood = recognize_your_mood(model)
-            playlist_id = get_playlist(mood)
-            if playlist_id:
-                open_playlist_in_browser(playlist_id)

 if __name__ == "__main__":
     main()
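The removed version (the lines marked `-` above) was a classic librosa plus scikit-learn pipeline: hand-crafted MFCC/chroma/mel features, a classifier trained on RAVDESS-style `data/Actor_*` folders, and a pickled model. Several of its lines, including the opening of `audio_features` and the classifier setup, are not rendered on this page. For reference, here is a minimal sketch of how such a feature extractor is commonly written, consistent with the surviving chroma/mel lines; the `lb.load` call and `n_mfcc=40` are assumptions, not recovered code:

    import librosa as lb
    import numpy as np

    def audio_features(file_name, mfcc=True, chroma=True, mel=True):
        # Load at the file's native rate; average each feature over time,
        # then stack everything into one fixed-length vector.
        audio, sample_rate = lb.load(file_name, sr=None)
        result = np.array([])
        if mfcc:
            # n_mfcc=40 is a common choice for speech-emotion work, assumed here.
            mfccs = np.mean(lb.feature.mfcc(y=audio, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))
        if chroma:
            stft = np.abs(lb.stft(audio))
            chroma = np.mean(lb.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma))
        if mel:
            mel = np.mean(lb.feature.melspectrogram(y=audio, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel))
        return result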
+import torch
+from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
+import sounddevice as sd
 import soundfile as sf
 import numpy as np
 import requests
 import webbrowser
+from sklearn.preprocessing import LabelEncoder
+
+# Load pre-trained model and tokenizer
+model_name = "facebook/wav2vec2-large-xlsr-53"  # Change to the specific model you need for emotion recognition
+tokenizer = Wav2Vec2Tokenizer.from_pretrained(model_name)
+model = Wav2Vec2ForCTC.from_pretrained(model_name)
+
+# Function to record audio
+def record_audio(duration=5, fs=16000):
+    print("Recording...")
+    audio = sd.rec(int(duration * fs), samplerate=fs, channels=1, dtype='float32')
+    sd.wait()  # Wait until recording is finished
+    print("Recording finished.")
+    return audio.flatten()
+
+# Function to save audio file
+def save_audio(filename, audio, fs=16000):
+    sf.write(filename, audio, fs)
+
+# Function for emotion recognition
+def recognize_emotion(audio):
+    # Convert audio array to input suitable for the model
+    input_values = tokenizer(audio, return_tensors='pt', padding='longest', sampling_rate=16000).input_values
+
+    # Run the model and keep the most likely token at each frame
+    with torch.no_grad():
+        logits = model(input_values).logits
+        predicted_ids = torch.argmax(logits, dim=-1)
+
+    # Decode the predicted IDs to text
+    transcription = tokenizer.decode(predicted_ids[0])

+    return transcription  # Return the detected text
+
+# Function to map emotion text to playlist (customizable)
 def get_playlist(mood):
     url = "https://unsa-unofficial-spotify-api.p.rapidapi.com/search"
+    querystring = {"query": mood, "count": "10", "type": "playlists"}
+    headers = {
+        'x-rapidapi-key': "your-api-key",  # Replace with your actual API key
         'x-rapidapi-host': "unsa-unofficial-spotify-api.p.rapidapi.com"
     }

+    response = requests.get(url, headers=headers, params=querystring)
+    playlist_id = response.json()["Results"][0]["id"]  # Choose the first playlist

+    return playlist_id

+# Function to open playlist URL
+def open_playlist(playlist_id):
+    webbrowser.open(f'https://open.spotify.com/playlist/{playlist_id}')

+# Main function to run the recorder and emotion recognizer
+def main():
+    try:
+        # Record audio
+        audio = record_audio()

+        # Save audio to file
+        filename = "output.wav"
+        save_audio(filename, audio)

+        # Recognize the mood/emotion from audio
+        emotion_text = recognize_emotion(audio)
+        print(f"Detected Emotion: {emotion_text}")

+        # Get playlist based on detected emotion
+        playlist_id = get_playlist(emotion_text)
+        open_playlist(playlist_id)

+    except Exception as e:
+        print(f"An error occurred: {e}")

 if __name__ == "__main__":
     main()
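A caveat on the replacement: `Wav2Vec2ForCTC` is a speech-to-text head, so `recognize_emotion` returns a transcription of the words spoken, not an emotion label; and `facebook/wav2vec2-large-xlsr-53` is a pretraining-only checkpoint with no CTC vocabulary, so `Wav2Vec2Tokenizer.from_pretrained` has nothing to load, which is one plausible source of the Space's runtime error. If the goal is an actual emotion label, a checkpoint fine-tuned for audio classification is the usual route. A minimal sketch using the `transformers` audio-classification pipeline; the checkpoint `superb/wav2vec2-base-superb-er` and its label set are assumptions, not part of this commit:

    from transformers import pipeline

    # A wav2vec2 checkpoint fine-tuned for speech emotion recognition (SUPERB "er" task).
    classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-er")

    def recognize_emotion_from_file(filename):
        # The pipeline accepts an audio file path and decodes/resamples it itself.
        scores = classifier(filename)
        return max(scores, key=lambda s: s["score"])["label"]  # e.g. 'neu', 'hap', 'ang', 'sad'

This would slot into `main()` as `emotion_text = recognize_emotion_from_file(filename)` after the recording is saved.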
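A smaller regression: the removed `get_playlist` wrapped the request in `try`/`except` and called `response.raise_for_status()`, while the new one assumes the call succeeds and that `Results` is non-empty, so any API hiccup propagates to the generic handler in `main()`. A sketch restoring that defensiveness around the same endpoint (the `"your-api-key"` placeholder is the commit's own and still needs a real key):

    import requests

    def get_playlist(mood):
        url = "https://unsa-unofficial-spotify-api.p.rapidapi.com/search"
        querystring = {"query": mood, "count": "10", "type": "playlists"}
        headers = {
            'x-rapidapi-key': "your-api-key",
            'x-rapidapi-host': "unsa-unofficial-spotify-api.p.rapidapi.com"
        }
        try:
            response = requests.get(url, headers=headers, params=querystring, timeout=10)
            response.raise_for_status()  # surface HTTP errors instead of failing on json()["Results"]
            results = response.json().get("Results", [])
            return results[0]["id"] if results else None
        except requests.exceptions.RequestException as e:
            print(f"Error fetching playlist data: {e}")
            return None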
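Separately, `sounddevice` needs PortAudio and a real input device, and a hosted Space container typically has neither, so `record_audio()` is another likely culprit behind the Runtime error badge. A sketch of a fallback that keeps the rest of the flow testable in a headless environment; the bundled `sample.wav` asset is hypothetical, not in the repo:

    import sounddevice as sd
    import soundfile as sf

    def get_audio(duration=5, fs=16000, fallback="sample.wav"):
        try:
            # Works only where a microphone is available.
            audio = sd.rec(int(duration * fs), samplerate=fs, channels=1, dtype='float32')
            sd.wait()
            return audio.flatten()
        except Exception:
            # Headless fallback: read a pre-recorded clip instead.
            audio, _ = sf.read(fallback, dtype='float32')
            if audio.ndim > 1:
                audio = audio.mean(axis=1)  # downmix stereo to mono
            return audio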