Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -6,7 +6,6 @@ import joblib
 import pandas as pd
 from sklearn.neighbors import NearestNeighbors
 from sklearn.preprocessing import StandardScaler
-from sklearn.metrics.pairwise import cosine_similarity
 
 # Load the LSTM model for emotion prediction
 emotion_model = load_model('lstm_model.h5')
@@ -20,7 +19,7 @@ tokenizer = joblib.load('tokenizer.pkl')
 # Load the dataset
 df = pd.read_csv('df1.csv')
 
-# Preprocess for
+# Preprocess for KNN
 audio_feature_columns = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
                          'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
                          'duration_ms', 'time_signature']
@@ -29,28 +28,12 @@ audio_features = df[audio_feature_columns]
 mood_cats = df[['mood_cats']]
 mood_cats_df = pd.DataFrame(mood_cats)
 
-# Normalize audio features for
-scaler_cb = StandardScaler()
-audio_features_scaled_cb = scaler_cb.fit_transform(audio_features)
-audio_features_df_cb = pd.DataFrame(audio_features_scaled_cb, columns=audio_feature_columns)
-combined_features_cb = pd.concat([mood_cats, audio_features_df_cb], axis=1)
-
-# Preprocessing for KNN
+# Normalize audio features for KNN
 scaler_knn = StandardScaler()
 audio_features_scaled_knn = scaler_knn.fit_transform(audio_features)
 audio_features_df_knn = pd.DataFrame(audio_features_scaled_knn, columns=audio_feature_columns)
 combined_features_knn = pd.concat([mood_cats_df, audio_features_df_knn], axis=1)
 
-# Function for content-based recommendation
-def recommend_cont(song_index, num_recommendations=5):
-    song_similarity = similarity_matrix[song_index]
-    # Get indices and similarity scores of top similar songs
-    similar_songs = sorted(list(enumerate(song_similarity)), key=lambda x: x[1], reverse=True)[1:num_recommendations+1]
-    recommended_song_indices = [idx for idx, similarity in similar_songs]
-    recommended_songs = df.iloc[recommended_song_indices].copy()
-    recommended_songs['score'] = [similarity for idx, similarity in similar_songs]
-    return recommended_songs
-
 # Function for KNN-based recommendation
 def recommend_knn(query_index, n_recommendations=5):
     distances, indices = knn_model.kneighbors(combined_features_knn.iloc[query_index].values.reshape(1, -1), n_neighbors=n_recommendations)
@@ -59,22 +42,8 @@ def recommend_knn(query_index, n_recommendations=5):
     recommended_songs['score'] = 1 / (1 + distances.flatten())  # Inverse of distance
     return recommended_songs
 
-# Function for hybrid recommendation
-def hybrid_recommendation(song_index, top_n=10):
-    # Get recommendations from both models
-    content_based_recs = recommend_cont(song_index, top_n)
-    knn_based_recs = recommend_knn(song_index, top_n)
-
-    # Combine recommendations
-    combined_recs = pd.concat([content_based_recs, knn_based_recs])
-
-    # Group by song index (or identifier) and average scores
-    hybrid_recs = combined_recs.groupby(combined_recs.index).mean().sort_values(by='score', ascending=False).head(top_n)
-
-    return hybrid_recs
-
 # Set up the title of the app
-st.title('
+st.title('KNN Recommender App')
 
 # Get song index from user input
 song_index_to_recommend = st.number_input('Enter song index:', min_value=0, max_value=len(df)-1, value=0)
@@ -90,11 +59,11 @@ emotion = emotion_model.predict(padded_sequence).flatten()
 # Combine emotion and audio features for recommendation
 combined_features = np.concatenate([emotion, audio_features_scaled_knn[song_index_to_recommend]])
 
-# Get
-
+# Get KNN-based recommendations
+knn_recs = recommend_knn(song_index_to_recommend)
 
 # Display the predicted emotion and recommendations
 st.write(f"Predicted Emotion: {emotion}")
-st.write("
-for index in
-st.write(f"Song Index: {index}, Title: {df.iloc[index]['title']}, Artist: {df.iloc[index]['artist']}, Score: {
+st.write("KNN Recommendations:")
+for index in knn_recs.index:
+    st.write(f"Song Index: {index}, Title: {df.iloc[index]['title']}, Artist: {df.iloc[index]['artist']}, Score: {knn_recs.loc[index, 'score']}")
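Note: the hunks above query knn_model.kneighbors(...), but the fitting of knn_model happens outside the displayed context lines. The following is a minimal, hypothetical sketch, not the app's actual code, of how such a scikit-learn NearestNeighbors index could be fit on combined_features_knn and queried the way recommend_knn does. The synthetic DataFrame and the one-hot encoding of mood_cats are assumptions standing in for df1.csv and for preprocessing that the diff does not show.

# Hypothetical sketch only -- synthetic data stands in for df1.csv, and the
# one-hot encoding of 'mood_cats' is an assumption (the real preprocessing
# is not shown in the diff).
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler

audio_feature_columns = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
                         'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
                         'duration_ms', 'time_signature']

# Synthetic stand-in for the real dataset.
rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(50, len(audio_feature_columns))), columns=audio_feature_columns)
df['mood_cats'] = rng.choice(['happy', 'sad', 'calm'], size=len(df))
df['title'] = [f'song_{i}' for i in range(len(df))]
df['artist'] = [f'artist_{i}' for i in range(len(df))]

# Scale the audio features, as in app.py.
scaler_knn = StandardScaler()
audio_features_scaled_knn = scaler_knn.fit_transform(df[audio_feature_columns])
audio_features_df_knn = pd.DataFrame(audio_features_scaled_knn, columns=audio_feature_columns)

# Assumption: encode the categorical mood column so the feature matrix is numeric.
mood_dummies = pd.get_dummies(df['mood_cats'], prefix='mood').astype(float)
combined_features_knn = pd.concat([mood_dummies, audio_features_df_knn], axis=1)

# Fit an unsupervised nearest-neighbours index on the combined features.
knn_model = NearestNeighbors(metric='euclidean')
knn_model.fit(combined_features_knn.values)

def recommend_knn(query_index, n_recommendations=5):
    # Query with a single row, mirroring the function shown in the diff.
    distances, indices = knn_model.kneighbors(
        combined_features_knn.iloc[query_index].values.reshape(1, -1),
        n_neighbors=n_recommendations)
    recommended_songs = df.iloc[indices.flatten()].copy()
    recommended_songs['score'] = 1 / (1 + distances.flatten())  # Inverse of distance
    return recommended_songs

print(recommend_knn(0)[['title', 'artist', 'score']])

The score column mirrors the diff's 1 / (1 + distance) line, which maps smaller distances to larger scores so results can be ranked like similarities.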