# NOTE(review): the six lines that followed were scraped Hugging Face Spaces page
# metadata ("Spaces: / Runtime error / file size / commit hashes / line-number
# gutter"), not program text; they made the file unrunnable and are preserved
# here only as a comment.
import streamlit as st
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import joblib
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
from sklearn.metrics.pairwise import cosine_similarity
# --- Module-level artifact loading (runs once at app startup) ---
# Load the LSTM model for emotion prediction
emotion_model = load_model('lstm_model.h5')
# Load the KNN model
knn_model = joblib.load('knn_model.joblib')
# Load the tokenizer
tokenizer = joblib.load('tokenizer.pkl')
# Load the dataset
# NOTE(review): expected to contain at least 'lyrics', 'mood_cats', 'title',
# 'artist' and the 13 audio-feature columns used below — verify against df1.csv.
df = pd.read_csv('df1.csv')
# Load the scaler for KNN
# NOTE(review): this creates a brand-new, UNFITTED scaler rather than loading the
# one the KNN model was trained with (e.g. via joblib). Downstream code that
# fit_transforms a single row will zero out every feature — the fitted training
# scaler should be persisted and loaded here instead. TODO confirm.
scaler_knn = StandardScaler()
# Function for hybrid recommendation
def hybrid_recommendation(song_index, n_recs=5):
    """Return indices of songs recommended for the song at ``song_index``.

    Hybrid of two signals, merged in priority order:
      1. KNN neighbours over mood category + standardized audio features.
      2. Content-based cosine similarity over raw audio features.

    Parameters
    ----------
    song_index : int
        Row position of the query song in the module-level ``df``.
    n_recs : int, optional
        Number of recommendations to return (default 5; preserves the
        original hard-coded top-5 behaviour).

    Returns
    -------
    numpy.ndarray
        Up to ``n_recs`` row indices into ``df`` (the query song excluded).
    """
    audio_cols = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',
                  'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',
                  'duration_ms', 'time_signature']
    query_data = df.iloc[song_index]

    # --- Emotion prediction from lyrics (LSTM) ---
    sequence = tokenizer.texts_to_sequences([query_data['lyrics']])
    padded_sequence = pad_sequences(sequence, maxlen=50)
    predicted_emotion = emotion_model.predict(padded_sequence).flatten()
    # Map the argmax of the model output onto its labelled category.
    emotion_mapping = {0: 'happy', 1: 'sad', 2: 'calm', 3: 'anger'}
    emotion_category = emotion_mapping[int(np.argmax(predicted_emotion))]
    # NOTE(review): the original code passed this label string straight into
    # cosine_similarity, which raises at runtime. The label is computed but not
    # yet folded into the ranking — TODO decide how emotion should weight it.
    _ = emotion_category

    # --- KNN branch ---
    # BUG FIX: fit the scaler on the whole dataset, then transform the query.
    # The original called fit_transform on a single row, which standardizes
    # every feature to exactly 0 (mean of one sample is itself).
    audio_features_scaled = scaler_knn.fit(df[audio_cols]).transform(
        query_data[audio_cols].values.reshape(1, -1))
    # BUG FIX: build one aligned feature row. The original concatenated two
    # frames with mismatched indices (producing a 2-row NaN frame) and read
    # `.columns` off a NumPy array (AttributeError).
    combined_features = pd.DataFrame(audio_features_scaled, columns=audio_cols)
    # NOTE(review): assumes 'mood_cats' is numerically encoded as at KNN
    # training time — confirm against the training pipeline.
    combined_features.insert(0, 'mood_cats', [query_data['mood_cats']])
    # Ask for one extra neighbour so the query song itself can be dropped.
    knn_recommendations = knn_model.kneighbors(
        combined_features, n_neighbors=n_recs + 1, return_distance=False)[0]

    # --- Content-based branch (cosine similarity over raw audio features) ---
    features_for_similarity = df[audio_cols].values
    # BUG FIX: compare the query song's audio-feature vector against all songs
    # (the original passed the emotion label string, a runtime error).
    cosine_similarities = cosine_similarity(
        query_data[audio_cols].values.reshape(1, -1),
        features_for_similarity).flatten()
    content_ranking = np.argsort(-cosine_similarities)

    # --- Merge the two ranked lists ---
    # BUG FIX: the original argsorted a concatenation of KNN *indices* with
    # cosine *scores*, mixing incompatible quantities. Instead take KNN hits
    # first, then content-based hits, skipping the query and duplicates.
    hybrid = []
    for idx in list(knn_recommendations) + list(content_ranking):
        if idx != song_index and idx not in hybrid:
            hybrid.append(int(idx))
        if len(hybrid) == n_recs:
            break
    return np.array(hybrid)
# --- Streamlit UI: title, query input, and recommendation display ---
st.title('Hybrid Recommender App')

# Ask the user which song (by dataframe row position) to base recommendations on.
song_index_to_recommend = st.number_input(
    'Enter song index:', min_value=0, max_value=len(df)-1, value=0)

# Compute and render the hybrid recommendations for the chosen song.
hybrid_recs = hybrid_recommendation(song_index_to_recommend)
st.write("Hybrid Recommendations:")
for index in hybrid_recs:
    song = df.iloc[index]
    st.write(f"Song Index: {index}, Title: {song['title']}, Artist: {song['artist']}")