from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
from tqdm.auto import tqdm
import streamlit as st

# Register tqdm's pandas integration (only needed if progress_apply is used later)
tqdm.pandas()


def predict_popularity(features):
    # Predict popularity for a single feature vector with both models.
    # Relies on the globally fitted rf_model and model defined below, so call
    # this only after both models have been trained.
    return [rf_model.predict([features]), model.predict([features])]


data = pd.read_csv('top50.csv', encoding='ISO-8859-1')
print(data.head())

# Let's also describe the data to get a sense of the distributions
print(data.describe())

# Selecting the features and the target variable
X = data.drop(['Unnamed: 0', 'Track.Name', 'Artist.Name', 'Genre', 'Popularity'], axis=1)
y = data['Popularity']

# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Initializing the Linear Regression model
model = LinearRegression()

# Fitting the model
model.fit(X_train, y_train)

# Making predictions
y_pred = model.predict(X_test)

# Calculating the performance metrics
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)

# Initializing the Random Forest Regressor
rf_model = RandomForestRegressor(n_estimators=100, random_state=42)

# Fitting the model
rf_model.fit(X_train, y_train)

# Making predictions
rf_pred = rf_model.predict(X_test)

# Calculating the performance metrics
rf_mse = mean_squared_error(y_test, rf_pred)
rf_r2 = r2_score(y_test, rf_pred)

# Feature importances
feature_importances = rf_model.feature_importances_

# Create a pandas Series with feature importances
importances = pd.Series(feature_importances, index=X.columns)

# Sort the feature importances in descending order
sorted_importances = importances.sort_values(ascending=False)
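
# --- Added sketch (not in the original script) ---
# streamlit is imported above but never used; below is a minimal sketch of how the
# fitted models and predict_popularity might back a simple Streamlit form. The page
# title, widget labels, and the choice to report both models are assumptions.
st.title('Song Popularity Predictor')
st.write(f'Linear regression test MSE: {mse:.2f}, R^2: {r2:.3f}')
st.write(f'Random forest test MSE: {rf_mse:.2f}, R^2: {rf_r2:.3f}')
st.bar_chart(sorted_importances)

# Build one numeric input per feature column, defaulting to the column mean
user_features = [st.number_input(col, value=float(X[col].mean())) for col in X.columns]
if st.button('Predict popularity'):
    rf_prediction, lr_prediction = predict_popularity(user_features)
    st.write(f'Random forest prediction: {rf_prediction[0]:.1f}')
    st.write(f'Linear regression prediction: {lr_prediction[0]:.1f}')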