final
Changed files:
- app.py +103 -0
- books.csv +0 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,103 @@
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

# Streamlit App Title
st.title("Book Recommendation System")

# Load dataset, skipping malformed rows
file_path = "books.csv"
df = pd.read_csv(file_path, on_bad_lines="skip", engine="python")

# Select only the relevant columns that actually exist in the file
expected_columns = ['bookID', 'title', 'authors', 'average_rating', 'isbn', 'isbn13', 'language_code', 'num_pages', 'ratings_count', 'text_reviews_count', 'publication_date', 'publisher']
available_columns = [col for col in expected_columns if col in df.columns]
df = df[available_columns]
df = df.dropna()

# Coerce numeric columns, then drop rows that failed conversion
numeric_columns = ['average_rating', 'ratings_count', 'text_reviews_count', 'num_pages']
for col in numeric_columns:
    if col in df.columns:
        df[col] = pd.to_numeric(df[col], errors='coerce')
df = df.dropna()

# Handle categorical columns
categorical_columns = ['title', 'authors', 'publisher']
for col in categorical_columns:
    if col in df.columns:
        df[col] = df[col].astype(str)  # Ensure all values are strings

# Create tabs
tab1, tab2, tab3 = st.tabs(["Dataset Overview", "Visualization Matrix", "Book Prediction Based on Input"])

with tab1:
    st.write("### Data Preview")
    st.write(df.head())
    st.write("### Summary Statistics")
    st.write(df.describe())

with tab2:
    st.write("### Clustering Visualization using K-Means")
    # Offer only numeric columns: K-Means cannot operate on raw strings
    numeric_options = df.select_dtypes(include=np.number).columns.tolist()
    features = st.multiselect("Select Features for Clustering", numeric_options)
    k = st.slider("Select Number of Clusters (K)", min_value=2, max_value=10, value=3)
    if st.button("Run K-Means Clustering"):
        if len(features) == 2:
            X = df[features]
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)

            kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
            df['Cluster'] = kmeans.fit_predict(X_scaled)

            fig = plt.figure(figsize=(8, 6))
            sns.scatterplot(x=df[features[0]], y=df[features[1]], hue=df['Cluster'], palette='viridis')
            plt.title("Book Clustering Visualization")
            st.pyplot(fig)
        else:
            st.write("Please select exactly two features for visualization.")

with tab3:
    st.write("### Predict Books Based on User Input")

    avg_rating = st.number_input("Enter desired Average Rating", min_value=float(df['average_rating'].min()), max_value=float(df['average_rating'].max()), value=float(df['average_rating'].median()))
    ratings_count = st.number_input("Enter desired Ratings Count", min_value=float(df['ratings_count'].min()), max_value=float(df['ratings_count'].max()), value=float(df['ratings_count'].median()))
    text_reviews_count = st.number_input("Enter desired Text Reviews Count", min_value=float(df['text_reviews_count'].min()), max_value=float(df['text_reviews_count'].max()), value=float(df['text_reviews_count'].median()))

    if st.button("Find Matching Books"):
        filtered_books = df.copy()

        # Keep books whose stats fall within ±20% of each requested value
        lower_bound_avg = avg_rating * 0.8
        upper_bound_avg = avg_rating * 1.2
        filtered_books = filtered_books[(filtered_books['average_rating'] >= lower_bound_avg) & (filtered_books['average_rating'] <= upper_bound_avg)]

        lower_bound_ratings = ratings_count * 0.8
        upper_bound_ratings = ratings_count * 1.2
        filtered_books = filtered_books[(filtered_books['ratings_count'] >= lower_bound_ratings) & (filtered_books['ratings_count'] <= upper_bound_ratings)]

        lower_bound_reviews = text_reviews_count * 0.8
        upper_bound_reviews = text_reviews_count * 1.2
        filtered_books = filtered_books[(filtered_books['text_reviews_count'] >= lower_bound_reviews) & (filtered_books['text_reviews_count'] <= upper_bound_reviews)]

        output_columns = ['title', 'authors'] + [col for col in ['bookID', 'average_rating', 'isbn', 'isbn13', 'language_code', 'num_pages', 'ratings_count', 'text_reviews_count', 'publication_date', 'publisher'] if col in df.columns]

        if not filtered_books.empty:
            st.write("### Books Matching Your Preferences")
            st.write(filtered_books[output_columns].head(10))
        else:
            # Fall back to the 10 nearest books by L1 distance on the three inputs
            st.write("No exact matches found. Showing closest books instead.")
            df['distance'] = (
                abs(df['average_rating'] - avg_rating) +
                abs(df['ratings_count'] - ratings_count) +
                abs(df['text_reviews_count'] - text_reviews_count)
            )
            sorted_books = df.nsmallest(10, 'distance')
            st.write(sorted_books[output_columns].head(10))
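One behavior of the fallback ranking worth noting: the L1 distance sums the three columns unscaled, so gaps in ratings_count dominate gaps in average_rating. A minimal sketch below illustrates this with hypothetical toy data (not rows from books.csv):

import pandas as pd

toy = pd.DataFrame({
    'title': ['A', 'B', 'C'],
    'average_rating': [4.9, 3.0, 4.0],
    'ratings_count': [100.0, 120.0, 5000.0],
    'text_reviews_count': [10.0, 12.0, 300.0],
})
# The same unscaled L1 distance the app computes, for a request of (4.0, 110, 11)
toy['distance'] = (
    (toy['average_rating'] - 4.0).abs()
    + (toy['ratings_count'] - 110.0).abs()
    + (toy['text_reviews_count'] - 11.0).abs()
)
print(toy.nsmallest(2, 'distance')[['title', 'distance']])
# 'A' and 'B' rank first: their small ratings_count gaps outweigh
# C's exact average_rating match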
books.csv
ADDED
The diff for this file is too large to render; see the raw diff.
requirements.txt
ADDED
@@ -0,0 +1,6 @@
streamlit
pandas
numpy
matplotlib
seaborn
scikit-learn
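To reproduce the app locally (a standard setup, assuming a working Python environment with the files above checked out), install the dependencies and launch Streamlit:

pip install -r requirements.txt
streamlit run app.py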