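"""Gradio app that serves song recommendations from a trained PyTorch model."""
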
import gradio as gr
import torch
import torch.nn as nn
from joblib import load

# Define the same network architecture that was used to train the saved weights
class ImprovedSongRecommender(nn.Module):
    def __init__(self, input_size, num_titles):
        super(ImprovedSongRecommender, self).__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.bn1 = nn.BatchNorm1d(128)
        self.fc2 = nn.Linear(128, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.fc3 = nn.Linear(256, 128)
        self.bn3 = nn.BatchNorm1d(128)
        self.output = nn.Linear(128, num_titles)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x = torch.relu(self.bn1(self.fc1(x)))
        x = self.dropout(x)
        x = torch.relu(self.bn2(self.fc2(x)))
        x = self.dropout(x)
        x = torch.relu(self.bn3(self.fc3(x)))
        x = self.dropout(x)
        x = self.output(x)
        return x

# Load the trained model weights
model_path = "models/improved_model.pth"
num_unique_titles = 4855  # number of distinct song titles the model was trained on

model = ImprovedSongRecommender(input_size=2, num_titles=num_unique_titles)  # two features: encoded tags and artist
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
model.eval()  # inference mode: disables dropout and uses BatchNorm running statistics

# Load the label encoders and scaler
label_encoders_path = "data/new_label_encoders.joblib"
scaler_path = "data/new_scaler.joblib"

label_encoders = load(label_encoders_path)
scaler = load(scaler_path)

# Create a mapping from encoded indices to actual song titles
index_to_song_title = {index: title for index, title in enumerate(label_encoders['title'].classes_)}

def encode_input(tags, artist_name):
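    """Encode the raw tag and artist strings into the two integer features the model expects."""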
    tags = tags.strip().replace('\n', '')
    artist_name = artist_name.strip().replace('\n', '')

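    # Unseen values fall back to the 'unknown' class, which is assumed to exist in the fitted encoders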
    try:
        encoded_tags = label_encoders['tags'].transform([tags])[0]
    except ValueError:
        encoded_tags = label_encoders['tags'].transform(['unknown'])[0]

    if artist_name:
        try:
            encoded_artist = label_encoders['artist_name'].transform([artist_name])[0]
        except ValueError:
            encoded_artist = label_encoders['artist_name'].transform(['unknown'])[0]
    else:
        encoded_artist = label_encoders['artist_name'].transform(['unknown'])[0]

    return [encoded_tags, encoded_artist]

def recommend_songs(tags, artist_name):
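    """Return the model's top five song recommendations as a newline-separated string."""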
    encoded_input = encode_input(tags, artist_name)
    # Scale the encoded features with the scaler loaded above
    # (assumes the model was trained on scaled inputs)
    scaled_input = scaler.transform([encoded_input])
    input_tensor = torch.tensor(scaled_input, dtype=torch.float32)

    with torch.no_grad():
        output = model(input_tensor)

    # Take the five highest-scoring titles and map their indices back to song names
    recommendations_indices = torch.topk(output, 5).indices.squeeze().tolist()
    recommendations = [index_to_song_title.get(idx, "Unknown song") for idx in recommendations_indices]

    formatted_output = [f"Recommendation {i+1}: {rec}" for i, rec in enumerate(recommendations)]
    # Join into a single string so the single Textbox output shows one recommendation per line
    return "\n".join(formatted_output)

# Set up the Gradio interface
interface = gr.Interface(
    fn=recommend_songs,
    inputs=[
        gr.Textbox(lines=1, label="Tags", placeholder="Enter Tags (e.g., rock)"),
        gr.Textbox(lines=1, label="Artist Name", placeholder="Enter Artist Name (optional)"),
    ],
    outputs=gr.Textbox(label="Recommendations"),
    title="Music Recommendation System",
    description="Enter tags and, optionally, an artist name to get music recommendations.",
)

if __name__ == "__main__":
    interface.launch()