NimaKL committed on
Commit 8aaf909 · verified · 1 Parent(s): d8d0759

Update app.py

Files changed (1)
  1. app.py +41 -49
app.py CHANGED
@@ -1,13 +1,30 @@
- import gradio as gr
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
  import torch
  import pandas as pd
  import numpy as np
+ from sentence_transformers import SentenceTransformer
  from torch_geometric.data import Data
  from torch_geometric.nn import GATConv
- from sentence_transformers import SentenceTransformer
  from sklearn.metrics.pairwise import cosine_similarity

- # Define the GATConv model architecture
+ # FastAPI App
+ app = FastAPI()
+
+ # Data and Model Initialization
+ data = torch.load("graph_data.pt", map_location=torch.device("cpu"))
+
+ # Corrected state dictionary for GATConv model
+ original_state_dict = torch.load("graph_model.pth", map_location=torch.device("cpu"))
+ corrected_state_dict = {}
+ for key, value in original_state_dict.items():
+     if "lin.weight" in key:
+         corrected_state_dict[key.replace("lin.weight", "lin_src.weight")] = value
+         corrected_state_dict[key.replace("lin.weight", "lin_dst.weight")] = value
+     else:
+         corrected_state_dict[key] = value
+
+ # Define GATConv Model
  class ModeratelySimplifiedGATConvModel(torch.nn.Module):
      def __init__(self, in_channels, hidden_channels, out_channels):
          super().__init__()
@@ -22,38 +39,19 @@ class ModeratelySimplifiedGATConvModel(torch.nn.Module):
          x = self.conv2(x, edge_index, edge_attr)
          return x

- # Load the dataset and the GATConv model
- data = torch.load("graph_data.pt", map_location=torch.device("cpu"))
-
- # Correct the state dictionary's key names
- original_state_dict = torch.load("graph_model.pth", map_location=torch.device("cpu"))
- corrected_state_dict = {}
- for key, value in original_state_dict.items():
-     if "lin.weight" in key:
-         corrected_state_dict[key.replace("lin.weight", "lin_src.weight")] = value
-         corrected_state_dict[key.replace("lin.weight", "lin_dst.weight")] = value
-     else:
-         corrected_state_dict[key] = value
-
- # Initialize the GATConv model with the corrected state dictionary
+ # Initialize GATConv model and BERT-based sentence transformer model
  gatconv_model = ModeratelySimplifiedGATConvModel(
      in_channels=data.x.shape[1], hidden_channels=32, out_channels=768
  )
  gatconv_model.load_state_dict(corrected_state_dict)

- # Load the BERT-based sentence transformer model
  model_bert = SentenceTransformer("all-mpnet-base-v2")

- # Ensure the DataFrame is loaded properly
+ # Ensure DataFrame is loaded properly
  df = pd.read_feather("EmbeddedCombined.feather")

- # Generate GNN-based embeddings
- with torch.no_grad():
-     all_video_embeddings = gatconv_model(data.x, data.edge_index, data.edge_attr).cpu()
-
- # Function to find the most similar video and recommend the top 10 based on GNN embeddings
+ # Function to get most similar video and recommend top 10 based on GNN embeddings
  def get_similar_and_recommend(input_text):
-     # Find the most similar video based on input text
      embeddings_matrix = np.array(df["embeddings"].tolist())
      input_embedding = model_bert.encode([input_text])[0]
      similarities = cosine_similarity([input_embedding], embeddings_matrix)[0]
@@ -65,7 +63,7 @@ def get_similar_and_recommend(input_text):
          "similarity_score": similarities[most_similar_index],
      }

-     # Recommend the top 10 videos based on GNN embeddings and dot product
+     # Function to recommend top 10 videos based on GNN embeddings
      def recommend_next_10_videos(given_video_index, all_video_embeddings):
          dot_products = [
              torch.dot(all_video_embeddings[given_video_index].cpu(), all_video_embeddings[i].cpu())
@@ -78,29 +76,23 @@ def get_similar_and_recommend(input_text):
          return recommendations

      top_10_recommendations = recommend_next_10_videos(
-         most_similar_index, all_video_embeddings
+         most_similar_index, gatconv_model(data.x, data.edge_index, data.edge_attr).cpu()
      )

-     return (
-         most_similar_video["title"],
-         most_similar_video["description"],
-         most_similar_video["similarity_score"],
-         top_10_recommendations,
-     )
-
- # Update the Gradio interface to fix the output type
- interface = gr.Interface(
-     fn=get_similar_and_recommend,
-     inputs=gr.components.Textbox(label="Enter Text to Find Most Similar Video"),
-     outputs=[
-         gr.components.Textbox(label="Video Title"),
-         gr.components.Textbox(label="Video Description"),
-         gr.components.Textbox(label="Similarity Score"),
-         gr.components.Textbox(label="Top 10 Recommended Videos", lines=10),  # Handle a list
-     ],
-     title="Video Recommendation System with GNN-based Recommendations",
-     description="Enter text to find the most similar video and get the top 10 recommended videos based on dot product and GNN embeddings.",
- )
+     return {
+         "most_similar_video_title": most_similar_video["title"],
+         "top_10_recommendations": top_10_recommendations,
+     }

- # Launch the Gradio interface
- interface.launch()
+ # Define the endpoint for FastAPI to get video title and recommendations
+ class UserInput(BaseModel):
+     text: str  # The string input from the user
+
+ @app.post("/recommendations")
+ def recommend_videos(user_input: UserInput):
+     if not user_input.text:
+         raise HTTPException(status_code=400, detail="Input text cannot be empty.")
+
+     result = get_similar_and_recommend(user_input.text)
+
+     return result
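For reference, a minimal sketch of how the new /recommendations endpoint could be exercised once the updated app.py is being served (for example with uvicorn). The host, port, and use of the requests library are assumptions for illustration and not part of this commit; the route, the UserInput request body, and the response keys come from the code above.

# Assumed local setup (not part of this commit): the app is served with
#   uvicorn app:app --host 0.0.0.0 --port 8000
import requests

# POST a free-text query to the /recommendations endpoint defined in app.py.
# The JSON body mirrors the UserInput Pydantic model: {"text": ...}.
response = requests.post(
    "http://localhost:8000/recommendations",  # assumed host and port
    json={"text": "graph neural network lecture"},  # example query text
)
response.raise_for_status()

payload = response.json()
print(payload["most_similar_video_title"])  # title of the closest match
print(payload["top_10_recommendations"])    # list built by recommend_next_10_videos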