NimaKL committed on
Commit 5feda0d · verified · 1 Parent(s): 4215f3c

Update app.py

Files changed (1)
  1. app.py +37 -43
app.py CHANGED
@@ -17,94 +17,88 @@ class ModeratelySimplifiedGATConvModel(torch.nn.Module):
 
      def forward(self, x, edge_index, edge_attr=None):
          x = self.conv1(x, edge_index, edge_attr)
-         x = torch.relu(x)
-         x = self.dropout1(x)
-         x = self.conv2(x, edge_index, edge_attr)
          return x
 
  # Load the dataset and the GATConv model
- data = torch.load("graph_data.pt", map_location=torch.device("cpu"))
-
- # Correct the state dictionary's key names
- original_state_dict = torch.load("graph_model.pth", map_location=torch.device("cpu"))
- corrected_state_dict = {}
- for key, value in original_state_dict.items():
-     if "lin.weight" in key:
-         corrected_state_dict[key.replace("lin.weight", "lin_src.weight")] = value
-         corrected_state_dict[key.replace("lin.weight", "lin_dst.weight")] = value
-     else:
-         corrected_state_dict[key] = value
-
- # Initialize the GATConv model with the corrected state dictionary
- gatconv_model = ModeratelySimplifiedGATConvModel(
-     in_channels=data.x.shape[1], hidden_channels=32, out_channels=768
- )
- gatconv_model.load_state_dict(corrected_state_dict)
 
  # Load the BERT-based sentence transformer model
- model_bert = SentenceTransformer("all-mpnet-base-v2")
 
  # Ensure the DataFrame is loaded properly
  try:
-     df = pd.read_json("combined_data.json.gz", orient='records', lines=True, compression='gzip')
  except Exception as e:
      print(f"Error reading JSON file: {e}")
 
  # Generate GNN-based embeddings
  with torch.no_grad():
-     all_video_embeddings = gatconv_model(data.x, data.edge_index, data.edge_attr).cpu()
 
  # Function to find the most similar video and recommend the top 10 based on GNN embeddings
  def get_similar_and_recommend(input_text):
      # Find the most similar video based on input text
-     embeddings_matrix = np.array(df["embeddings"].tolist())
-     input_embedding = model_bert.encode([input_text])[0]
-     similarities = cosine_similarity([input_embedding], embeddings_matrix)[0]
-     most_similar_index = np.argmax(similarities)
 
      # Get all features of the most similar video
      most_similar_video_features = df.iloc[most_similar_index].to_dict()
 
-     # Exclude unwanted features
-     unwanted_keys = ["text_for_embedding", "embeddings"]
-     for key in unwanted_keys:
-         if key in most_similar_video_features:
-             del most_similar_video_features[key]
-
      # Recommend the top 10 videos based on GNN embeddings and dot product
      def recommend_next_10_videos(given_video_index, all_video_embeddings):
          dot_products = [
              torch.dot(all_video_embeddings[given_video_index], all_video_embeddings[i])
             for i in range(all_video_embeddings.shape[0])
          ]
-         dot_products[given_video_index] = -float("inf")
 
-         top_10_indices = np.argsort(dot_products)[::-1][:10]
          return [df.iloc[idx].to_dict() for idx in top_10_indices]
 
-     top_10_recommended_videos_features = recommend_next_10_videos(most_similar_index, all_video_embeddings)
 
      # Exclude unwanted features for recommended videos
      for recommended_video in top_10_recommended_videos_features:
-         for key in unwanted_keys:
-             if key in recommended_video:
-                 del recommended_video[key]
 
-     # Create the output JSON with all features except unwanted ones
      output = {
          "most_similar_video": most_similar_video_features,
          "top_10_recommended_videos": top_10_recommended_videos_features,
      }
 
      return output
 
- # Update the Gradio interface to output JSON without unwanted features
  interface = gr.Interface(
      fn=get_similar_and_recommend,
-     inputs=gr.components.Textbox(label="Enter Text to Find Most Similar Video"),
      outputs=gr.JSON(),
      title="Video Recommendation System with GNN-based Recommendations",
-     description="Enter text to find the most similar video and get top 10 recommended videos with all features except embeddings-related fields in a JSON object.",
  )
 
  interface.launch()
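For context on the block deleted above: the key remapping looks like a compatibility shim for GATConv checkpoints saved under a torch_geometric version that named the attention projection `lin` rather than `lin_src`/`lin_dst`. A minimal sketch of that idea, assuming the same file names as the removed code (not verified against the Space's assets):

import torch

def remap_gatconv_keys(state_dict):
    # Copy legacy "lin.weight" entries onto the newer
    # "lin_src.weight" / "lin_dst.weight" parameter names,
    # leaving every other entry untouched.
    remapped = {}
    for key, value in state_dict.items():
        if "lin.weight" in key:
            remapped[key.replace("lin.weight", "lin_src.weight")] = value
            remapped[key.replace("lin.weight", "lin_dst.weight")] = value
        else:
            remapped[key] = value
    return remapped

# Hypothetical usage, mirroring the removed lines:
# state_dict = torch.load("graph_model.pth", map_location="cpu")
# model.load_state_dict(remap_gatconv_keys(state_dict))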
 
app.py after the commit (lines 17-104):

 
      def forward(self, x, edge_index, edge_attr=None):
          x = self.conv1(x, edge_index, edge_attr)
+         x = torch.relu(x)
+         x = self.dropout1(x)
+         x = self.conv2(x, edge_index, edge_attr)
          return x
 
  # Load the dataset and the GATConv model
+ data = torch.load("graph_data.pt", map_location=torch.device("cpu"))
 
  # Load the BERT-based sentence transformer model
+ model_bert = SentenceTransformer("all-mpnet-base-v2")
 
  # Ensure the DataFrame is loaded properly
  try:
+     df = pd.read_json("combined_data.json.gz", orient='records', lines=True, compression='gzip')
  except Exception as e:
      print(f"Error reading JSON file: {e}")
 
  # Generate GNN-based embeddings
  with torch.no_grad():
+     all_video_embeddings = gatconv_model(data.x, data.edge_index, data.edge_attr).cpu()
 
  # Function to find the most similar video and recommend the top 10 based on GNN embeddings
  def get_similar_and_recommend(input_text):
      # Find the most similar video based on input text
+     embeddings_matrix = np.array(df["embeddings"].tolist())
+     input_embedding = model_bert.encode([input_text])[0]
+     similarities = cosine_similarity([input_embedding], embeddings_matrix)[0]
+
+     # Modify the similarity scores based on user input
+     user_keywords = input_text.split()  # Create a list of keywords from user input
+     weight = 1.0  # Initial weight factor
+
+     for keyword in user_keywords:
+         if keyword.lower() in df["title"].str.lower().tolist():  # Check if the keyword is in any title
+             weight += 0.1  # Increase weight for matching keyword
+
+     weighted_similarities = similarities * weight  # Apply the weight to the similarity score
+
+     most_similar_index = np.argmax(weighted_similarities)  # Use weighted scores to find most similar
 
      # Get all features of the most similar video
      most_similar_video_features = df.iloc[most_similar_index].to_dict()
 
      # Recommend the top 10 videos based on GNN embeddings and dot product
      def recommend_next_10_videos(given_video_index, all_video_embeddings):
          dot_products = [
              torch.dot(all_video_embeddings[given_video_index], all_video_embeddings[i])
              for i in range(all_video_embeddings.shape[0])
          ]
+         dot_products[given_video_index] = -float("inf")
 
+         top_10_indices = np.argsort(dot_products)[::-1][:10]
          return [df.iloc[idx].to_dict() for idx in top_10_indices]
 
+     top_10_recommended_videos_features = recommend_next_10_videos(most_similar_index, all_video_embeddings)
 
      # Exclude unwanted features for recommended videos
      for recommended_video in top_10_recommended_videos_features:
+         if "text_for_embedding" in recommended_video:
+             del recommended_video["text_for_embedding"]
+         if "embeddings" in recommended_video:
+             del recommended_video["embeddings"]
 
+     # Create the output JSON with all features and the search context
      output = {
+         "search_context": {
+             "input_text": input_text,
+             "weight": weight,  # The applied weight based on user input
+         },
          "most_similar_video": most_similar_video_features,
          "top_10_recommended_videos": top_10_recommended_videos_features,
      }
 
      return output
 
+ # Update the Gradio interface to output JSON with weighted recommendations
  interface = gr.Interface(
      fn=get_similar_and_recommend,
+     inputs=gr.Textbox(label="Enter Text to Find Most Similar Video"),
      outputs=gr.JSON(),
      title="Video Recommendation System with GNN-based Recommendations",
+     description="Enter text to find the most similar video and get top 10 recommended videos with search context and user-influenced weight factor.",
  )
 
  interface.launch()
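Taken together, the updated get_similar_and_recommend encodes the query, computes cosine similarities against the stored text embeddings, scales them by a keyword-derived weight, and then ranks other videos by dot product of their GNN embeddings. A self-contained sketch of that flow with toy data (the DataFrame contents, embedding sizes, top_k value, and the recommend helper name below are illustrative assumptions, not taken from the Space):

import numpy as np
import pandas as pd
import torch
from sklearn.metrics.pairwise import cosine_similarity

# Toy stand-ins for the Space's real assets (assumed shapes and values).
df = pd.DataFrame({
    "title": ["pasta", "guitar lessons", "travel vlog"],
    "embeddings": [np.random.rand(8).tolist() for _ in range(3)],
})
all_video_embeddings = torch.rand(3, 16)  # stand-in for the GNN output

def recommend(input_text, query_embedding, top_k=2):
    # 1. Cosine similarity between the query and the stored text embeddings.
    embeddings_matrix = np.array(df["embeddings"].tolist())
    similarities = cosine_similarity([query_embedding], embeddings_matrix)[0]

    # 2. Keyword-derived weight: +0.1 for each query word that exactly
    #    matches a stored title, mirroring the committed logic.
    titles = df["title"].str.lower().tolist()
    weight = 1.0
    for keyword in input_text.split():
        if keyword.lower() in titles:
            weight += 0.1
    most_similar_index = int(np.argmax(similarities * weight))

    # 3. Rank the remaining videos by dot product of their GNN embeddings.
    dot_products = all_video_embeddings @ all_video_embeddings[most_similar_index]
    dot_products[most_similar_index] = -float("inf")
    top_indices = torch.topk(dot_products, k=top_k).indices.tolist()

    return {
        "search_context": {"input_text": input_text, "weight": weight},
        "most_similar_video": df.iloc[most_similar_index]["title"],
        "recommended": [df.iloc[i]["title"] for i in top_indices],
    }

print(recommend("pasta recipes", query_embedding=np.random.rand(8)))

Because a single scalar weight multiplies every similarity score, it is mainly surfaced as context in the output rather than re-ranking individual candidates.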