Erva Ulusoy committed on
Commit
301ba09
·
1 Parent(s): 0901ef4

working app

Browse files
Files changed (2) hide show
  1. ProtHGT_app.py +295 -12
  2. run_prothgt_app.py +115 -62
ProtHGT_app.py CHANGED
@@ -9,18 +9,301 @@ from run_prothgt_app import *
9
  def convert_df(df):
10
  return df.to_csv(index=False).encode('utf-8')
11
 
 
 
 
 
 
 
 
12
  with st.sidebar:
13
- st.title("ProtHGT: Heterogeneous Graph Transformers for Automated Protein Function Prediction Using Knowledge Graphs and Language Models")
14
- st.write("[![publication](https://img.shields.io/badge/DOI-10.1002/pro.4988-b31b1b.svg)]() [![github-repository](https://img.shields.io/badge/GitHub-black?logo=github)](https://github.com/HUBioDataLab/ProtHGT)")
15
-
16
- # Add protein selection
17
- # You'll need to replace this with your actual data loading
18
- available_proteins = get_available_proteins() # Function to get list of proteins from your data
19
- selected_protein = st.selectbox(
20
- "Select or search for a protein (UniProt ID)",
21
- options=available_proteins,
22
- placeholder="Start typing to search...",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- if selected_protein:
26
- st.write(f"Selected protein: {selected_protein}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
def convert_df(df):
    """Serialize *df* to UTF-8 CSV bytes (index column omitted) for download."""
    csv_text = df.to_csv(index=False)
    return csv_text.encode('utf-8')
11
 
12
# Initialize session state variables so they survive Streamlit reruns.
_SESSION_DEFAULTS = (('predictions_df', None), ('submitted', False))
for _state_key, _default in _SESSION_DEFAULTS:
    if _state_key not in st.session_state:
        st.session_state[_state_key] = _default
19
with st.sidebar:
    # App header: title, subtitle and badge links rendered as raw HTML.
    st.markdown("""
    <style>
    .title {
        font-size: 35px;
        font-weight: bold;
        color: #424242;
        margin-bottom: 0px;
    }
    .subtitle {
        font-size: 20px;
        color: #424242;
        margin-bottom: 20px;
        line-height: 1.5;
    }
    .badges {
        margin-top: 10px;
        margin-bottom: 20px;
    }
    </style>

    <div class="title">ProtHGT</div>
    <div class="subtitle">Heterogeneous Graph Transformers for Automated Protein Function Prediction Using Knowledge Graphs and Language Models</div>
    <div class="badges">
        <a href="">
            <img src="https://img.shields.io/badge/DOI-10.1002/pro.4988-b31b1b.svg" alt="publication">
        </a>
        <a href="https://github.com/HUBioDataLab/ProtHGT">
            <img src="https://img.shields.io/badge/GitHub-black?logo=github" alt="github-repository">
        </a>
    </div>
    """, unsafe_allow_html=True)

    available_proteins = get_available_proteins()

    selected_proteins = []

    # Two input modes: interactive search vs. bulk upload of IDs.
    selection_method = st.radio(
        "Choose input method:",
        ["Search proteins", "Upload protein ID file"]
    )

    if selection_method == "Search proteins":
        # Add custom CSS to make multiselect scrollable
        st.markdown("""
            <style>
            [data-testid="stMultiSelect"] div:nth-child(2) {
                max-height: 200px;
                overflow-y: auto;
            }
            </style>
            """, unsafe_allow_html=True)

        selected_proteins = st.multiselect(
            "Select or search for proteins (UniProt IDs)",
            options=available_proteins,
            placeholder="Start typing to search...",
            max_selections=100
        )

        if selected_proteins:
            st.write(f"Selected {len(selected_proteins)} proteins")

    else:
        uploaded_file = st.file_uploader(
            "Upload a text file with UniProt IDs (one per line, max 100)*",
            type=['txt']
        )

        if uploaded_file:
            protein_list = [line.decode('utf-8').strip() for line in uploaded_file]
            # Remove empty lines and duplicates (dict.fromkeys keeps order).
            protein_list = list(filter(None, protein_list))
            protein_list = list(dict.fromkeys(protein_list))

            # BUG FIX: record unknown IDs BEFORE filtering them out. The
            # original filtered first, so proteins_not_found was always empty
            # and the user never saw which uploaded IDs were dropped.
            # A set gives O(1) membership instead of O(n) per lookup.
            available_set = set(available_proteins)
            proteins_not_found = [p for p in protein_list if p not in available_set]
            protein_list = [p for p in protein_list if p in available_set]

            if len(protein_list) > 100:
                st.error("Please upload a file with maximum 100 protein IDs.")
                selected_proteins = []
            else:
                selected_proteins = protein_list
                st.write(f"Loaded {len(selected_proteins)} proteins")
                if proteins_not_found:
                    st.error(f"Proteins not found in input knowledge graph: {', '.join(proteins_not_found)}")
                    st.warning("Currently, our system can generate predictions only for proteins included in our input knowledge graph. Real-time retrieval of relationship data from external source databases is not yet supported. However, we are actively working on integrating this capability in future updates.")

    if selected_proteins:
        # Collapsible view of everything the user picked.
        with st.expander("View Selected Proteins"):
            st.write(f"Total proteins selected: {len(selected_proteins)}")

            # Scrollable container with fixed height.
            st.markdown(
                f"""
                <div style="
                    height: 150px;
                    overflow-y: scroll;
                    border: 1px solid #ccc;
                    border-radius: 4px;
                    padding: 8px;
                    background-color: white;">
                    {'<br>'.join(selected_proteins)}
                </div>
                """,
                unsafe_allow_html=True
            )

            st.markdown("<div style='padding-top: 10px;'></div>", unsafe_allow_html=True)

            # Let the user download the selection as a plain-text list.
            proteins_text = '\n'.join(selected_proteins)
            st.download_button(
                label="Download List",
                data=proteins_text,
                file_name="selected_proteins.txt",
                mime="text/plain",
                key="download_button"
            )

    # GO aspect to predict; the None value means "run all three models".
    go_category_options = {
        'All Categories': None,
        'Molecular Function': 'GO_term_F',
        'Biological Process': 'GO_term_P',
        'Cellular Component': 'GO_term_C'
    }
    selected_go_category = st.selectbox(
        "Select GO Category for predictions",
        options=list(go_category_options.keys()),
        help="Choose which GO category to generate predictions for. Selecting 'All Categories' will generate predictions for all three categories."
    )

    st.warning("⚠️ Due to memory and computational constraints, the maximum number of proteins that can be processed at once is limited to 100 proteins. For larger datasets, please consider running the model locally using our GitHub repository.")
157
if selected_proteins and selected_go_category:
    # Explicit button press flips the session flag so later reruns
    # keep showing the results without regenerating them.
    if st.button("Generate Predictions"):
        st.session_state.submitted = True

if st.session_state.submitted:
    with st.spinner("Generating predictions..."):
        # Generate predictions only if not already cached in session state.
        if st.session_state.predictions_df is None:
            # NOTE: the original also did `import json` here but never used
            # it — model configs are YAML files, parsed downstream.
            import os

            # Model configs and weights live under data/models/.
            data_dir = "data"
            models_dir = os.path.join(data_dir, "models")

            model_config_paths = {
                'GO_term_F': os.path.join(models_dir, "prothgt-config-molecular-function.yaml"),
                'GO_term_P': os.path.join(models_dir, "prothgt-config-biological-process.yaml"),
                'GO_term_C': os.path.join(models_dir, "prothgt-config-cellular-component.yaml")
            }

            model_paths = {
                'GO_term_F': os.path.join(models_dir, "prothgt-model-molecular-function.pt"),
                'GO_term_P': os.path.join(models_dir, "prothgt-model-biological-process.pt"),
                'GO_term_C': os.path.join(models_dir, "prothgt-model-cellular-component.pt")
            }

            # None -> run every category; otherwise restrict to the chosen one.
            go_category = go_category_options[selected_go_category]
            if go_category:
                model_config_paths = [model_config_paths[go_category]]
                model_paths = [model_paths[go_category]]
                go_categories = [go_category]
            else:
                model_config_paths = [model_config_paths[cat] for cat in ['GO_term_F', 'GO_term_P', 'GO_term_C']]
                model_paths = [model_paths[cat] for cat in ['GO_term_F', 'GO_term_P', 'GO_term_C']]
                go_categories = ['GO_term_F', 'GO_term_P', 'GO_term_C']

            # Generate predictions and cache them for subsequent reruns.
            st.session_state.predictions_df = generate_prediction_df(
                protein_ids=selected_proteins,
                model_paths=model_paths,
                model_config_paths=model_config_paths,
                go_category=go_categories
            )

    # Display and filter predictions.
    st.success("Predictions generated successfully!")
    st.markdown("### Filter and View Predictions")

    st.markdown("### Filter Predictions")
    col1, col2, col3 = st.columns(3)

    with col1:
        # Protein filter
        selected_protein = st.selectbox(
            "Filter by Protein",
            options=['All'] + sorted(st.session_state.predictions_df['Protein'].unique().tolist())
        )

    with col2:
        # GO category filter
        selected_category = st.selectbox(
            "Filter by GO Category",
            options=['All'] + sorted(st.session_state.predictions_df['GO_category'].unique().tolist())
        )

    with col3:
        # Probability range filters
        min_probability_threshold = st.slider(
            "Minimum Probability",
            min_value=0.0,
            max_value=1.0,
            value=0.5,
            step=0.05
        )

        max_probability_threshold = st.slider(
            "Maximum Probability",
            min_value=0.0,
            max_value=1.0,
            value=1.0,
            step=0.05
        )

    # Apply the filters to a copy of the cached predictions.
    filtered_df = st.session_state.predictions_df.copy()

    if selected_protein != 'All':
        filtered_df = filtered_df[filtered_df['Protein'] == selected_protein]

    if selected_category != 'All':
        filtered_df = filtered_df[filtered_df['GO_category'] == selected_category]

    filtered_df = filtered_df[(filtered_df['Probability'] >= min_probability_threshold) &
                              (filtered_df['Probability'] <= max_probability_threshold)]

    # Sort by probability, highest first.
    filtered_df = filtered_df.sort_values('Probability', ascending=False)

    # Display the filtered dataframe with a progress bar for probability.
    st.dataframe(
        filtered_df,
        hide_index=True,
        column_config={
            "Probability": st.column_config.ProgressColumn(
                "Probability",
                format="%.2f",
                min_value=0,
                max_value=1,
            ),
            "Protein": st.column_config.TextColumn(
                "Protein",
                help="UniProt ID",
            ),
            "GO_category": st.column_config.TextColumn(
                "GO Category",
                help="Gene Ontology Category",
            ),
            "GO_term": st.column_config.TextColumn(
                "GO Term",
                help="Gene Ontology Term ID",
            ),
        }
    )

    # Download the filtered results as CSV.
    st.download_button(
        label="Download Filtered Results",
        data=convert_df(filtered_df),
        file_name="filtered_predictions.csv",
        mime="text/csv",
        key="download_filtered_predictions"
    )
302
+
303
# Add a reset button in the sidebar (only meaningful after a submission).
with st.sidebar:
    if st.session_state.submitted:
        if st.button("Reset"):
            st.session_state.predictions_df = None
            st.session_state.submitted = False
            # st.experimental_rerun() was removed in modern Streamlit
            # releases; prefer st.rerun() when available and fall back
            # for older versions.
            if hasattr(st, "rerun"):
                st.rerun()
            else:
                st.experimental_rerun()
run_prothgt_app.py CHANGED
@@ -1,33 +1,29 @@
1
- from datasets import load_dataset
2
- from torch_geometric.transforms import ToUndirected
3
  import torch
4
  from torch.nn import Linear
5
  from torch_geometric.nn import HGTConv, MLP
6
  import pandas as pd
 
 
 
7
 
8
  class ProtHGT(torch.nn.Module):
9
  def __init__(self, data,hidden_channels, num_heads, num_layers, mlp_hidden_layers, mlp_dropout):
10
  super().__init__()
11
 
12
- self.lin_dict = torch.nn.ModuleDict({
13
- node_type: Linear(-1, hidden_channels)
14
- for node_type in data.node_types
15
- })
16
 
17
  self.convs = torch.nn.ModuleList()
18
  for _ in range(num_layers):
19
  conv = HGTConv(hidden_channels, hidden_channels, data.metadata(), num_heads, group='sum')
20
  self.convs.append(conv)
21
 
22
- # self.left_linear = Linear(hidden_channels, hidden_channels)
23
- # self.right_linear = Linear(hidden_channels, hidden_channels)
24
- # self.sqrt_hd = hidden_channels**1/2
25
-
26
- # self.mlp =MLP([2*hidden_channels, 128, 1], dropout=0.5, norm=None)
27
  self.mlp = MLP(mlp_hidden_layers , dropout=mlp_dropout, norm=None)
28
 
29
  def generate_embeddings(self, x_dict, edge_index_dict):
30
- # Generate updated embeddings through the GNN layers
31
  x_dict = {
32
  node_type: self.lin_dict[node_type](x).relu_()
33
  for node_type, x in x_dict.items()
@@ -48,9 +44,11 @@ class ProtHGT(torch.nn.Module):
48
 
49
  return self.mlp(z).view(-1), x_dict
50
 
51
- def _load_data(protein_id, go_category=None, heterodata_path=''):
52
- heterodata = load_dataset(heterodata_path)
53
 
 
 
 
54
  # Remove unnecessary edge types in one go
55
  edge_types_to_remove = [
56
  ('Protein', 'protein_function', 'GO_term_F'),
@@ -62,68 +60,123 @@ def _load_data(protein_id, go_category=None, heterodata_path=''):
62
  ]
63
 
64
  for edge_type in edge_types_to_remove:
65
- if edge_type in heterodata:
66
- del heterodata[edge_type]
67
 
68
- # Remove reverse edges
69
- heterodata = {k: v for k, v in heterodata.items() if not isinstance(k, tuple) or 'rev' not in k[1]}
70
-
71
- protein_index = heterodata['Protein']['id_mapping'][protein_id]
72
 
73
- # Create edge indices more efficiently
74
  categories = [go_category] if go_category else ['GO_term_F', 'GO_term_P', 'GO_term_C']
75
 
76
  for category in categories:
77
- pairs = [(protein_index, i) for i in range(len(heterodata[category]))]
78
- heterodata['Protein', 'protein_function', category] = {'edge_index': pairs}
79
-
80
- return ToUndirected(merge=False)(heterodata)
 
 
 
 
 
81
 
82
  def get_available_proteins(protein_list_file='data/available_proteins.txt'):
83
  with open(protein_list_file, 'r') as file:
84
  return [line.strip() for line in file.readlines()]
85
 
86
- def _generate_predictions(heterodata, model_path, model_config, target_type):
87
-
88
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
89
- model = ProtHGT(heterodata, model_config['hidden_channels'], model_config['num_heads'], model_config['num_layers'], model_config['mlp_hidden_layers'], model_config['mlp_dropout'])
90
- print('Loading model from', model_path)
91
- model.load_state_dict(torch.load(model_path, map_location=device))
92
-
93
  model.to(device)
94
  model.eval()
95
- heterodata.to(device)
96
 
97
  with torch.no_grad():
98
- predictions, _ = model(heterodata.x_dict, heterodata.edge_index_dict, heterodata[("Protein", "protein_function", target_type)].edge_label_index, target_type)
99
- return predictions
100
-
101
- def _create_prediction_df(predictions, heterodata, protein_id, go_category):
102
- prediction_df = pd.DataFrame({
103
- 'Protein': protein_id,
104
- 'GO_category': go_category,
105
- 'GO_term': heterodata[go_category]['id_mapping'].keys(),
106
- 'Probability': predictions.tolist()
107
- })
108
- prediction_df.sort_values(by='Probability', ascending=False, inplace=True)
109
- prediction_df.reset_index(drop=True, inplace=True)
110
- return prediction_df
111
-
112
-
113
- def generate_prediction_df(protein_id, heterodata_path, model_path, model_config, go_category=None):
114
- heterodata = _load_data(protein_id, go_category, heterodata_path)
115
-
116
- if go_category:
117
- predictions = _generate_predictions(heterodata, model_path, model_config, go_category)
118
- prediction_df = _create_prediction_df(predictions, heterodata, protein_id, go_category)
119
- return prediction_df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
- else:
122
- all_predictions = []
123
- for go_category in ['GO_term_F', 'GO_term_P', 'GO_term_C']:
124
- predictions = _generate_predictions(heterodata, model_path, model_config, go_category)
125
- category_df = _create_prediction_df(predictions, heterodata, protein_id, go_category)
126
- all_predictions.append(category_df)
127
-
128
- return pd.concat(all_predictions, ignore_index=True)
129
 
 
 
 
 
1
  import torch
2
  from torch.nn import Linear
3
  from torch_geometric.nn import HGTConv, MLP
4
  import pandas as pd
5
+ import yaml
6
+ import os
7
+ from datasets import load_dataset
8
 
9
  class ProtHGT(torch.nn.Module):
10
  def __init__(self, data,hidden_channels, num_heads, num_layers, mlp_hidden_layers, mlp_dropout):
11
  super().__init__()
12
 
13
+ self.lin_dict = torch.nn.ModuleDict()
14
+ for node_type in data.node_types:
15
+ input_dim = data[node_type].x.size(1) # Get actual input dimension from data
16
+ self.lin_dict[node_type] = Linear(input_dim, hidden_channels)
17
 
18
  self.convs = torch.nn.ModuleList()
19
  for _ in range(num_layers):
20
  conv = HGTConv(hidden_channels, hidden_channels, data.metadata(), num_heads, group='sum')
21
  self.convs.append(conv)
22
 
 
 
 
 
 
23
  self.mlp = MLP(mlp_hidden_layers , dropout=mlp_dropout, norm=None)
24
 
25
  def generate_embeddings(self, x_dict, edge_index_dict):
26
+ # Generate updated embeddings through the HGT layers
27
  x_dict = {
28
  node_type: self.lin_dict[node_type](x).relu_()
29
  for node_type, x in x_dict.items()
 
44
 
45
  return self.mlp(z).view(-1), x_dict
46
 
47
+ def _load_data(protein_ids, go_category=None):
 
48
 
49
+ # heterodata = load_dataset('HUBioDataLab/ProtHGT-KG', data_files="prothgt-kg.pt")
50
+ heterodata = torch.load('data/prothgt-kg.pt')
51
+ print('Loading data...')
52
  # Remove unnecessary edge types in one go
53
  edge_types_to_remove = [
54
  ('Protein', 'protein_function', 'GO_term_F'),
 
60
  ]
61
 
62
  for edge_type in edge_types_to_remove:
63
+ if edge_type in heterodata.edge_index_dict:
64
+ del heterodata.edge_index_dict[edge_type]
65
 
66
+ # Get protein indices for all input proteins
67
+ protein_indices = [heterodata['Protein']['id_mapping'][pid] for pid in protein_ids]
 
 
68
 
69
+ # Create edge indices for prediction
70
  categories = [go_category] if go_category else ['GO_term_F', 'GO_term_P', 'GO_term_C']
71
 
72
  for category in categories:
73
+ # Create pairs for all proteins with all GO terms
74
+ n_terms = len(heterodata[category]['id_mapping'])
75
+ protein_indices_repeated = torch.tensor(protein_indices).repeat_interleave(n_terms)
76
+ term_indices = torch.arange(n_terms).repeat(len(protein_indices))
77
+
78
+ edge_index = torch.stack([protein_indices_repeated, term_indices])
79
+ heterodata.edge_index_dict[('Protein', 'protein_function', category)] = edge_index
80
+
81
+ return heterodata
82
 
83
def get_available_proteins(protein_list_file='data/available_proteins.txt'):
    """Return the UniProt IDs listed in *protein_list_file*, one per line,
    with surrounding whitespace stripped."""
    with open(protein_list_file, 'r') as handle:
        return [entry.strip() for entry in handle]
86
 
87
def _generate_predictions(heterodata, model, target_type):
    """Score every (protein, GO term) pair of *target_type* with *model*.

    Moves both the model and the graph to GPU when available, runs a
    forward pass without gradients, and returns sigmoid probabilities
    as a CPU tensor.
    """
    run_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model.to(run_device)
    model.eval()
    heterodata = heterodata.to(run_device)

    relation = ('Protein', 'protein_function', target_type)
    with torch.no_grad():
        logits, _ = model(
            heterodata.x_dict,
            heterodata.edge_index_dict,
            heterodata.edge_index_dict[relation],
            target_type,
        )
        scores = torch.sigmoid(logits)

    return scores.cpu()
100
+
101
+ def _create_prediction_df(predictions, heterodata, protein_ids, go_category):
102
+ go_category_dict = {
103
+ 'GO_term_F': 'Molecular Function',
104
+ 'GO_term_P': 'Biological Process',
105
+ 'GO_term_C': 'Cellular Component'
106
+ }
107
+ # Create a list to store individual protein predictions
108
+ all_predictions = []
109
+
110
+ # Number of GO terms for this category
111
+ n_go_terms = len(heterodata[go_category]['id_mapping'])
112
+
113
+ # Process predictions for each protein
114
+ for i, protein_id in enumerate(protein_ids):
115
+ # Get the slice of predictions for this protein
116
+ protein_predictions = predictions[i * n_go_terms:(i + 1) * n_go_terms]
117
+
118
+ prediction_df = pd.DataFrame({
119
+ 'Protein': protein_id,
120
+ 'GO_category': go_category_dict[go_category],
121
+ 'GO_term': list(heterodata[go_category]['id_mapping'].keys()),
122
+ 'Probability': protein_predictions.numpy()
123
+ })
124
+ all_predictions.append(prediction_df)
125
+
126
+ # Combine all predictions
127
+ combined_df = pd.concat(all_predictions, ignore_index=True)
128
+ combined_df.sort_values(by=['Protein', 'Probability'], ascending=[True, False], inplace=True)
129
+ combined_df.reset_index(drop=True, inplace=True)
130
+ return combined_df
131
+
132
def generate_prediction_df(protein_ids, model_paths, model_config_paths, go_category):
    """Run ProtHGT for each requested GO category and return one combined
    prediction DataFrame.

    Args:
        protein_ids: a single UniProt ID or a list of IDs to score.
        model_paths: checkpoint file paths, aligned with *go_category*.
        model_config_paths: YAML config paths, aligned with *go_category*.
        go_category: iterable of category keys
            ('GO_term_F' / 'GO_term_P' / 'GO_term_C').

    Returns:
        pandas.DataFrame with Protein / GO_category / GO_term / Probability
        columns, concatenated across all requested categories.
    """
    # Accept a bare string for convenience.
    if isinstance(protein_ids, str):
        protein_ids = [protein_ids]

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    category_frames = []
    for go_cat, config_path, weights_path in zip(go_category, model_config_paths, model_paths):
        print(f'Generating predictions for {go_cat}...')

        # Build the inference graph for this category.
        graph = _load_data(protein_ids, go_cat)

        # Read the architecture hyper-parameters from the YAML config.
        with open(config_path, 'r') as cfg_file:
            cfg = yaml.safe_load(cfg_file)

        # Instantiate the network and restore its trained weights.
        net = ProtHGT(
            graph,
            hidden_channels=cfg['hidden_channels'][0],
            num_heads=cfg['num_heads'],
            num_layers=cfg['num_layers'],
            mlp_hidden_layers=cfg['hidden_channels'][1],
            mlp_dropout=cfg['mlp_dropout']
        )
        net.load_state_dict(torch.load(weights_path, map_location=device))
        print(f'Loaded model weights from {weights_path}')

        # Score and tabulate this category's predictions.
        scores = _generate_predictions(graph, net, go_cat)
        category_frames.append(_create_prediction_df(scores, graph, protein_ids, go_cat))

        # Release the graph/model memory before loading the next category.
        del graph, net, scores
        torch.cuda.empty_cache()  # no-op on CPU-only hosts

    final_df = pd.concat(category_frames, ignore_index=True)

    del category_frames
    torch.cuda.empty_cache()

    return final_df