ThorbenF committed
Commit 6963cf4 · verified · 1 Parent(s): bbbb8f2

Upload 3 files

Files changed (3):
  1. app.py +129 -0
  2. model_loader.py +641 -0
  3. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,129 @@
+ import gradio as gr
+ # DataCollatorForTokenClassificationESM is defined in model_loader.py and is needed for ESM/ProstT5 checkpoints
+ from model_loader import load_model, DataCollatorForTokenClassificationESM
+ 
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from torch.utils.data import DataLoader
+ 
+ import re
+ import numpy as np
+ import os
+ import pandas as pd
+ import copy
+ 
+ import transformers, datasets
+ from transformers.modeling_outputs import TokenClassifierOutput
+ from transformers.models.t5.modeling_t5 import T5Config, T5PreTrainedModel, T5Stack
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
+ from transformers import T5EncoderModel, T5Tokenizer
+ from transformers.models.esm.modeling_esm import EsmPreTrainedModel, EsmModel
+ from transformers import AutoTokenizer
+ from transformers import TrainingArguments, Trainer, set_seed
+ from transformers import DataCollatorForTokenClassification
+ 
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional, Tuple, Union
+ 
+ # for custom DataCollator
+ from transformers.data.data_collator import DataCollatorMixin
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
+ from transformers.utils import PaddingStrategy
+ 
+ from datasets import Dataset
+ 
+ from scipy.special import expit
+ 
+ #import peft
+ #from peft import get_peft_config, PeftModel, PeftConfig, inject_adapter_in_model, LoraConfig
+ 
+ # Must match the checkpoint loaded in model_loader.load_model()
+ checkpoint = 'ThorbenF/prot_t5_xl_uniref50'
+ # Assumed truncation limit for tokenization; adjust to the value used during training
+ max_length = 1500
+ 
+ model, tokenizer = load_model()
+ 
+ def create_dataset(tokenizer, seqs, labels, checkpoint):
+ 
+     tokenized = tokenizer(seqs, max_length=max_length, padding=False, truncation=True)
+     dataset = Dataset.from_dict(tokenized)
+ 
+     # ESM and ProstT5 tokenizers add two special tokens, the other models add one
+     if ("esm" in checkpoint) or ("ProstT5" in checkpoint):
+         labels = [l[:max_length-2] for l in labels]
+     else:
+         labels = [l[:max_length-1] for l in labels]
+ 
+     dataset = dataset.add_column("labels", labels)
+ 
+     return dataset
+ 
+ def convert_predictions(input_logits):
+     all_probs = []
+     for logits in input_logits:
+         logits = logits.reshape(-1, 2)
+ 
+         # Compute probabilities for class 1 (binding site)
+         probabilities_class1 = expit(logits[:, 1] - logits[:, 0])
+ 
+         all_probs.append(probabilities_class1)
+ 
+     return np.concatenate(all_probs)
+ 
+ def normalize_scores(scores):
+     min_score = np.min(scores)
+     max_score = np.max(scores)
+     return (scores - min_score) / (max_score - min_score) if max_score > min_score else scores
+ 
+ def predict_protein_sequence(test_one_letter_sequence):
+     dummy_labels = [np.zeros(len(test_one_letter_sequence))]
+ 
+     # Replace uncommon amino acids with "X"
+     test_one_letter_sequence = test_one_letter_sequence.replace("O", "X").replace("B", "X").replace("U", "X").replace("Z", "X").replace("J", "X")
+ 
+     # Add spaces between each amino acid for ProtT5 and ProstT5 models (their tokenizers expect space-separated residues)
+     if ("Rostlab" in checkpoint) or ("prot_t5" in checkpoint):
+         test_one_letter_sequence = " ".join(test_one_letter_sequence)
+ 
+     # Add <AA2fold> for ProstT5 model input format
+     if "ProstT5" in checkpoint:
+         test_one_letter_sequence = "<AA2fold> " + test_one_letter_sequence
+ 
+     test_dataset = create_dataset(tokenizer, [test_one_letter_sequence], dummy_labels, checkpoint)
+ 
+     if ("esm" in checkpoint) or ("ProstT5" in checkpoint):
+         data_collator = DataCollatorForTokenClassificationESM(tokenizer)
+     else:
+         data_collator = DataCollatorForTokenClassification(tokenizer)
+ 
+     test_loader = DataLoader(test_dataset, batch_size=1, collate_fn=data_collator)
+ 
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     model.to(device)
+     model.eval()
+ 
+     for batch in test_loader:
+         input_ids = batch['input_ids'].to(device)
+         attention_mask = batch['attention_mask'].to(device)
+         # labels are present in the batch but not needed for inference
+ 
+         with torch.no_grad():
+             outputs = model(input_ids, attention_mask=attention_mask)
+         logits = outputs.logits.detach().cpu().numpy()
+ 
+     # Convert logits to per-residue probabilities and min-max normalize them
+     probabilities = convert_predictions(logits)
+     normalized_scores = normalize_scores(probabilities)
+ 
+     return normalized_scores
+ 
+ 
+ # Create Gradio interface
+ interface = gr.Interface(
+     fn=predict_protein_sequence,
+     inputs=gr.Textbox(lines=2, placeholder="Enter protein sequence here..."),
+     outputs=gr.Textbox(label="Binding site probability per residue"),
+     title="Protein sequence - Binding site prediction",
+     description="Enter a protein sequence to predict its possible binding sites.",
+ )
+ 
+ # Launch the app
+ interface.launch()
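
Once the app is running, the interface can also be queried programmatically. A minimal sketch, assuming the `gradio_client` package is installed (it is not listed in requirements.txt) and the app is reachable at Gradio's default local URL; the endpoint name and the example sequence are illustrative assumptions, not part of the upload:

    from gradio_client import Client

    # Connect to the running app (replace the URL with the Space URL if deployed remotely)
    client = Client("http://127.0.0.1:7860")

    # gr.Interface exposes its single function under the default "/predict" endpoint
    scores = client.predict("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", api_name="/predict")
    print(scores)  # stringified per-residue binding-site probabilities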
model_loader.py ADDED
@@ -0,0 +1,641 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from torch.utils.data import DataLoader
+ 
+ import re
+ import numpy as np
+ import os
+ import pandas as pd
+ import copy
+ 
+ import transformers, datasets
+ from transformers.modeling_outputs import TokenClassifierOutput
+ from transformers.models.t5.modeling_t5 import T5Config, T5PreTrainedModel, T5Stack
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
+ from transformers import T5EncoderModel, T5Tokenizer
+ from transformers.models.esm.modeling_esm import EsmPreTrainedModel, EsmModel
+ from transformers import AutoTokenizer
+ from transformers import TrainingArguments, Trainer, set_seed
+ from transformers import DataCollatorForTokenClassification
+ 
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional, Tuple, Union
+ 
+ # for custom DataCollator
+ from transformers.data.data_collator import DataCollatorMixin
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
+ from transformers.utils import PaddingStrategy
+ 
+ from datasets import Dataset
+ 
+ from scipy.special import expit
+ 
+ # peft is listed in requirements.txt and is needed for the LoraConfig / inject_adapter_in_model paths below
+ from peft import inject_adapter_in_model, LoraConfig
+ 
+ cnn_head = True          # set True for Rostlab/prot_t5_xl_half_uniref50-enc
+ ffn_head = False
+ transformer_head = False
+ custom_lora = True       # only True for Rostlab/prot_t5_xl_half_uniref50-enc
+ 
+ class ClassConfig:
+     def __init__(self, dropout=0.2, num_labels=3):
+         self.dropout_rate = dropout
+         self.num_labels = num_labels
+ 
+ class T5EncoderForTokenClassification(T5PreTrainedModel):
+ 
+     def __init__(self, config: T5Config, class_config: ClassConfig):
+         super().__init__(config)
+         self.num_labels = class_config.num_labels
+         self.config = config
+ 
+         self.shared = nn.Embedding(config.vocab_size, config.d_model)
+ 
+         encoder_config = copy.deepcopy(config)
+         encoder_config.use_cache = False
+         encoder_config.is_encoder_decoder = False
+         self.encoder = T5Stack(encoder_config, self.shared)
+ 
+         self.dropout = nn.Dropout(class_config.dropout_rate)
+ 
+         # Initialize the classification head selected by the module-level flags
+         if cnn_head:
+             self.cnn = nn.Conv1d(config.hidden_size, 512, kernel_size=3, padding=1)
+             self.classifier = nn.Linear(512, class_config.num_labels)
+         elif ffn_head:
+             # Multi-layer feed-forward network (FFN) head
+             self.ffn = nn.Sequential(
+                 nn.Linear(config.hidden_size, 512),
+                 nn.ReLU(),
+                 nn.Linear(512, 256),
+                 nn.ReLU(),
+                 nn.Linear(256, class_config.num_labels)
+             )
+         elif transformer_head:
+             # Transformer layer head
+             encoder_layer = nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=8)
+             self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)
+             self.classifier = nn.Linear(config.hidden_size, class_config.num_labels)
+         else:
+             # Default classification head
+             self.classifier = nn.Linear(config.hidden_size, class_config.num_labels)
+ 
+         self.post_init()
+ 
+         # Model parallel
+         self.model_parallel = False
+         self.device_map = None
+ 
+     def parallelize(self, device_map=None):
+         self.device_map = (
+             get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
+             if device_map is None
+             else device_map
+         )
+         assert_device_map(self.device_map, len(self.encoder.block))
+         self.encoder.parallelize(self.device_map)
+         self.classifier = self.classifier.to(self.encoder.first_device)
+         self.model_parallel = True
+ 
+     def deparallelize(self):
+         self.encoder.deparallelize()
+         self.encoder = self.encoder.to("cpu")
+         self.model_parallel = False
+         self.device_map = None
+         torch.cuda.empty_cache()
+ 
+     def get_input_embeddings(self):
+         return self.shared
+ 
+     def set_input_embeddings(self, new_embeddings):
+         self.shared = new_embeddings
+         self.encoder.set_input_embeddings(new_embeddings)
+ 
+     def get_encoder(self):
+         return self.encoder
+ 
+     def _prune_heads(self, heads_to_prune):
+         for layer, heads in heads_to_prune.items():
+             self.encoder.layer[layer].attention.prune_heads(heads)
+ 
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         head_mask=None,
+         inputs_embeds=None,
+         labels=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None,
+     ):
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ 
+         outputs = self.encoder(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             head_mask=head_mask,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+ 
+         sequence_output = outputs[0]
+         sequence_output = self.dropout(sequence_output)
+ 
+         # Forward pass through the selected head
+         if cnn_head:
+             # CNN head
+             sequence_output = sequence_output.permute(0, 2, 1)  # Prepare shape for CNN
+             cnn_output = self.cnn(sequence_output)
+             cnn_output = F.relu(cnn_output)
+             cnn_output = cnn_output.permute(0, 2, 1)  # Shape back for classifier
+             logits = self.classifier(cnn_output)
+         elif ffn_head:
+             # FFN head
+             logits = self.ffn(sequence_output)
+         elif transformer_head:
+             # Transformer head
+             transformer_output = self.transformer_encoder(sequence_output)
+             logits = self.classifier(transformer_output)
+         else:
+             # Default classification head
+             logits = self.classifier(sequence_output)
+ 
+         loss = None
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+             active_loss = attention_mask.view(-1) == 1
+             active_logits = logits.view(-1, self.num_labels)
+             active_labels = torch.where(
+                 active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)
+             )
+             valid_logits = active_logits[active_labels != -100]
+             valid_labels = active_labels[active_labels != -100]
+             valid_labels = valid_labels.to(valid_logits.device)
+             valid_labels = valid_labels.long()
+             loss = loss_fct(valid_logits, valid_labels)
+ 
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+ 
+         return TokenClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+ 
+ # Modifies an existing transformer and introduces the LoRA layers
+ 
+ class CustomLoRAConfig:
+     def __init__(self):
+         self.lora_rank = 4
+         self.lora_init_scale = 0.01
+         self.lora_modules = ".*SelfAttention|.*EncDecAttention"
+         self.lora_layers = "q|k|v|o"
+         self.trainable_param_names = ".*layer_norm.*|.*lora_[ab].*"
+         self.lora_scaling_rank = 1
+         # lora_modules and lora_layers are specified with regular expressions
+         # see https://www.w3schools.com/python/python_regex.asp for reference
+ 
+ class LoRALinear(nn.Module):
+     def __init__(self, linear_layer, rank, scaling_rank, init_scale):
+         super().__init__()
+         self.in_features = linear_layer.in_features
+         self.out_features = linear_layer.out_features
+         self.rank = rank
+         self.scaling_rank = scaling_rank
+         self.weight = linear_layer.weight
+         self.bias = linear_layer.bias
+         if self.rank > 0:
+             self.lora_a = nn.Parameter(torch.randn(rank, linear_layer.in_features) * init_scale)
+             if init_scale < 0:
+                 self.lora_b = nn.Parameter(torch.randn(linear_layer.out_features, rank) * init_scale)
+             else:
+                 self.lora_b = nn.Parameter(torch.zeros(linear_layer.out_features, rank))
+         if self.scaling_rank:
+             self.multi_lora_a = nn.Parameter(
+                 torch.ones(self.scaling_rank, linear_layer.in_features)
+                 + torch.randn(self.scaling_rank, linear_layer.in_features) * init_scale
+             )
+             if init_scale < 0:
+                 self.multi_lora_b = nn.Parameter(
+                     torch.ones(linear_layer.out_features, self.scaling_rank)
+                     + torch.randn(linear_layer.out_features, self.scaling_rank) * init_scale
+                 )
+             else:
+                 self.multi_lora_b = nn.Parameter(torch.ones(linear_layer.out_features, self.scaling_rank))
+ 
+     def forward(self, input):
+         if self.scaling_rank == 1 and self.rank == 0:
+             # parsimonious implementation for ia3 and lora scaling
+             if self.multi_lora_a.requires_grad:
+                 hidden = F.linear((input * self.multi_lora_a.flatten()), self.weight, self.bias)
+             else:
+                 hidden = F.linear(input, self.weight, self.bias)
+             if self.multi_lora_b.requires_grad:
+                 hidden = hidden * self.multi_lora_b.flatten()
+             return hidden
+         else:
+             # general implementation for lora (adding and scaling)
+             weight = self.weight
+             if self.scaling_rank:
+                 weight = weight * torch.matmul(self.multi_lora_b, self.multi_lora_a) / self.scaling_rank
+             if self.rank:
+                 weight = weight + torch.matmul(self.lora_b, self.lora_a) / self.rank
+             return F.linear(input, weight, self.bias)
+ 
+     def extra_repr(self):
+         return "in_features={}, out_features={}, bias={}, rank={}, scaling_rank={}".format(
+             self.in_features, self.out_features, self.bias is not None, self.rank, self.scaling_rank
+         )
+ 
+ 
+ def modify_with_lora(transformer, config):
+     for m_name, module in dict(transformer.named_modules()).items():
+         if re.fullmatch(config.lora_modules, m_name):
+             for c_name, layer in dict(module.named_children()).items():
+                 if re.fullmatch(config.lora_layers, c_name):
+                     assert isinstance(
+                         layer, nn.Linear
+                     ), f"LoRA can only be applied to torch.nn.Linear, but {layer} is {type(layer)}."
+                     setattr(
+                         module,
+                         c_name,
+                         LoRALinear(layer, config.lora_rank, config.lora_scaling_rank, config.lora_init_scale),
+                     )
+     return transformer
+ 
+ 
+ def load_T5_model_classification(checkpoint, num_labels, half_precision, full=False, deepspeed=True):
+     # Load model and tokenizer
+ 
+     if "ankh" in checkpoint:
+         model = T5EncoderModel.from_pretrained(checkpoint)
+         tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+ 
+     elif "prot_t5" in checkpoint:
+         # possible to load the half precision model (thanks to @pawel-rezo for pointing that out)
+         if half_precision and deepspeed:
+             #tokenizer = T5Tokenizer.from_pretrained('Rostlab/prot_t5_xl_half_uniref50-enc', do_lower_case=False)
+             #model = T5EncoderModel.from_pretrained("Rostlab/prot_t5_xl_half_uniref50-enc", torch_dtype=torch.float16)#.to(torch.device('cuda')
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint, do_lower_case=False)
+             model = T5EncoderModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(torch.device('cuda'))
+         else:
+             model = T5EncoderModel.from_pretrained(checkpoint)
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint)
+ 
+     elif "ProstT5" in checkpoint:
+         if half_precision and deepspeed:
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint, do_lower_case=False)
+             model = T5EncoderModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(torch.device('cuda'))
+         else:
+             model = T5EncoderModel.from_pretrained(checkpoint)
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint)
+ 
+     # Create new classifier model with PT5 dimensions
+     class_config = ClassConfig(num_labels=num_labels)
+     class_model = T5EncoderForTokenClassification(model.config, class_config)
+ 
+     # Set encoder and embedding weights to checkpoint weights
+     class_model.shared = model.shared
+     class_model.encoder = model.encoder
+ 
+     # Replace the plain encoder model with the classification model
+     model = class_model
+     del class_model
+ 
+     if full:
+         return model, tokenizer
+ 
+     # Print number of trainable parameters
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     print("T5_Classifier\nTrainable Parameter: " + str(params))
+ 
+     if custom_lora:
+         # the linear CustomLoRAConfig allows better quality predictions, but more memory is needed
+         # Add model modification lora
+         config = CustomLoRAConfig()
+ 
+         # Add LoRA layers
+         model = modify_with_lora(model, config)
+ 
+         # Freeze Embeddings and Encoder (except LoRA)
+         for (param_name, param) in model.shared.named_parameters():
+             param.requires_grad = False
+         for (param_name, param) in model.encoder.named_parameters():
+             param.requires_grad = False
+ 
+         for (param_name, param) in model.named_parameters():
+             if re.fullmatch(config.trainable_param_names, param_name):
+                 param.requires_grad = True
+ 
+     else:
+         # lora modification via peft
+         peft_config = LoraConfig(
+             r=4, lora_alpha=1, bias="all", target_modules=["q", "k", "v", "o"]
+         )
+ 
+         model = inject_adapter_in_model(peft_config, model)
+ 
+     # Unfreeze the prediction head
+     for (param_name, param) in model.classifier.named_parameters():
+         param.requires_grad = True
+ 
+     # Print trainable Parameter
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     print("T5_LoRA_Classifier\nTrainable Parameter: " + str(params) + "\n")
+ 
+     return model, tokenizer
+ 
+ class EsmForTokenClassificationCustom(EsmPreTrainedModel):
+     _keys_to_ignore_on_load_unexpected = [r"pooler"]
+     _keys_to_ignore_on_load_missing = [r"position_ids", r"cnn", r"ffn", r"transformer"]
+ 
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.esm = EsmModel(config, add_pooling_layer=False)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ 
+         if cnn_head:
+             self.cnn = nn.Conv1d(config.hidden_size, 512, kernel_size=3, padding=1)
+             self.classifier = nn.Linear(512, config.num_labels)
+         elif ffn_head:
+             # Multi-layer feed-forward network (FFN) as an alternative head
+             self.ffn = nn.Sequential(
+                 nn.Linear(config.hidden_size, 512),
+                 nn.ReLU(),
+                 nn.Linear(512, 256),
+                 nn.ReLU(),
+                 nn.Linear(256, config.num_labels)
+             )
+         elif transformer_head:
+             # Transformer layer as an alternative head
+             encoder_layer = nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=8)
+             self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)
+             self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+         else:
+             # Default classification head
+             self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+ 
+         self.init_weights()
+ 
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, TokenClassifierOutput]:
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         outputs = self.esm(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+ 
+         sequence_output = outputs[0]
+         sequence_output = self.dropout(sequence_output)
+ 
+         if cnn_head:
+             sequence_output = sequence_output.transpose(1, 2)
+             sequence_output = self.cnn(sequence_output)
+             sequence_output = sequence_output.transpose(1, 2)
+             logits = self.classifier(sequence_output)
+         elif ffn_head:
+             logits = self.ffn(sequence_output)
+         elif transformer_head:
+             # Apply transformer encoder for the transformer head
+             sequence_output = self.transformer_encoder(sequence_output)
+             logits = self.classifier(sequence_output)
+         else:
+             logits = self.classifier(sequence_output)
+ 
+         loss = None
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+             active_loss = attention_mask.view(-1) == 1
+             active_logits = logits.view(-1, self.num_labels)
+             active_labels = torch.where(
+                 active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)
+             )
+             valid_logits = active_logits[active_labels != -100]
+             valid_labels = active_labels[active_labels != -100]
+             # keep labels on the same device as the logits (works on CPU as well as GPU)
+             valid_labels = valid_labels.long().to(valid_logits.device)
+             loss = loss_fct(valid_logits, valid_labels)
+ 
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+ 
+         return TokenClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+ 
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear) or isinstance(module, nn.Conv1d):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+ 
+ # based on transformers DataCollatorForTokenClassification
+ @dataclass
+ class DataCollatorForTokenClassificationESM(DataCollatorMixin):
+     """
+     Data collator that will dynamically pad the inputs received, as well as the labels.
+     Args:
+         tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+             The tokenizer used for encoding the data.
+         padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+               acceptable input length for the model if that argument is not provided.
+             - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+         max_length (`int`, *optional*):
+             Maximum length of the returned list and optionally padding length (see above).
+         pad_to_multiple_of (`int`, *optional*):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+         label_pad_token_id (`int`, *optional*, defaults to -100):
+             The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
+         return_tensors (`str`):
+             The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+     """
+ 
+     tokenizer: PreTrainedTokenizerBase
+     padding: Union[bool, str, PaddingStrategy] = True
+     max_length: Optional[int] = None
+     pad_to_multiple_of: Optional[int] = None
+     label_pad_token_id: int = -100
+     return_tensors: str = "pt"
+ 
+     def torch_call(self, features):
+         import torch
+ 
+         label_name = "label" if "label" in features[0].keys() else "labels"
+         labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+ 
+         no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
+ 
+         batch = self.tokenizer.pad(
+             no_labels_features,
+             padding=self.padding,
+             max_length=self.max_length,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+ 
+         if labels is None:
+             return batch
+ 
+         sequence_length = batch["input_ids"].shape[1]
+         padding_side = self.tokenizer.padding_side
+ 
+         def to_list(tensor_or_iterable):
+             if isinstance(tensor_or_iterable, torch.Tensor):
+                 return tensor_or_iterable.tolist()
+             return list(tensor_or_iterable)
+ 
+         if padding_side == "right":
+             batch[label_name] = [
+                 # to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+                 # changed to pad the special tokens at the beginning and end of the sequence
+                 [self.label_pad_token_id] + to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label) - 1) for label in labels
+             ]
+         else:
+             batch[label_name] = [
+                 [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
+             ]
+ 
+         batch[label_name] = torch.tensor(batch[label_name], dtype=torch.float)
+         return batch
+ 
+ def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+     """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+     import torch
+ 
+     # Tensorize if necessary.
+     if isinstance(examples[0], (list, tuple, np.ndarray)):
+         examples = [torch.tensor(e, dtype=torch.long) for e in examples]
+ 
+     length_of_first = examples[0].size(0)
+ 
+     # Check if padding is necessary.
+     are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
+     if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+         return torch.stack(examples, dim=0)
+ 
+     # If yes, check if we have a `pad_token`.
+     if tokenizer._pad_token is None:
+         raise ValueError(
+             "You are attempting to pad samples but the tokenizer you are using"
+             f" ({tokenizer.__class__.__name__}) does not have a pad token."
+         )
+ 
+     # Creating the full tensor and filling it with our data.
+     max_length = max(x.size(0) for x in examples)
+     if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+         max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+     result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
+     for i, example in enumerate(examples):
+         if tokenizer.padding_side == "right":
+             result[i, : example.shape[0]] = example
+         else:
+             result[i, -example.shape[0] :] = example
+     return result
+ 
+ def tolist(x):
+     if isinstance(x, list):
+         return x
+     elif hasattr(x, "numpy"):  # Checks for TF tensors without needing the import
+         x = x.numpy()
+     return x.tolist()
+ 
+ # load ESM2 models
+ def load_esm_model_classification(checkpoint, num_labels, half_precision, full=False, deepspeed=True):
+ 
+     tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+ 
+     if half_precision and deepspeed:
+         model = EsmForTokenClassificationCustom.from_pretrained(checkpoint,
+                                                                  num_labels=num_labels,
+                                                                  ignore_mismatched_sizes=True,
+                                                                  torch_dtype=torch.float16)
+     else:
+         model = EsmForTokenClassificationCustom.from_pretrained(checkpoint,
+                                                                  num_labels=num_labels,
+                                                                  ignore_mismatched_sizes=True)
+ 
+     if full:
+         return model, tokenizer
+ 
+     peft_config = LoraConfig(
+         r=4, lora_alpha=1, bias="all", target_modules=["query", "key", "value", "dense"]
+     )
+ 
+     model = inject_adapter_in_model(peft_config, model)
+ 
+     #model.gradient_checkpointing_enable()
+ 
+     # Unfreeze the prediction head
+     for (param_name, param) in model.classifier.named_parameters():
+         param.requires_grad = True
+ 
+     return model, tokenizer
+ 
+ def load_model():
+     checkpoint = 'ThorbenF/prot_t5_xl_uniref50'
+     #best_model_path = 'ThorbenF/prot_t5_xl_uniref50/cpt.pth'
+     full = False
+     deepspeed = False
+     mixed = False
+     num_labels = 2
+ 
+     print(checkpoint, num_labels, mixed, full, deepspeed)
+ 
+     # Determine model type and load accordingly
+     if "esm" in checkpoint:
+         model, tokenizer = load_esm_model_classification(checkpoint, num_labels, mixed, full, deepspeed)
+     else:
+         model, tokenizer = load_T5_model_classification(checkpoint, num_labels, mixed, full, deepspeed)
+ 
+     checkpoint_dir = model.config.name_or_path  # This will point to the local directory
+ 
+     print(checkpoint_dir)
+     # Construct the path to the custom checkpoint file
+     best_model_path = os.path.join(checkpoint_dir, 'cpt.pth')
+ 
+     # Load the best model state
+     state_dict = torch.load(best_model_path, weights_only=True)
+     model.load_state_dict(state_dict)
+ 
+     return model, tokenizer
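
For debugging outside the Gradio UI, the loader can be exercised directly. A minimal sketch, assuming `load_model()` resolves the checkpoint and `cpt.pth` as written above; the example sequence is illustrative, and residues are space-separated because the ProtT5 tokenizer expects that format:

    import torch
    from model_loader import load_model

    model, tokenizer = load_model()
    model.eval()

    seq = " ".join("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ")   # ProtT5-style, space-separated input
    inputs = tokenizer(seq, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits                   # shape: (1, seq_len, num_labels)
    print(logits.shape)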
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ torch>=1.13.0
+ transformers>=4.30.0
+ datasets>=2.9.0
+ peft>=0.0.7
+ scipy>=1.7.0
+ pandas>=1.1.0
+ numpy>=1.19.0
+ scikit-learn>=0.24.0
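
After installation, the version floors can be sanity-checked with a small sketch (scikit-learn imports as `sklearn`; the versions printed depend on the environment):

    import torch, transformers, datasets, peft, scipy, pandas, numpy, sklearn

    for mod in (torch, transformers, datasets, peft, scipy, pandas, numpy, sklearn):
        print(mod.__name__, mod.__version__)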