ThorbenF committed
Commit ce6b085 · 1 Parent(s): 6963cf4

Your commit message
.ipynb_checkpoints/app-checkpoint.py ADDED
@@ -0,0 +1,129 @@
+ import gradio as gr
+ from model_loader import load_model, DataCollatorForTokenClassificationESM
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from torch.utils.data import DataLoader
+
+ import re
+ import numpy as np
+ import os
+ import pandas as pd
+ import copy
+
+ import transformers, datasets
+ from transformers.modeling_outputs import TokenClassifierOutput
+ from transformers.models.t5.modeling_t5 import T5Config, T5PreTrainedModel, T5Stack
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
+ from transformers import T5EncoderModel, T5Tokenizer
+ from transformers.models.esm.modeling_esm import EsmPreTrainedModel, EsmModel
+ from transformers import AutoTokenizer
+ from transformers import TrainingArguments, Trainer, set_seed
+ from transformers import DataCollatorForTokenClassification
+
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional, Tuple, Union
+
+ # for custom DataCollator
+ from transformers.data.data_collator import DataCollatorMixin
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
+ from transformers.utils import PaddingStrategy
+
+ from datasets import Dataset
+
+ from scipy.special import expit
+
+ #import peft
+ #from peft import get_peft_config, PeftModel, PeftConfig, inject_adapter_in_model, LoraConfig
+
+ model, tokenizer = load_model()
+
+ # Module-level settings used below: `checkpoint` must match the checkpoint loaded in
+ # model_loader.load_model(); `max_length` is an assumed tokenizer truncation length,
+ # adjust it to the setting used during training.
+ checkpoint = 'ThorbenF/prot_t5_xl_uniref50'
+ max_length = 1500
+
+ def create_dataset(tokenizer, seqs, labels, checkpoint):
+
+     tokenized = tokenizer(seqs, max_length=max_length, padding=False, truncation=True)
+     dataset = Dataset.from_dict(tokenized)
+
+     # ESM and ProstT5 tokenizers add two special tokens, the other models add one
+     if ("esm" in checkpoint) or ("ProstT5" in checkpoint):
+         labels = [l[:max_length-2] for l in labels]
+     else:
+         labels = [l[:max_length-1] for l in labels]
+
+     dataset = dataset.add_column("labels", labels)
+
+     return dataset
+
+ def convert_predictions(input_logits):
+     all_probs = []
+     for logits in input_logits:
+         logits = logits.reshape(-1, 2)
+
+         # Compute the probability of class 1 (binding) from the two-class logits
+         probabilities_class1 = expit(logits[:, 1] - logits[:, 0])
+
+         all_probs.append(probabilities_class1)
+
+     return np.concatenate(all_probs)
+
+ def normalize_scores(scores):
+     min_score = np.min(scores)
+     max_score = np.max(scores)
+     return (scores - min_score) / (max_score - min_score) if max_score > min_score else scores
+
+ def predict_protein_sequence(test_one_letter_sequence):
+     dummy_labels = [np.zeros(len(test_one_letter_sequence))]
+     # Replace uncommon amino acids with "X"
+     test_one_letter_sequence = test_one_letter_sequence.replace("O", "X").replace("B", "X").replace("U", "X").replace("Z", "X").replace("J", "X")
+
+     # Add spaces between each amino acid for ProtT5 and ProstT5 models
+     if "Rostlab" in checkpoint:
+         test_one_letter_sequence = " ".join(test_one_letter_sequence)
+
+     # Add <AA2fold> for ProstT5 model input format
+     if "ProstT5" in checkpoint:
+         test_one_letter_sequence = "<AA2fold> " + test_one_letter_sequence
+
+     test_dataset = create_dataset(tokenizer, [test_one_letter_sequence], dummy_labels, checkpoint)
+
+     if ("esm" in checkpoint) or ("ProstT5" in checkpoint):
+         data_collator = DataCollatorForTokenClassificationESM(tokenizer)
+     else:
+         data_collator = DataCollatorForTokenClassification(tokenizer)
+
+     test_loader = DataLoader(test_dataset, batch_size=1, collate_fn=data_collator)
+
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     model.to(device)
+     for batch in test_loader:
+         input_ids = batch['input_ids'].to(device)
+         attention_mask = batch['attention_mask'].to(device)
+         labels = batch['labels']  # Ensure to get labels from batch
+
+         outputs = model(input_ids, attention_mask=attention_mask)
+         logits = outputs.logits.detach().cpu().numpy()
+
+     logits = convert_predictions(logits)
+     normalized_scores = normalize_scores(logits)
+
+     return normalized_scores
+
+
+ # Create Gradio interface
+ interface = gr.Interface(
+     fn=predict_protein_sequence,
+     inputs=gr.Textbox(lines=2, placeholder="Enter protein sequence here..."),
+     outputs=gr.Textbox(label="Binding site probability per residue"),
+     title="Protein sequence - Binding site prediction",
+     description="Enter a protein sequence to predict its possible binding sites.",
+ )
+
+ # Launch the app
+ interface.launch()
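
For reference, a minimal sketch of how predict_protein_sequence above could be exercised outside the Gradio UI, assuming it is called in the same interpreter (for example before interface.launch()); the example sequence and the 0.8 cut-off are illustrative only:

# Minimal sketch (not part of the app): pair each residue with its normalized score.
seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"  # illustrative dummy sequence
scores = predict_protein_sequence(seq)

# The returned array also covers special-token positions, so trim to the sequence length.
per_residue = list(zip(seq, scores[:len(seq)]))
candidate_sites = [(i + 1, aa, round(float(s), 3)) for i, (aa, s) in enumerate(per_residue) if s > 0.8]
print(candidate_sites)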
.ipynb_checkpoints/model_loader-checkpoint.py ADDED
@@ -0,0 +1,641 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+ from torch.utils.data import DataLoader
+
+ import re
+ import numpy as np
+ import os
+ import pandas as pd
+ import copy
+
+ import transformers, datasets
+ from transformers.modeling_outputs import TokenClassifierOutput
+ from transformers.models.t5.modeling_t5 import T5Config, T5PreTrainedModel, T5Stack
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
+ from transformers import T5EncoderModel, T5Tokenizer
+ from transformers.models.esm.modeling_esm import EsmPreTrainedModel, EsmModel
+ from transformers import AutoTokenizer
+ from transformers import TrainingArguments, Trainer, set_seed
+ from transformers import DataCollatorForTokenClassification
+
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional, Tuple, Union
+
+ # for custom DataCollator
+ from transformers.data.data_collator import DataCollatorMixin
+ from transformers.tokenization_utils_base import PreTrainedTokenizerBase
+ from transformers.utils import PaddingStrategy
+
+ from datasets import Dataset
+
+ from scipy.special import expit
+
+ # peft is required for the LoraConfig / inject_adapter_in_model code paths below
+ import peft
+ from peft import get_peft_config, PeftModel, PeftConfig, inject_adapter_in_model, LoraConfig
+
+ cnn_head=True #False set True for Rostlab/prot_t5_xl_half_uniref50-enc
+ ffn_head=False #False
+ transformer_head=False
+ custom_lora=True #False #only true for Rostlab/prot_t5_xl_half_uniref50-enc
+
+ class ClassConfig:
+     def __init__(self, dropout=0.2, num_labels=3):
+         self.dropout_rate = dropout
+         self.num_labels = num_labels
+
+ class T5EncoderForTokenClassification(T5PreTrainedModel):
+
+     def __init__(self, config: T5Config, class_config: ClassConfig):
+         super().__init__(config)
+         self.num_labels = class_config.num_labels
+         self.config = config
+
+         self.shared = nn.Embedding(config.vocab_size, config.d_model)
+
+         encoder_config = copy.deepcopy(config)
+         encoder_config.use_cache = False
+         encoder_config.is_encoder_decoder = False
+         self.encoder = T5Stack(encoder_config, self.shared)
+
+         self.dropout = nn.Dropout(class_config.dropout_rate)
+
+         # Initialize different heads based on class_config
+         if cnn_head:
+             self.cnn = nn.Conv1d(config.hidden_size, 512, kernel_size=3, padding=1)
+             self.classifier = nn.Linear(512, class_config.num_labels)
+         elif ffn_head:
+             # Multi-layer feed-forward network (FFN) head
+             self.ffn = nn.Sequential(
+                 nn.Linear(config.hidden_size, 512),
+                 nn.ReLU(),
+                 nn.Linear(512, 256),
+                 nn.ReLU(),
+                 nn.Linear(256, class_config.num_labels)
+             )
+         elif transformer_head:
+             # Transformer layer head
+             encoder_layer = nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=8)
+             self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)
+             self.classifier = nn.Linear(config.hidden_size, class_config.num_labels)
+         else:
+             # Default classification head
+             self.classifier = nn.Linear(config.hidden_size, class_config.num_labels)
+
+         self.post_init()
+
+         # Model parallel
+         self.model_parallel = False
+         self.device_map = None
+
+     def parallelize(self, device_map=None):
+         self.device_map = (
+             get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))
+             if device_map is None
+             else device_map
+         )
+         assert_device_map(self.device_map, len(self.encoder.block))
+         self.encoder.parallelize(self.device_map)
+         self.classifier = self.classifier.to(self.encoder.first_device)
+         self.model_parallel = True
+
+     def deparallelize(self):
+         self.encoder.deparallelize()
+         self.encoder = self.encoder.to("cpu")
+         self.model_parallel = False
+         self.device_map = None
+         torch.cuda.empty_cache()
+
+     def get_input_embeddings(self):
+         return self.shared
+
+     def set_input_embeddings(self, new_embeddings):
+         self.shared = new_embeddings
+         self.encoder.set_input_embeddings(new_embeddings)
+
+     def get_encoder(self):
+         return self.encoder
+
+     def _prune_heads(self, heads_to_prune):
+         for layer, heads in heads_to_prune.items():
+             self.encoder.layer[layer].attention.prune_heads(heads)
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         head_mask=None,
+         inputs_embeds=None,
+         labels=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None,
+     ):
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.encoder(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             inputs_embeds=inputs_embeds,
+             head_mask=head_mask,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         sequence_output = outputs[0]
+         sequence_output = self.dropout(sequence_output)
+
+         # Forward pass through the selected head
+         if cnn_head:
+             # CNN head
+             sequence_output = sequence_output.permute(0, 2, 1)  # Prepare shape for CNN
+             cnn_output = self.cnn(sequence_output)
+             cnn_output = F.relu(cnn_output)
+             cnn_output = cnn_output.permute(0, 2, 1)  # Shape back for classifier
+             logits = self.classifier(cnn_output)
+         elif ffn_head:
+             # FFN head
+             logits = self.ffn(sequence_output)
+         elif transformer_head:
+             # Transformer head
+             transformer_output = self.transformer_encoder(sequence_output)
+             logits = self.classifier(transformer_output)
+         else:
+             # Default classification head
+             logits = self.classifier(sequence_output)
+
+         loss = None
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+             active_loss = attention_mask.view(-1) == 1
+             active_logits = logits.view(-1, self.num_labels)
+             active_labels = torch.where(
+                 active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)
+             )
+             valid_logits = active_logits[active_labels != -100]
+             valid_labels = active_labels[active_labels != -100]
+             valid_labels = valid_labels.to(valid_logits.device)
+             valid_labels = valid_labels.long()
+             loss = loss_fct(valid_logits, valid_labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return TokenClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+ # Modifies an existing transformer and introduces the LoRA layers
+
+ class CustomLoRAConfig:
+     def __init__(self):
+         self.lora_rank = 4
+         self.lora_init_scale = 0.01
+         self.lora_modules = ".*SelfAttention|.*EncDecAttention"
+         self.lora_layers = "q|k|v|o"
+         self.trainable_param_names = ".*layer_norm.*|.*lora_[ab].*"
+         self.lora_scaling_rank = 1
+         # lora_modules and lora_layers are specified with regular expressions
+         # see https://www.w3schools.com/python/python_regex.asp for reference
+
+ class LoRALinear(nn.Module):
+     def __init__(self, linear_layer, rank, scaling_rank, init_scale):
+         super().__init__()
+         self.in_features = linear_layer.in_features
+         self.out_features = linear_layer.out_features
+         self.rank = rank
+         self.scaling_rank = scaling_rank
+         self.weight = linear_layer.weight
+         self.bias = linear_layer.bias
+         if self.rank > 0:
+             self.lora_a = nn.Parameter(torch.randn(rank, linear_layer.in_features) * init_scale)
+             if init_scale < 0:
+                 self.lora_b = nn.Parameter(torch.randn(linear_layer.out_features, rank) * init_scale)
+             else:
+                 self.lora_b = nn.Parameter(torch.zeros(linear_layer.out_features, rank))
+         if self.scaling_rank:
+             self.multi_lora_a = nn.Parameter(
+                 torch.ones(self.scaling_rank, linear_layer.in_features)
+                 + torch.randn(self.scaling_rank, linear_layer.in_features) * init_scale
+             )
+             if init_scale < 0:
+                 self.multi_lora_b = nn.Parameter(
+                     torch.ones(linear_layer.out_features, self.scaling_rank)
+                     + torch.randn(linear_layer.out_features, self.scaling_rank) * init_scale
+                 )
+             else:
+                 self.multi_lora_b = nn.Parameter(torch.ones(linear_layer.out_features, self.scaling_rank))
+
+     def forward(self, input):
+         if self.scaling_rank == 1 and self.rank == 0:
+             # parsimonious implementation for ia3 and lora scaling
+             if self.multi_lora_a.requires_grad:
+                 hidden = F.linear((input * self.multi_lora_a.flatten()), self.weight, self.bias)
+             else:
+                 hidden = F.linear(input, self.weight, self.bias)
+             if self.multi_lora_b.requires_grad:
+                 hidden = hidden * self.multi_lora_b.flatten()
+             return hidden
+         else:
+             # general implementation for lora (adding and scaling)
+             weight = self.weight
+             if self.scaling_rank:
+                 weight = weight * torch.matmul(self.multi_lora_b, self.multi_lora_a) / self.scaling_rank
+             if self.rank:
+                 weight = weight + torch.matmul(self.lora_b, self.lora_a) / self.rank
+             return F.linear(input, weight, self.bias)
+
+     def extra_repr(self):
+         return "in_features={}, out_features={}, bias={}, rank={}, scaling_rank={}".format(
+             self.in_features, self.out_features, self.bias is not None, self.rank, self.scaling_rank
+         )
+
+
+ def modify_with_lora(transformer, config):
+     for m_name, module in dict(transformer.named_modules()).items():
+         if re.fullmatch(config.lora_modules, m_name):
+             for c_name, layer in dict(module.named_children()).items():
+                 if re.fullmatch(config.lora_layers, c_name):
+                     assert isinstance(
+                         layer, nn.Linear
+                     ), f"LoRA can only be applied to torch.nn.Linear, but {layer} is {type(layer)}."
+                     setattr(
+                         module,
+                         c_name,
+                         LoRALinear(layer, config.lora_rank, config.lora_scaling_rank, config.lora_init_scale),
+                     )
+     return transformer
+
+
+ def load_T5_model_classification(checkpoint, num_labels, half_precision, full = False, deepspeed=True):
+     # Load model and tokenizer
+
+     if "ankh" in checkpoint:
+         model = T5EncoderModel.from_pretrained(checkpoint)
+         tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+
+     elif "prot_t5" in checkpoint:
+         # possible to load the half precision model (thanks to @pawel-rezo for pointing that out)
+         if half_precision and deepspeed:
+             #tokenizer = T5Tokenizer.from_pretrained('Rostlab/prot_t5_xl_half_uniref50-enc', do_lower_case=False)
+             #model = T5EncoderModel.from_pretrained("Rostlab/prot_t5_xl_half_uniref50-enc", torch_dtype=torch.float16)#.to(torch.device('cuda')
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint, do_lower_case=False)
+             model = T5EncoderModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(torch.device('cuda'))
+         else:
+             model = T5EncoderModel.from_pretrained(checkpoint)
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint)
+
+     elif "ProstT5" in checkpoint:
+         if half_precision and deepspeed:
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint, do_lower_case=False)
+             model = T5EncoderModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(torch.device('cuda'))
+         else:
+             model = T5EncoderModel.from_pretrained(checkpoint)
+             tokenizer = T5Tokenizer.from_pretrained(checkpoint)
+
+     # Create new Classifier model with PT5 dimensions
+     class_config=ClassConfig(num_labels=num_labels)
+     class_model=T5EncoderForTokenClassification(model.config,class_config)
+
+     # Set encoder and embedding weights to checkpoint weights
+     class_model.shared=model.shared
+     class_model.encoder=model.encoder
+
+     # Delete the checkpoint model
+     model=class_model
+     del class_model
+
+     if full == True:
+         return model, tokenizer
+
+     # Print number of trainable parameters
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     print("T5_Classifier\nTrainable Parameter: "+ str(params))
+
+     if custom_lora:
+         # the linear CustomLoRAConfig allows better quality predictions, but more memory is needed
+         # Add model modification lora
+         config = CustomLoRAConfig()
+
+         # Add LoRA layers
+         model = modify_with_lora(model, config)
+
+         # Freeze Embeddings and Encoder (except LoRA)
+         for (param_name, param) in model.shared.named_parameters():
+             param.requires_grad = False
+         for (param_name, param) in model.encoder.named_parameters():
+             param.requires_grad = False
+
+         for (param_name, param) in model.named_parameters():
+             if re.fullmatch(config.trainable_param_names, param_name):
+                 param.requires_grad = True
+
+     else:
+         # lora modification
+         peft_config = LoraConfig(
+             r=4, lora_alpha=1, bias="all", target_modules=["q","k","v","o"]
+         )
+
+         model = inject_adapter_in_model(peft_config, model)
+
+         # Unfreeze the prediction head
+         for (param_name, param) in model.classifier.named_parameters():
+             param.requires_grad = True
+
+     # Print trainable Parameter
+     model_parameters = filter(lambda p: p.requires_grad, model.parameters())
+     params = sum([np.prod(p.size()) for p in model_parameters])
+     print("T5_LoRA_Classifier\nTrainable Parameter: "+ str(params) + "\n")
+
+     return model, tokenizer
+
+ class EsmForTokenClassificationCustom(EsmPreTrainedModel):
+     _keys_to_ignore_on_load_unexpected = [r"pooler"]
+     _keys_to_ignore_on_load_missing = [r"position_ids", r"cnn", r"ffn", r"transformer"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.esm = EsmModel(config, add_pooling_layer=False)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+         if cnn_head:
+             self.cnn = nn.Conv1d(config.hidden_size, 512, kernel_size=3, padding=1)
+             self.classifier = nn.Linear(512, config.num_labels)
+         elif ffn_head:
+             # Multi-layer feed-forward network (FFN) as an alternative head
+             self.ffn = nn.Sequential(
+                 nn.Linear(config.hidden_size, 512),
+                 nn.ReLU(),
+                 nn.Linear(512, 256),
+                 nn.ReLU(),
+                 nn.Linear(256, config.num_labels)
+             )
+         elif transformer_head:
+             # Transformer layer as an alternative head
+             encoder_layer = nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=8)
+             self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)
+             self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+         else:
+             # Default classification head
+             self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+         self.init_weights()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, TokenClassifierOutput]:
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         outputs = self.esm(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         sequence_output = outputs[0]
+         sequence_output = self.dropout(sequence_output)
+
+         if cnn_head:
+             sequence_output = sequence_output.transpose(1, 2)
+             sequence_output = self.cnn(sequence_output)
+             sequence_output = sequence_output.transpose(1, 2)
+             logits = self.classifier(sequence_output)
+         elif ffn_head:
+             logits = self.ffn(sequence_output)
+         elif transformer_head:
+             # Apply transformer encoder for the transformer head
+             sequence_output = self.transformer_encoder(sequence_output)
+             logits = self.classifier(sequence_output)
+         else:
+             logits = self.classifier(sequence_output)
+
+         loss = None
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+             active_loss = attention_mask.view(-1) == 1
+             active_logits = logits.view(-1, self.num_labels)
+             active_labels = torch.where(
+                 active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)
+             )
+             valid_logits = active_logits[active_labels != -100]
+             valid_labels = active_labels[active_labels != -100]
+             # keep labels on the same device as the logits instead of hard-coding 'cuda:0'
+             valid_labels = valid_labels.long().to(valid_logits.device)
+             loss = loss_fct(valid_logits, valid_labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return TokenClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear) or isinstance(module, nn.Conv1d):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+
+ # based on transformers DataCollatorForTokenClassification
+ @dataclass
+ class DataCollatorForTokenClassificationESM(DataCollatorMixin):
+     """
+     Data collator that will dynamically pad the inputs received, as well as the labels.
+     Args:
+         tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+             The tokenizer used for encoding the data.
+         padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+               acceptable input length for the model if that argument is not provided.
+             - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+         max_length (`int`, *optional*):
+             Maximum length of the returned list and optionally padding length (see above).
+         pad_to_multiple_of (`int`, *optional*):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+         label_pad_token_id (`int`, *optional*, defaults to -100):
+             The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
+         return_tensors (`str`):
+             The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+     """
+
+     tokenizer: PreTrainedTokenizerBase
+     padding: Union[bool, str, PaddingStrategy] = True
+     max_length: Optional[int] = None
+     pad_to_multiple_of: Optional[int] = None
+     label_pad_token_id: int = -100
+     return_tensors: str = "pt"
+
+     def torch_call(self, features):
+         import torch
+
+         label_name = "label" if "label" in features[0].keys() else "labels"
+         labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+
+         no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
+
+         batch = self.tokenizer.pad(
+             no_labels_features,
+             padding=self.padding,
+             max_length=self.max_length,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         if labels is None:
+             return batch
+
+         sequence_length = batch["input_ids"].shape[1]
+         padding_side = self.tokenizer.padding_side
+
+         def to_list(tensor_or_iterable):
+             if isinstance(tensor_or_iterable, torch.Tensor):
+                 return tensor_or_iterable.tolist()
+             return list(tensor_or_iterable)
+
+         if padding_side == "right":
+             batch[label_name] = [
+                 # to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+                 # changed to pad the special tokens at the beginning and end of the sequence
+                 [self.label_pad_token_id] + to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)-1) for label in labels
+             ]
+         else:
+             batch[label_name] = [
+                 [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
+             ]
+
+         batch[label_name] = torch.tensor(batch[label_name], dtype=torch.float)
+         return batch
+
+ def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+     """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+     import torch
+
+     # Tensorize if necessary.
+     if isinstance(examples[0], (list, tuple, np.ndarray)):
+         examples = [torch.tensor(e, dtype=torch.long) for e in examples]
+
+     length_of_first = examples[0].size(0)
+
+     # Check if padding is necessary.
+     are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
+     if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+         return torch.stack(examples, dim=0)
+
+     # If yes, check if we have a `pad_token`.
+     if tokenizer._pad_token is None:
+         raise ValueError(
+             "You are attempting to pad samples but the tokenizer you are using"
+             f" ({tokenizer.__class__.__name__}) does not have a pad token."
+         )
+
+     # Creating the full tensor and filling it with our data.
+     max_length = max(x.size(0) for x in examples)
+     if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+         max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+     result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
+     for i, example in enumerate(examples):
+         if tokenizer.padding_side == "right":
+             result[i, : example.shape[0]] = example
+         else:
+             result[i, -example.shape[0] :] = example
+     return result
+
+ def tolist(x):
+     if isinstance(x, list):
+         return x
+     elif hasattr(x, "numpy"):  # Checks for TF tensors without needing the import
+         x = x.numpy()
+     return x.tolist()
+
+ # load ESM2 models
+ def load_esm_model_classification(checkpoint, num_labels, half_precision, full=False, deepspeed=True):
+
+     tokenizer = AutoTokenizer.from_pretrained(checkpoint)
+
+     if half_precision and deepspeed:
+         model = EsmForTokenClassificationCustom.from_pretrained(checkpoint,
+                                                                 num_labels = num_labels,
+                                                                 ignore_mismatched_sizes=True,
+                                                                 torch_dtype = torch.float16)
+     else:
+         model = EsmForTokenClassificationCustom.from_pretrained(checkpoint,
+                                                                 num_labels = num_labels,
+                                                                 ignore_mismatched_sizes=True)
+
+     if full == True:
+         return model, tokenizer
+
+     peft_config = LoraConfig(
+         r=4, lora_alpha=1, bias="all", target_modules=["query","key","value","dense"]
+     )
+
+     model = inject_adapter_in_model(peft_config, model)
+
+     #model.gradient_checkpointing_enable()
+
+     # Unfreeze the prediction head
+     for (param_name, param) in model.classifier.named_parameters():
+         param.requires_grad = True
+
+     return model, tokenizer
+
+ def load_model():
+     checkpoint='ThorbenF/prot_t5_xl_uniref50'
+     #best_model_path='ThorbenF/prot_t5_xl_uniref50/cpt.pth'
+     full=False
+     deepspeed=False
+     mixed=False
+     num_labels=2
+
+     print(checkpoint, num_labels, mixed, full, deepspeed)
+
+     # Determine model type and load accordingly
+     if "esm" in checkpoint:
+         model, tokenizer = load_esm_model_classification(checkpoint, num_labels, mixed, full, deepspeed)
+     else:
+         model, tokenizer = load_T5_model_classification(checkpoint, num_labels, mixed, full, deepspeed)
+
+     checkpoint_dir = model.config.name_or_path  # This will point to the local directory
+
+     print(checkpoint_dir)
+     # Construct the path to the custom checkpoint file
+     best_model_path = os.path.join(checkpoint_dir, 'cpt.pth')
+
+     # Load the best model state
+     state_dict = torch.load(best_model_path, weights_only=True)
+     model.load_state_dict(state_dict)
+
+     return model, tokenizer
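
A minimal usage sketch for this module, assuming model_loader.py sits on the import path and the checkpoint referenced in load_model() (including cpt.pth) can be resolved; the example sequence is illustrative only:

# Minimal sketch (assumptions as above, not part of the module itself).
import torch
from model_loader import load_model

model, tokenizer = load_model()
model.eval()

seq = " ".join("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ")  # ProtT5-style tokenizers expect space-separated residues
inputs = tokenizer(seq, return_tensors="pt")

with torch.no_grad():
    out = model(inputs["input_ids"], attention_mask=inputs["attention_mask"])

print(out.logits.shape)  # (batch, tokens, num_labels), with num_labels=2 here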
.ipynb_checkpoints/requirements-checkpoint.txt ADDED
@@ -0,0 +1,9 @@
+ torch>=1.13.0
+ transformers>=4.30.0
+ datasets>=2.9.0
+ peft>=0.0.7
+ scipy>=1.7.0
+ pandas>=1.1.0
+ numpy>=1.19.0
+ scikit-learn>=0.24.0
+ sentencepiece
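
A quick environment check when reproducing the Space locally, assuming the packages above were installed (for example via pip install -r requirements.txt):

# Minimal sketch: confirm the pinned dependencies import and report their versions.
import importlib

for name in ("torch", "transformers", "datasets", "peft", "scipy", "pandas", "numpy", "sklearn", "sentencepiece"):
    mod = importlib.import_module(name)
    print(name, getattr(mod, "__version__", "unknown"))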
model_loader.ipynb ADDED
@@ -0,0 +1,871 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 38,
6
+ "id": "14ff5741-629c-445a-a8a9-b3d9db1f3ddb",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import torch\n",
11
+ "import torch.nn as nn\n",
12
+ "import torch.nn.functional as F\n",
13
+ "from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n",
14
+ "from torch.utils.data import DataLoader\n",
15
+ "\n",
16
+ "import re\n",
17
+ "import numpy as np\n",
18
+ "import os\n",
19
+ "import pandas as pd\n",
20
+ "import copy\n",
21
+ "\n",
22
+ "import transformers, datasets\n",
23
+ "from transformers.modeling_outputs import TokenClassifierOutput\n",
24
+ "from transformers.models.t5.modeling_t5 import T5Config, T5PreTrainedModel, T5Stack\n",
25
+ "from transformers.utils.model_parallel_utils import assert_device_map, get_device_map\n",
26
+ "from transformers import T5EncoderModel, T5Tokenizer\n",
27
+ "from transformers.models.esm.modeling_esm import EsmPreTrainedModel, EsmModel\n",
28
+ "from transformers import AutoTokenizer\n",
29
+ "from transformers import TrainingArguments, Trainer, set_seed\n",
30
+ "from transformers import DataCollatorForTokenClassification\n",
31
+ "\n",
32
+ "from dataclasses import dataclass\n",
33
+ "from typing import Dict, List, Optional, Tuple, Union\n",
34
+ "\n",
35
+ "# for custom DataCollator\n",
36
+ "from transformers.data.data_collator import DataCollatorMixin\n",
37
+ "from transformers.tokenization_utils_base import PreTrainedTokenizerBase\n",
38
+ "from transformers.utils import PaddingStrategy\n",
39
+ "\n",
40
+ "from datasets import Dataset\n",
41
+ "\n",
42
+ "from scipy.special import expit\n",
43
+ "\n",
44
+ "import peft\n",
45
+ "from peft import get_peft_config, PeftModel, PeftConfig, inject_adapter_in_model, LoraConfig"
46
+ ]
47
+ },
48
+ {
49
+ "cell_type": "code",
50
+ "execution_count": 6,
51
+ "id": "5ec16a71-ed5d-46a6-98b2-55bc5d0fbe07",
52
+ "metadata": {},
53
+ "outputs": [],
54
+ "source": [
55
+ "cnn_head=True #False set True for Rostlab/prot_t5_xl_half_uniref50-enc\n",
56
+ "ffn_head=False #False\n",
57
+ "transformer_head=False\n",
58
+ "custom_lora=True #False #only true for Rostlab/prot_t5_xl_half_uniref50-enc"
59
+ ]
60
+ },
61
+ {
62
+ "cell_type": "code",
63
+ "execution_count": 8,
64
+ "id": "cc7151ca-0daf-4e75-a865-ab52f9b28f2e",
65
+ "metadata": {},
66
+ "outputs": [],
67
+ "source": [
68
+ "class ClassConfig:\n",
69
+ " def __init__(self, dropout=0.2, num_labels=3):\n",
70
+ " self.dropout_rate = dropout\n",
71
+ " self.num_labels = num_labels\n",
72
+ "\n",
73
+ "class T5EncoderForTokenClassification(T5PreTrainedModel):\n",
74
+ "\n",
75
+ " def __init__(self, config: T5Config, class_config: ClassConfig):\n",
76
+ " super().__init__(config)\n",
77
+ " self.num_labels = class_config.num_labels\n",
78
+ " self.config = config\n",
79
+ "\n",
80
+ " self.shared = nn.Embedding(config.vocab_size, config.d_model)\n",
81
+ "\n",
82
+ " encoder_config = copy.deepcopy(config)\n",
83
+ " encoder_config.use_cache = False\n",
84
+ " encoder_config.is_encoder_decoder = False\n",
85
+ " self.encoder = T5Stack(encoder_config, self.shared)\n",
86
+ "\n",
87
+ " self.dropout = nn.Dropout(class_config.dropout_rate)\n",
88
+ "\n",
89
+ " # Initialize different heads based on class_config\n",
90
+ " if cnn_head:\n",
91
+ " self.cnn = nn.Conv1d(config.hidden_size, 512, kernel_size=3, padding=1)\n",
92
+ " self.classifier = nn.Linear(512, class_config.num_labels)\n",
93
+ " elif ffn_head:\n",
94
+ " # Multi-layer feed-forward network (FFN) head\n",
95
+ " self.ffn = nn.Sequential(\n",
96
+ " nn.Linear(config.hidden_size, 512),\n",
97
+ " nn.ReLU(),\n",
98
+ " nn.Linear(512, 256),\n",
99
+ " nn.ReLU(),\n",
100
+ " nn.Linear(256, class_config.num_labels)\n",
101
+ " )\n",
102
+ " elif transformer_head:\n",
103
+ " # Transformer layer head\n",
104
+ " encoder_layer = nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=8)\n",
105
+ " self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)\n",
106
+ " self.classifier = nn.Linear(config.hidden_size, class_config.num_labels)\n",
107
+ " else:\n",
108
+ " # Default classification head\n",
109
+ " self.classifier = nn.Linear(config.hidden_size, class_config.num_labels)\n",
110
+ " \n",
111
+ " self.post_init()\n",
112
+ "\n",
113
+ " # Model parallel\n",
114
+ " self.model_parallel = False\n",
115
+ " self.device_map = None\n",
116
+ "\n",
117
+ " def parallelize(self, device_map=None):\n",
118
+ " self.device_map = (\n",
119
+ " get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\n",
120
+ " if device_map is None\n",
121
+ " else device_map\n",
122
+ " )\n",
123
+ " assert_device_map(self.device_map, len(self.encoder.block))\n",
124
+ " self.encoder.parallelize(self.device_map)\n",
125
+ " self.classifier = self.classifier.to(self.encoder.first_device)\n",
126
+ " self.model_parallel = True\n",
127
+ "\n",
128
+ " def deparallelize(self):\n",
129
+ " self.encoder.deparallelize()\n",
130
+ " self.encoder = self.encoder.to(\"cpu\")\n",
131
+ " self.model_parallel = False\n",
132
+ " self.device_map = None\n",
133
+ " torch.cuda.empty_cache()\n",
134
+ "\n",
135
+ " def get_input_embeddings(self):\n",
136
+ " return self.shared\n",
137
+ "\n",
138
+ " def set_input_embeddings(self, new_embeddings):\n",
139
+ " self.shared = new_embeddings\n",
140
+ " self.encoder.set_input_embeddings(new_embeddings)\n",
141
+ "\n",
142
+ " def get_encoder(self):\n",
143
+ " return self.encoder\n",
144
+ "\n",
145
+ " def _prune_heads(self, heads_to_prune):\n",
146
+ " for layer, heads in heads_to_prune.items():\n",
147
+ " self.encoder.layer[layer].attention.prune_heads(heads)\n",
148
+ "\n",
149
+ " def forward(\n",
150
+ " self,\n",
151
+ " input_ids=None,\n",
152
+ " attention_mask=None,\n",
153
+ " head_mask=None,\n",
154
+ " inputs_embeds=None,\n",
155
+ " labels=None,\n",
156
+ " output_attentions=None,\n",
157
+ " output_hidden_states=None,\n",
158
+ " return_dict=None,\n",
159
+ " ):\n",
160
+ " return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n",
161
+ "\n",
162
+ " outputs = self.encoder(\n",
163
+ " input_ids=input_ids,\n",
164
+ " attention_mask=attention_mask,\n",
165
+ " inputs_embeds=inputs_embeds,\n",
166
+ " head_mask=head_mask,\n",
167
+ " output_attentions=output_attentions,\n",
168
+ " output_hidden_states=output_hidden_states,\n",
169
+ " return_dict=return_dict,\n",
170
+ " )\n",
171
+ "\n",
172
+ " sequence_output = outputs[0]\n",
173
+ " sequence_output = self.dropout(sequence_output)\n",
174
+ "\n",
175
+ " # Forward pass through the selected head\n",
176
+ " if cnn_head:\n",
177
+ " # CNN head\n",
178
+ " sequence_output = sequence_output.permute(0, 2, 1) # Prepare shape for CNN\n",
179
+ " cnn_output = self.cnn(sequence_output)\n",
180
+ " cnn_output = F.relu(cnn_output)\n",
181
+ " cnn_output = cnn_output.permute(0, 2, 1) # Shape back for classifier\n",
182
+ " logits = self.classifier(cnn_output)\n",
183
+ " elif ffn_head:\n",
184
+ " # FFN head\n",
185
+ " logits = self.ffn(sequence_output)\n",
186
+ " elif transformer_head:\n",
187
+ " # Transformer head\n",
188
+ " transformer_output = self.transformer_encoder(sequence_output)\n",
189
+ " logits = self.classifier(transformer_output)\n",
190
+ " else:\n",
191
+ " # Default classification head\n",
192
+ " logits = self.classifier(sequence_output)\n",
193
+ "\n",
194
+ " loss = None\n",
195
+ " if labels is not None:\n",
196
+ " loss_fct = CrossEntropyLoss()\n",
197
+ " active_loss = attention_mask.view(-1) == 1\n",
198
+ " active_logits = logits.view(-1, self.num_labels)\n",
199
+ " active_labels = torch.where(\n",
200
+ " active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)\n",
201
+ " )\n",
202
+ " valid_logits = active_logits[active_labels != -100]\n",
203
+ " valid_labels = active_labels[active_labels != -100]\n",
204
+ " valid_labels = valid_labels.to(valid_logits.device)\n",
205
+ " valid_labels = valid_labels.long()\n",
206
+ " loss = loss_fct(valid_logits, valid_labels)\n",
207
+ "\n",
208
+ " if not return_dict:\n",
209
+ " output = (logits,) + outputs[2:]\n",
210
+ " return ((loss,) + output) if loss is not None else output\n",
211
+ "\n",
212
+ " return TokenClassifierOutput(\n",
213
+ " loss=loss,\n",
214
+ " logits=logits,\n",
215
+ " hidden_states=outputs.hidden_states,\n",
216
+ " attentions=outputs.attentions,\n",
217
+ " )"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "code",
222
+ "execution_count": 10,
223
+ "id": "e5e751ba-f4d3-4a28-bea0-82633f1dabb4",
224
+ "metadata": {},
225
+ "outputs": [],
226
+ "source": [
227
+ "# Modifies an existing transformer and introduce the LoRA layers\n",
228
+ "\n",
229
+ "class CustomLoRAConfig:\n",
230
+ " def __init__(self):\n",
231
+ " self.lora_rank = 4\n",
232
+ " self.lora_init_scale = 0.01\n",
233
+ " self.lora_modules = \".*SelfAttention|.*EncDecAttention\"\n",
234
+ " self.lora_layers = \"q|k|v|o\"\n",
235
+ " self.trainable_param_names = \".*layer_norm.*|.*lora_[ab].*\"\n",
236
+ " self.lora_scaling_rank = 1\n",
237
+ " # lora_modules and lora_layers are speicified with regular expressions\n",
238
+ " # see https://www.w3schools.com/python/python_regex.asp for reference\n",
239
+ " \n",
240
+ "class LoRALinear(nn.Module):\n",
241
+ " def __init__(self, linear_layer, rank, scaling_rank, init_scale):\n",
242
+ " super().__init__()\n",
243
+ " self.in_features = linear_layer.in_features\n",
244
+ " self.out_features = linear_layer.out_features\n",
245
+ " self.rank = rank\n",
246
+ " self.scaling_rank = scaling_rank\n",
247
+ " self.weight = linear_layer.weight\n",
248
+ " self.bias = linear_layer.bias\n",
249
+ " if self.rank > 0:\n",
250
+ " self.lora_a = nn.Parameter(torch.randn(rank, linear_layer.in_features) * init_scale)\n",
251
+ " if init_scale < 0:\n",
252
+ " self.lora_b = nn.Parameter(torch.randn(linear_layer.out_features, rank) * init_scale)\n",
253
+ " else:\n",
254
+ " self.lora_b = nn.Parameter(torch.zeros(linear_layer.out_features, rank))\n",
255
+ " if self.scaling_rank:\n",
256
+ " self.multi_lora_a = nn.Parameter(\n",
257
+ " torch.ones(self.scaling_rank, linear_layer.in_features)\n",
258
+ " + torch.randn(self.scaling_rank, linear_layer.in_features) * init_scale\n",
259
+ " )\n",
260
+ " if init_scale < 0:\n",
261
+ " self.multi_lora_b = nn.Parameter(\n",
262
+ " torch.ones(linear_layer.out_features, self.scaling_rank)\n",
263
+ " + torch.randn(linear_layer.out_features, self.scaling_rank) * init_scale\n",
264
+ " )\n",
265
+ " else:\n",
266
+ " self.multi_lora_b = nn.Parameter(torch.ones(linear_layer.out_features, self.scaling_rank))\n",
267
+ "\n",
268
+ " def forward(self, input):\n",
269
+ " if self.scaling_rank == 1 and self.rank == 0:\n",
270
+ " # parsimonious implementation for ia3 and lora scaling\n",
271
+ " if self.multi_lora_a.requires_grad:\n",
272
+ " hidden = F.linear((input * self.multi_lora_a.flatten()), self.weight, self.bias)\n",
273
+ " else:\n",
274
+ " hidden = F.linear(input, self.weight, self.bias)\n",
275
+ " if self.multi_lora_b.requires_grad:\n",
276
+ " hidden = hidden * self.multi_lora_b.flatten()\n",
277
+ " return hidden\n",
278
+ " else:\n",
279
+ " # general implementation for lora (adding and scaling)\n",
280
+ " weight = self.weight\n",
281
+ " if self.scaling_rank:\n",
282
+ " weight = weight * torch.matmul(self.multi_lora_b, self.multi_lora_a) / self.scaling_rank\n",
283
+ " if self.rank:\n",
284
+ " weight = weight + torch.matmul(self.lora_b, self.lora_a) / self.rank\n",
285
+ " return F.linear(input, weight, self.bias)\n",
286
+ "\n",
287
+ " def extra_repr(self):\n",
288
+ " return \"in_features={}, out_features={}, bias={}, rank={}, scaling_rank={}\".format(\n",
289
+ " self.in_features, self.out_features, self.bias is not None, self.rank, self.scaling_rank\n",
290
+ " )\n",
291
+ "\n",
292
+ "\n",
293
+ "def modify_with_lora(transformer, config):\n",
294
+ " for m_name, module in dict(transformer.named_modules()).items():\n",
295
+ " if re.fullmatch(config.lora_modules, m_name):\n",
296
+ " for c_name, layer in dict(module.named_children()).items():\n",
297
+ " if re.fullmatch(config.lora_layers, c_name):\n",
298
+ " assert isinstance(\n",
299
+ " layer, nn.Linear\n",
300
+ " ), f\"LoRA can only be applied to torch.nn.Linear, but {layer} is {type(layer)}.\"\n",
301
+ " setattr(\n",
302
+ " module,\n",
303
+ " c_name,\n",
304
+ " LoRALinear(layer, config.lora_rank, config.lora_scaling_rank, config.lora_init_scale),\n",
305
+ " )\n",
306
+ " return transformer\n",
307
+ "\n"
308
+ ]
309
+ },
310
+ {
311
+ "cell_type": "code",
312
+ "execution_count": 12,
313
+ "id": "43a56311-3279-466a-bc95-590381f1b13c",
314
+ "metadata": {},
315
+ "outputs": [],
316
+ "source": [
317
+ "def load_T5_model_classification(checkpoint, num_labels, half_precision, full = False, deepspeed=True):\n",
318
+ " # Load model and tokenizer\n",
319
+ "\n",
320
+ " if \"ankh\" in checkpoint :\n",
321
+ " model = T5EncoderModel.from_pretrained(checkpoint)\n",
322
+ " tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n",
323
+ "\n",
324
+ " elif \"prot_t5\" in checkpoint:\n",
325
+ " # possible to load the half precision model (thanks to @pawel-rezo for pointing that out)\n",
326
+ " if half_precision and deepspeed:\n",
327
+ " #tokenizer = T5Tokenizer.from_pretrained('Rostlab/prot_t5_xl_half_uniref50-enc', do_lower_case=False)\n",
328
+ " #model = T5EncoderModel.from_pretrained(\"Rostlab/prot_t5_xl_half_uniref50-enc\", torch_dtype=torch.float16)#.to(torch.device('cuda')\n",
329
+ " tokenizer = T5Tokenizer.from_pretrained(checkpoint, do_lower_case=False)\n",
330
+ " model = T5EncoderModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(torch.device('cuda'))\n",
331
+ " else:\n",
332
+ " model = T5EncoderModel.from_pretrained(checkpoint)\n",
333
+ " tokenizer = T5Tokenizer.from_pretrained(checkpoint)\n",
334
+ " \n",
335
+ " elif \"ProstT5\" in checkpoint:\n",
336
+ " if half_precision and deepspeed: \n",
337
+ " tokenizer = T5Tokenizer.from_pretrained(checkpoint, do_lower_case=False)\n",
338
+ " model = T5EncoderModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(torch.device('cuda'))\n",
339
+ " else:\n",
340
+ " model = T5EncoderModel.from_pretrained(checkpoint)\n",
341
+ " tokenizer = T5Tokenizer.from_pretrained(checkpoint) \n",
342
+ " \n",
343
+ " # Create new Classifier model with PT5 dimensions\n",
344
+ " class_config=ClassConfig(num_labels=num_labels)\n",
345
+ " class_model=T5EncoderForTokenClassification(model.config,class_config)\n",
346
+ " \n",
347
+ " # Set encoder and embedding weights to checkpoint weights\n",
348
+ " class_model.shared=model.shared\n",
349
+ " class_model.encoder=model.encoder \n",
350
+ " \n",
351
+ " # Delete the checkpoint model\n",
352
+ " model=class_model\n",
353
+ " del class_model\n",
354
+ " \n",
355
+ " if full == True:\n",
356
+ " return model, tokenizer \n",
357
+ " \n",
358
+ " # Print number of trainable parameters\n",
359
+ " model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n",
360
+ " params = sum([np.prod(p.size()) for p in model_parameters])\n",
361
+ " print(\"T5_Classfier\\nTrainable Parameter: \"+ str(params)) \n",
362
+ "\n",
363
+ " if custom_lora:\n",
364
+ " #the linear CustomLoRAConfig allows better quality predictions, but more memory is needed\n",
365
+ " # Add model modification lora\n",
366
+ " config = CustomLoRAConfig()\n",
367
+ " \n",
368
+ " # Add LoRA layers\n",
369
+ " model = modify_with_lora(model, config)\n",
370
+ " \n",
371
+ " # Freeze Embeddings and Encoder (except LoRA)\n",
372
+ " for (param_name, param) in model.shared.named_parameters():\n",
373
+ " param.requires_grad = False\n",
374
+ " for (param_name, param) in model.encoder.named_parameters():\n",
375
+ " param.requires_grad = False \n",
376
+ " \n",
377
+ " for (param_name, param) in model.named_parameters():\n",
378
+ " if re.fullmatch(config.trainable_param_names, param_name):\n",
379
+ " param.requires_grad = True\n",
380
+ "\n",
381
+ " else:\n",
382
+ " # lora modification\n",
383
+ " peft_config = LoraConfig(\n",
384
+ " r=4, lora_alpha=1, bias=\"all\", target_modules=[\"q\",\"k\",\"v\",\"o\"]\n",
385
+ " )\n",
386
+ " \n",
387
+ " model = inject_adapter_in_model(peft_config, model)\n",
388
+ " \n",
389
+ " # Unfreeze the prediction head\n",
390
+ " for (param_name, param) in model.classifier.named_parameters():\n",
391
+ " param.requires_grad = True \n",
392
+ "\n",
393
+ " # Print trainable Parameter \n",
394
+ " model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n",
395
+ " params = sum([np.prod(p.size()) for p in model_parameters])\n",
396
+ " print(\"T5_LoRA_Classfier\\nTrainable Parameter: \"+ str(params) + \"\\n\")\n",
397
+ " \n",
398
+ " return model, tokenizer"
399
+ ]
400
+ },
401
+ {
402
+ "cell_type": "code",
403
+ "execution_count": 14,
404
+ "id": "7ba720bc-a003-4984-a965-cb2f42344e85",
405
+ "metadata": {},
406
+ "outputs": [],
407
+ "source": [
408
+ "class EsmForTokenClassificationCustom(EsmPreTrainedModel):\n",
409
+ " _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n",
410
+ " _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"cnn\", r\"ffn\", r\"transformer\"]\n",
411
+ "\n",
412
+ " def __init__(self, config):\n",
413
+ " super().__init__(config)\n",
414
+ " self.num_labels = config.num_labels\n",
415
+ " self.esm = EsmModel(config, add_pooling_layer=False)\n",
416
+ " self.dropout = nn.Dropout(config.hidden_dropout_prob)\n",
417
+ "\n",
418
+ " if cnn_head:\n",
419
+ " self.cnn = nn.Conv1d(config.hidden_size, 512, kernel_size=3, padding=1)\n",
420
+ " self.classifier = nn.Linear(512, config.num_labels)\n",
421
+ " elif ffn_head:\n",
422
+ " # Multi-layer feed-forward network (FFN) as an alternative head\n",
423
+ " self.ffn = nn.Sequential(\n",
424
+ " nn.Linear(config.hidden_size, 512),\n",
425
+ " nn.ReLU(),\n",
426
+ " nn.Linear(512, 256),\n",
427
+ " nn.ReLU(),\n",
428
+ " nn.Linear(256, config.num_labels)\n",
429
+ " )\n",
430
+ " elif transformer_head:\n",
431
+ " # Transformer layer as an alternative head\n",
432
+ " encoder_layer = nn.TransformerEncoderLayer(d_model=config.hidden_size, nhead=8)\n",
433
+ " self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)\n",
434
+ " self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n",
435
+ " else:\n",
436
+ " # Default classification head\n",
437
+ " self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n",
438
+ "\n",
439
+ " self.init_weights()\n",
440
+ "\n",
441
+ " def forward(\n",
442
+ " self,\n",
443
+ " input_ids: Optional[torch.LongTensor] = None,\n",
444
+ " attention_mask: Optional[torch.Tensor] = None,\n",
445
+ " position_ids: Optional[torch.LongTensor] = None,\n",
446
+ " head_mask: Optional[torch.Tensor] = None,\n",
447
+ " inputs_embeds: Optional[torch.FloatTensor] = None,\n",
448
+ " labels: Optional[torch.LongTensor] = None,\n",
449
+ " output_attentions: Optional[bool] = None,\n",
450
+ " output_hidden_states: Optional[bool] = None,\n",
451
+ " return_dict: Optional[bool] = None,\n",
452
+ " ) -> Union[Tuple, TokenClassifierOutput]:\n",
453
+ " return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n",
454
+ " outputs = self.esm(\n",
455
+ " input_ids,\n",
456
+ " attention_mask=attention_mask,\n",
457
+ " position_ids=position_ids,\n",
458
+ " head_mask=head_mask,\n",
459
+ " inputs_embeds=inputs_embeds,\n",
460
+ " output_attentions=output_attentions,\n",
461
+ " output_hidden_states=output_hidden_states,\n",
462
+ " return_dict=return_dict,\n",
463
+ " )\n",
464
+ " \n",
465
+ " sequence_output = outputs[0]\n",
466
+ " sequence_output = self.dropout(sequence_output)\n",
467
+ "\n",
468
+ " if cnn_head:\n",
469
+ " sequence_output = sequence_output.transpose(1, 2)\n",
470
+ " sequence_output = self.cnn(sequence_output)\n",
471
+ " sequence_output = sequence_output.transpose(1, 2)\n",
472
+ " logits = self.classifier(sequence_output)\n",
473
+ " elif ffn_head:\n",
474
+ " logits = self.ffn(sequence_output)\n",
475
+ " elif transformer_head:\n",
476
+ " # Apply transformer encoder for the transformer head\n",
477
+ " sequence_output = self.transformer_encoder(sequence_output)\n",
478
+ " logits = self.classifier(sequence_output)\n",
479
+ " else:\n",
480
+ " logits = self.classifier(sequence_output)\n",
481
+ "\n",
482
+ " loss = None\n",
483
+ " if labels is not None:\n",
484
+ " loss_fct = CrossEntropyLoss()\n",
485
+ " active_loss = attention_mask.view(-1) == 1\n",
486
+ " active_logits = logits.view(-1, self.num_labels)\n",
487
+ " active_labels = torch.where(\n",
488
+ " active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)\n",
489
+ " )\n",
490
+ " valid_logits = active_logits[active_labels != -100]\n",
491
+ " valid_labels = active_labels[active_labels != -100]\n",
492
+ " valid_labels = valid_labels.type(torch.LongTensor).to('cuda:0')\n",
493
+ " loss = loss_fct(valid_logits, valid_labels)\n",
494
+ "\n",
495
+ " if not return_dict:\n",
496
+ " output = (logits,) + outputs[2:]\n",
497
+ " return ((loss,) + output) if loss is not None else output\n",
498
+ "\n",
499
+ " return TokenClassifierOutput(\n",
500
+ " loss=loss,\n",
501
+ " logits=logits,\n",
502
+ " hidden_states=outputs.hidden_states,\n",
503
+ " attentions=outputs.attentions,\n",
504
+ " )\n",
505
+ "\n",
506
+ " def _init_weights(self, module):\n",
507
+ " if isinstance(module, nn.Linear) or isinstance(module, nn.Conv1d):\n",
508
+ " module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n",
509
+ " if module.bias is not None:\n",
510
+ " module.bias.data.zero_()\n",
511
+ "\n",
512
+ "# based on transformers DataCollatorForTokenClassification\n",
513
+ "@dataclass\n",
514
+ "class DataCollatorForTokenClassificationESM(DataCollatorMixin):\n",
515
+ " \"\"\"\n",
516
+ " Data collator that will dynamically pad the inputs received, as well as the labels.\n",
517
+ " Args:\n",
518
+ " tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):\n",
519
+ " The tokenizer used for encoding the data.\n",
520
+ " padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n",
521
+ " Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n",
522
+ " among:\n",
523
+ " - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single\n",
524
+ " sequence is provided).\n",
525
+ " - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n",
526
+ " acceptable input length for the model if that argument is not provided.\n",
527
+ " - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).\n",
528
+ " max_length (`int`, *optional*):\n",
529
+ " Maximum length of the returned list and optionally padding length (see above).\n",
530
+ " pad_to_multiple_of (`int`, *optional*):\n",
531
+ " If set will pad the sequence to a multiple of the provided value.\n",
532
+ " This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n",
533
+ " 7.5 (Volta).\n",
534
+ " label_pad_token_id (`int`, *optional*, defaults to -100):\n",
535
+ " The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).\n",
536
+ " return_tensors (`str`):\n",
537
+ " The type of Tensor to return. Allowable values are \"np\", \"pt\" and \"tf\".\n",
538
+ " \"\"\"\n",
539
+ "\n",
540
+ " tokenizer: PreTrainedTokenizerBase\n",
541
+ " padding: Union[bool, str, PaddingStrategy] = True\n",
542
+ " max_length: Optional[int] = None\n",
543
+ " pad_to_multiple_of: Optional[int] = None\n",
544
+ " label_pad_token_id: int = -100\n",
545
+ " return_tensors: str = \"pt\"\n",
546
+ "\n",
547
+ " def torch_call(self, features):\n",
548
+ " import torch\n",
549
+ "\n",
550
+ " label_name = \"label\" if \"label\" in features[0].keys() else \"labels\"\n",
551
+ " labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None\n",
552
+ "\n",
553
+ " no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]\n",
554
+ "\n",
555
+ " batch = self.tokenizer.pad(\n",
556
+ " no_labels_features,\n",
557
+ " padding=self.padding,\n",
558
+ " max_length=self.max_length,\n",
559
+ " pad_to_multiple_of=self.pad_to_multiple_of,\n",
560
+ " return_tensors=\"pt\",\n",
561
+ " )\n",
562
+ "\n",
563
+ " if labels is None:\n",
564
+ " return batch\n",
565
+ "\n",
566
+ " sequence_length = batch[\"input_ids\"].shape[1]\n",
567
+ " padding_side = self.tokenizer.padding_side\n",
568
+ "\n",
569
+ " def to_list(tensor_or_iterable):\n",
570
+ " if isinstance(tensor_or_iterable, torch.Tensor):\n",
571
+ " return tensor_or_iterable.tolist()\n",
572
+ " return list(tensor_or_iterable)\n",
573
+ "\n",
574
+ " if padding_side == \"right\":\n",
575
+ " batch[label_name] = [\n",
576
+ " # to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels\n",
577
+ " # changed to pad the special tokens at the beginning and end of the sequence\n",
578
+ " [self.label_pad_token_id] + to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)-1) for label in labels\n",
579
+ " ]\n",
580
+ " else:\n",
581
+ " batch[label_name] = [\n",
582
+ " [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels\n",
583
+ " ]\n",
584
+ "\n",
585
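+ " # labels leave the collator as float tensors padded with -100; the model casts them back to long before the loss\n",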
+ " batch[label_name] = torch.tensor(batch[label_name], dtype=torch.float)\n",
586
+ " return batch\n",
587
+ "\n",
588
+ "def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):\n",
589
+ " \"\"\"Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.\"\"\"\n",
590
+ " import torch\n",
591
+ "\n",
592
+ " # Tensorize if necessary.\n",
593
+ " if isinstance(examples[0], (list, tuple, np.ndarray)):\n",
594
+ " examples = [torch.tensor(e, dtype=torch.long) for e in examples]\n",
595
+ "\n",
596
+ " length_of_first = examples[0].size(0)\n",
597
+ "\n",
598
+ " # Check if padding is necessary.\n",
599
+ "\n",
600
+ " are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)\n",
601
+ " if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):\n",
602
+ " return torch.stack(examples, dim=0)\n",
603
+ "\n",
604
+ " # If yes, check if we have a `pad_token`.\n",
605
+ " if tokenizer._pad_token is None:\n",
606
+ " raise ValueError(\n",
607
+ " \"You are attempting to pad samples but the tokenizer you are using\"\n",
608
+ " f\" ({tokenizer.__class__.__name__}) does not have a pad token.\"\n",
609
+ " )\n",
610
+ "\n",
611
+ " # Creating the full tensor and filling it with our data.\n",
612
+ " max_length = max(x.size(0) for x in examples)\n",
613
+ " if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n",
614
+ " max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n",
615
+ " result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)\n",
616
+ " for i, example in enumerate(examples):\n",
617
+ " if tokenizer.padding_side == \"right\":\n",
618
+ " result[i, : example.shape[0]] = example\n",
619
+ " else:\n",
620
+ " result[i, -example.shape[0] :] = example\n",
621
+ " return result\n",
622
+ "\n",
623
+ "def tolist(x):\n",
624
+ " if isinstance(x, list):\n",
625
+ " return x\n",
626
+ " elif hasattr(x, \"numpy\"): # Checks for TF tensors without needing the import\n",
627
+ " x = x.numpy()\n",
628
+ " return x.tolist()"
629
+ ]
630
+ },
631
+ {
632
+ "cell_type": "code",
633
+ "execution_count": 16,
634
+ "id": "ea511812-1244-4e51-b63c-b4da7822f0b7",
635
+ "metadata": {},
636
+ "outputs": [],
637
+ "source": [
638
+ "#load ESM2 models\n",
639
+ "def load_esm_model_classification(checkpoint, num_labels, half_precision, full=False, deepspeed=True):\n",
640
+ " \n",
641
+ " tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n",
642
+ "\n",
643
+ " \n",
644
+ " if half_precision and deepspeed:\n",
645
+ " model = EsmForTokenClassificationCustom.from_pretrained(checkpoint, \n",
646
+ " num_labels = num_labels, \n",
647
+ " ignore_mismatched_sizes=True,\n",
648
+ " torch_dtype = torch.float16)\n",
649
+ " else:\n",
650
+ " model = EsmForTokenClassificationCustom.from_pretrained(checkpoint, \n",
651
+ " num_labels = num_labels,\n",
652
+ " ignore_mismatched_sizes=True)\n",
653
+ " \n",
654
+ " if full == True:\n",
655
+ " return model, tokenizer \n",
656
+ " \n",
657
+ " peft_config = LoraConfig(\n",
658
+ " r=4, lora_alpha=1, bias=\"all\", target_modules=[\"query\",\"key\",\"value\",\"dense\"]\n",
659
+ " )\n",
660
+ " \n",
661
+ " model = inject_adapter_in_model(peft_config, model)\n",
662
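+ " # after injection only the LoRA adapter weights are trainable, hence the explicit unfreeze of the classifier below\n",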
+ "\n",
663
+ " #model.gradient_checkpointing_enable()\n",
664
+ " \n",
665
+ " # Unfreeze the prediction head\n",
666
+ " for (param_name, param) in model.classifier.named_parameters():\n",
667
+ " param.requires_grad = True \n",
668
+ " \n",
669
+ " return model, tokenizer"
670
+ ]
671
+ },
672
+ {
673
+ "cell_type": "code",
674
+ "execution_count": 22,
675
+ "id": "8941bbbb-57c5-4f3d-89d9-12b2d306e7a1",
676
+ "metadata": {},
677
+ "outputs": [],
678
+ "source": [
679
+ "checkpoint='../Pretrained/Rostlab/prot_t5_xl_uniref50'\n",
680
+ "best_model_path='../refined_models/ChallengeFinetuning/Rostlab/prot_t5_xl_uniref50/manual_checkpoint/cpt.pth'\n",
681
+ "full=False\n",
682
+ "deepspeed=False\n",
683
+ "mixed=False \n",
684
+ "num_labels=2"
685
+ ]
686
+ },
687
+ {
688
+ "cell_type": "code",
689
+ "execution_count": null,
690
+ "id": "4f007331-34d4-4c1d-9311-e91db23d9ed5",
691
+ "metadata": {},
692
+ "outputs": [],
693
+ "source": [
694
+ "/home/frohlkin/Projects/PLM/Publication/hf_webpage/pretrained"
695
+ ]
696
+ },
697
+ {
698
+ "cell_type": "code",
699
+ "execution_count": 24,
700
+ "id": "18d4ad06-b195-4cc6-a3c8-fa3e761838dc",
701
+ "metadata": {},
702
+ "outputs": [
703
+ {
704
+ "name": "stdout",
705
+ "output_type": "stream",
706
+ "text": [
707
+ "../Pretrained/Rostlab/prot_t5_xl_uniref50 2 False False False\n",
708
+ "T5_Classfier\n",
709
+ "Trainable Parameter: 1209716226\n",
710
+ "T5_LoRA_Classfier\n",
711
+ "Trainable Parameter: 4082178\n",
712
+ "\n"
713
+ ]
714
+ },
715
+ {
716
+ "data": {
717
+ "text/plain": [
718
+ "<All keys matched successfully>"
719
+ ]
720
+ },
721
+ "execution_count": 24,
722
+ "metadata": {},
723
+ "output_type": "execute_result"
724
+ }
725
+ ],
726
+ "source": [
727
+ "print(checkpoint, num_labels, mixed, full, deepspeed)\n",
728
+ " \n",
729
+ "# Determine model type and load accordingly\n",
730
+ "if \"esm\" in checkpoint:\n",
731
+ " model, tokenizer = load_esm_model_classification(checkpoint, num_labels, mixed, full, deepspeed)\n",
732
+ "else:\n",
733
+ " model, tokenizer = load_T5_model_classification(checkpoint, num_labels, mixed, full, deepspeed)\n",
734
+ "\n",
735
+ "# Load the best model state\n",
736
+ "state_dict = torch.load(best_model_path, weights_only=True)\n",
737
+ "model.load_state_dict(state_dict)"
738
+ ]
739
+ },
740
+ {
741
+ "cell_type": "code",
742
+ "execution_count": 30,
743
+ "id": "4e215923-dfe2-4426-aedf-5cb81f7f0db2",
744
+ "metadata": {},
745
+ "outputs": [],
746
+ "source": [
747
+ "test_one_letter_sequence='AWYAAK'\n",
748
+ "max_length=1500"
749
+ ]
750
+ },
751
+ {
752
+ "cell_type": "code",
753
+ "execution_count": 40,
754
+ "id": "7174ea02-ed51-46f5-84c0-6bcd760670d4",
755
+ "metadata": {},
756
+ "outputs": [
757
+ {
758
+ "data": {
759
+ "text/plain": [
760
+ "(7,)"
761
+ ]
762
+ },
763
+ "execution_count": 40,
764
+ "metadata": {},
765
+ "output_type": "execute_result"
766
+ }
767
+ ],
768
+ "source": [
769
+ "def create_dataset(tokenizer,seqs,labels,checkpoint):\n",
770
+ " \n",
771
+ " tokenized = tokenizer(seqs, max_length=max_length, padding=False, truncation=True)\n",
772
+ " dataset = Dataset.from_dict(tokenized)\n",
773
+ " \n",
774
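+ " # ESM adds CLS+EOS and ProstT5 adds the <AA2fold> prefix plus EOS (2 extra tokens); ProtT5 adds only EOS, hence -2 vs -1\n",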
+ " if (\"esm\" in checkpoint) or (\"ProstT5\" in checkpoint):\n",
775
+ " labels = [l[:max_length-2] for l in labels] \n",
776
+ " else:\n",
777
+ " labels = [l[:max_length-1] for l in labels] \n",
778
+ " \n",
779
+ " dataset = dataset.add_column(\"labels\", labels)\n",
780
+ " \n",
781
+ " return dataset\n",
782
+ " \n",
783
+ "def convert_predictions(input_logits):\n",
784
+ " all_probs = []\n",
785
+ " for logits in input_logits:\n",
786
+ " logits = logits.reshape(-1, 2)\n",
787
+ "\n",
788
+ " # Mask out irrelevant regions\n",
789
+ " # Compute probabilities for class 1\n",
790
+ " probabilities_class1 = expit(logits[:, 1] - logits[:, 0])\n",
791
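+ " # expit(z1 - z0) equals the softmax probability of class 1 for a two-class logit pair\n",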
+ " \n",
792
+ " all_probs.append(probabilities_class1)\n",
793
+ " \n",
794
+ " return np.concatenate(all_probs)\n",
795
+ " \n",
796
+ " \n",
797
+ "dummy_labels=[np.zeros(len(test_one_letter_sequence))]\n",
798
+ "# Replace uncommon amino acids with \"X\"\n",
799
+ "test_one_letter_sequence = test_one_letter_sequence.replace(\"O\", \"X\").replace(\"B\", \"X\").replace(\"U\", \"X\").replace(\"Z\", \"X\").replace(\"J\", \"X\")\n",
800
+ "\n",
801
+ "# Add spaces between each amino acid for ProtT5 and ProstT5 models\n",
802
+ "if \"Rostlab\" in checkpoint:\n",
803
+ " test_one_letter_sequence = \" \".join(test_one_letter_sequence)\n",
804
+ "\n",
805
+ "# Add <AA2fold> for ProstT5 model input format\n",
806
+ "if \"ProstT5\" in checkpoint:\n",
807
+ " test_one_letter_sequence = \"<AA2fold> \" + test_one_letter_sequence\n",
808
+ " \n",
809
+ "test_dataset=create_dataset(tokenizer,[test_one_letter_sequence],dummy_labels,checkpoint)\n",
810
+ "\n",
811
+ "if (\"esm\" in checkpoint) or (\"ProstT5\" in checkpoint):\n",
812
+ " data_collator = DataCollatorForTokenClassificationESM(tokenizer)\n",
813
+ "else:\n",
814
+ " data_collator = DataCollatorForTokenClassification(tokenizer)\n",
815
+ "\n",
816
+ "test_loader = DataLoader(test_dataset, batch_size=1, collate_fn=data_collator)\n",
817
+ "\n",
818
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
819
+ "model.to(device)\n",
820
+ "for batch in test_loader:\n",
821
+ " input_ids = batch['input_ids'].to(device)\n",
822
+ " attention_mask = batch['attention_mask'].to(device)\n",
823
+ " labels = batch['labels'] # Ensure to get labels from batch\n",
824
+ "\n",
825
+ " outputs = model(input_ids, attention_mask=attention_mask)\n",
826
+ " logits = outputs.logits.detach().cpu().numpy()\n",
827
+ "\n",
828
+ "logits=convert_predictions(logits)\n",
829
+ "logits.shape\n",
830
+ "\n",
831
+ "def normalize_scores(scores):\n",
832
+ " min_score = np.min(scores)\n",
833
+ " max_score = np.max(scores)\n",
834
+ " return (scores - min_score) / (max_score - min_score) if max_score > min_score else scores\n",
835
+ "\n",
836
+ "normalized_scores = normalize_scores(logits)\n",
837
+ "\n",
838
+ "normalized_scores.shape"
839
+ ]
840
+ },
841
+ {
842
+ "cell_type": "code",
843
+ "execution_count": null,
844
+ "id": "58b5ae4d-9e8e-4d07-ab46-76d23cc29016",
845
+ "metadata": {},
846
+ "outputs": [],
847
+ "source": []
848
+ }
849
+ ],
850
+ "metadata": {
851
+ "kernelspec": {
852
+ "display_name": "Python [conda env:LLM] *",
853
+ "language": "python",
854
+ "name": "conda-env-LLM-py"
855
+ },
856
+ "language_info": {
857
+ "codemirror_mode": {
858
+ "name": "ipython",
859
+ "version": 3
860
+ },
861
+ "file_extension": ".py",
862
+ "mimetype": "text/x-python",
863
+ "name": "python",
864
+ "nbconvert_exporter": "python",
865
+ "pygments_lexer": "ipython3",
866
+ "version": "3.12.2"
867
+ }
868
+ },
869
+ "nbformat": 4,
870
+ "nbformat_minor": 5
871
+ }
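
The post-processing at the end of the notebook reduces to two small numpy/scipy helpers. Below is a minimal, self-contained sketch of that logic that runs without the model or checkpoints; the dummy_logits array and its 1 x 7 x 2 shape are illustrative assumptions, while the two functions mirror convert_predictions and normalize_scores from the notebook above.

import numpy as np
from scipy.special import expit

def convert_predictions(input_logits):
    # flatten each (seq_len, 2) logit array into per-position class-1 probabilities
    all_probs = []
    for logits in input_logits:
        logits = logits.reshape(-1, 2)
        # sigmoid of the logit difference == softmax probability of class 1
        all_probs.append(expit(logits[:, 1] - logits[:, 0]))
    return np.concatenate(all_probs)

def normalize_scores(scores):
    # min-max scale to [0, 1]; leave untouched if all scores are identical
    min_score, max_score = np.min(scores), np.max(scores)
    return (scores - min_score) / (max_score - min_score) if max_score > min_score else scores

# dummy logits for a 6-residue sequence plus one special-token position (shape: 1 x 7 x 2)
dummy_logits = np.random.randn(1, 7, 2)
probs = convert_predictions(dummy_logits)
print(normalize_scores(probs))  # 7 values in [0, 1], one per token position
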
requirements.txt CHANGED
@@ -5,4 +5,5 @@ peft>=0.0.7
5
  scipy>=1.7.0
6
  pandas>=1.1.0
7
  numpy>=1.19.0
8
- scikit-learn>=0.24.0
 
 
8
+ scikit-learn>=0.24.0
9
+ sentencepiece
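
The added sentencepiece requirement covers the slow T5Tokenizer used for the ProtT5/ProstT5 checkpoints, which needs the sentencepiece backend at load time. A quick way to confirm the dependency, assuming the public Hub ID Rostlab/prot_t5_xl_uniref50 that corresponds to the local ../Pretrained path in the notebook:

# loading the tokenizer fails with an error pointing at the missing sentencepiece backend if the package is not installed
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("Rostlab/prot_t5_xl_uniref50", do_lower_case=False)
print(tokenizer("A W Y A A K").input_ids)
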