lhallee committed · Commit ec65020 · verified · 1 Parent(s): 1a727ac

Update modeling_esm_plusplus.py

Files changed (1)
  1. modeling_esm_plusplus.py +129 -30
modeling_esm_plusplus.py CHANGED
@@ -398,9 +398,7 @@ class UnifiedTransformerBlock(nn.Module):
         attn_output, attn_weights = self.attn(x, attention_mask, output_attentions)
         x = x + self.dropout(attn_output) / self.scaling_factor
         x = x + self.dropout(self.ffn(x)) / self.scaling_factor
-        if output_attentions:
-            return x, attn_weights
-        return x
+        return x, attn_weights
 
 
 ### Model Outputs
@@ -452,6 +450,7 @@ class TransformerStack(nn.Module):
             ]
         )
         self.norm = nn.LayerNorm(d_model, bias=False)
+        self.gradient_checkpointing = False
 
     def forward(
         self,
@@ -478,12 +477,18 @@ class TransformerStack(nn.Module):
             attention_mask = attention_mask[:, None, None, :].expand(batch_size, 1, seq_len, seq_len).bool()
 
         for block in self.blocks:
-            if output_attentions:
-                x, attn_weights = block(x, attention_mask, output_attentions)
-                if attentions is not None:
-                    attentions += (attn_weights,)
+            if self.gradient_checkpointing and self.training:
+                x, attn_weights = self._gradient_checkpointing_func(
+                    block.__call__,
+                    x,
+                    attention_mask,
+                    output_attentions,
+                )
             else:
-                x = block(x, attention_mask, output_attentions)
+                x, attn_weights = block(x, attention_mask, output_attentions)
+
+            if attentions is not None:
+                attentions += (attn_weights,)
 
         if output_hidden_states:
             assert hidden_states is not None
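A note on the checkpointing branch above: self._gradient_checkpointing_func is the hook that recent transformers versions attach when checkpointing is enabled at the PreTrainedModel level, so callers only flip one switch. A minimal sketch of that flow, assuming this file is importable as modeling_esm_plusplus and that the default ESMplusplusConfig values are usable for a quick smoke test:

# Sketch only: enabling activation checkpointing on the stack shown above.
# Assumes ESMplusplusConfig and ESMplusplusForMaskedLM (defined later in this file)
# are importable and a recent transformers version is installed.
import torch
from modeling_esm_plusplus import ESMplusplusConfig, ESMplusplusForMaskedLM

model = ESMplusplusForMaskedLM(ESMplusplusConfig())
model.gradient_checkpointing_enable()  # sets TransformerStack.gradient_checkpointing = True
model.train()                          # the checkpointed path only runs when self.training is True

input_ids = torch.randint(0, model.vocab_size, (2, 32))  # random token ids; shapes are illustrative
output = model(input_ids=input_ids)    # on backward, each block's activations are recomputed rather than stored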
@@ -509,25 +514,30 @@ class ProteinDataset(Dataset):
         return self.sequences[idx]
 
 
-### ESM++ Models
-class ESMplusplusForMaskedLM(PreTrainedModel):
-    """ESM++ model for masked language modeling.
-
-    Implements the base ESM++ architecture with a masked language modeling head.
+class PreTrainedESMplusplusModel(PreTrainedModel):
+    """
+    init weights for ESM++ models
     """
     config_class = ESMplusplusConfig
-    def __init__(self, config: ESMplusplusConfig, **kwargs):
-        super().__init__(config, **kwargs)
-        self.config = config
-        self.vocab_size = config.vocab_size
-        self.embed = nn.Embedding(self.vocab_size, config.hidden_size)
-        self.transformer = TransformerStack(config.hidden_size, config.num_attention_heads, config.num_hidden_layers, config.dropout)
-        self.sequence_head = RegressionHead(config.hidden_size, self.vocab_size)
-        self.ce_loss = nn.CrossEntropyLoss()
-        self.tokenizer = EsmSequenceTokenizer()
+    base_model_prefix = "esm++"
+    supports_gradient_checkpointing = True
+
+    def _init_weights(self, module):
+        """Initialize the weights"""
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
 
     @classmethod
-    def from_pretrained_esm(cls, model_name: str) -> "ESMplusplusForMaskedLM":
+    def from_pretrained_esm(cls, model_name: str):
         """Load a pretrained ESM++ model."""
         if '300' in model_name:
             return ESMplusplus_300M()
@@ -548,6 +558,26 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
         else:
             attention_mask = attention_mask.unsqueeze(-1)
             return (x * attention_mask).sum(dim=1) / attention_mask.sum(dim=1)
+
+    def max_pooling(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+        """Apply max pooling to sequence outputs."""
+        if attention_mask is None:
+            return x.max(dim=1).values
+        else:
+            attention_mask = attention_mask.unsqueeze(-1)
+            return (x * attention_mask).max(dim=1).values
+
+    def min_pooling(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+        """Apply min pooling to sequence outputs."""
+        if attention_mask is None:
+            return x.min(dim=1).values
+        else:
+            attention_mask = attention_mask.unsqueeze(-1)
+            return (x * attention_mask).min(dim=1).values
+
+    def cls_pooling(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
+        """Apply cls pooling to sequence outputs."""
+        return x[:, 0, :]
 
     def _collate_fn(self, sequences: list[str]) -> tuple[torch.Tensor, torch.Tensor]:
         """Collate function for batching sequences."""
@@ -606,8 +636,14 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
                 return residue_embeddings
             elif pooling_type == 'mean':
                 return self.mean_pooling(residue_embeddings, attention_mask)
+            elif pooling_type == 'max':
+                return self.max_pooling(residue_embeddings, attention_mask)
+            elif pooling_type == 'min':
+                return self.min_pooling(residue_embeddings, attention_mask)
+            elif pooling_type == 'cls':
+                return self.cls_pooling(residue_embeddings, attention_mask)
             else:
-                return residue_embeddings[:, 0, :]
+                raise ValueError(f"Invalid pooling type: {pooling_type}")
 
         if sql:
             import sqlite3
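The pooling helpers added above and the new dispatch both reduce a (batch, seq_len, hidden) residue-embedding tensor over the sequence dimension. A standalone sketch of the same arithmetic on toy tensors (nothing here depends on the model; shapes are illustrative):

# Standalone sketch of the masked pooling arithmetic used above (toy shapes).
import torch

x = torch.randn(2, 5, 8)                       # (batch, seq_len, hidden) residue embeddings
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])

mask = attention_mask.unsqueeze(-1)            # (batch, seq_len, 1), broadcast over hidden
mean_pooled = (x * mask).sum(dim=1) / mask.sum(dim=1)  # mirrors mean_pooling
max_pooled = (x * mask).max(dim=1).values              # mirrors max_pooling
min_pooled = (x * mask).min(dim=1).values              # mirrors min_pooling
cls_pooled = x[:, 0, :]                                # mirrors cls_pooling
print(mean_pooled.shape, max_pooled.shape, min_pooled.shape, cls_pooled.shape)  # all (2, 8)

Note that, as written, padded positions enter the max and min as zeros (the mask only multiplies), whereas mean pooling also normalises by the number of real tokens.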
@@ -653,6 +689,67 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
 
         return embeddings_dict
 
+
+### ESM++ Models
+class ESMplusplusModel(PreTrainedESMplusplusModel):
+    """
+    ESM++ model. transformer model with no heads
+    """
+    config_class = ESMplusplusConfig
+    def __init__(self, config: ESMplusplusConfig, **kwargs):
+        super().__init__(config, **kwargs)
+        self.config = config
+        self.vocab_size = config.vocab_size
+        self.embed = nn.Embedding(self.vocab_size, config.hidden_size)
+        self.transformer = TransformerStack(config.hidden_size, config.num_attention_heads, config.num_hidden_layers, config.dropout)
+        self.tokenizer = EsmSequenceTokenizer()
+        self.init_weights()
+
+    def forward(
+        self,
+        input_ids: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None, # to play nice with HF adjacent packages
+    ) -> TransformerOutput:
+        """Forward pass for masked language modeling.
+
+        Args:
+            input_ids: Input token IDs
+            attention_mask: Attention mask
+            inputs_embeds: Optional precomputed embeddings
+            output_hidden_states: Whether to return all hidden states
+            output_attentions: Whether to return attention weights
+
+        Returns:
+            TransformerOutput containing last hidden state and optionally all hidden states and attention weights
+        """
+        if inputs_embeds is None:
+            x = self.embed(input_ids)
+        else:
+            x = inputs_embeds
+        return self.transformer(x, attention_mask, output_hidden_states, output_attentions)
+
+
+class ESMplusplusForMaskedLM(PreTrainedESMplusplusModel):
+    """
+    ESM++ model for masked language modeling.
+    Implements the base ESM++ architecture with a masked language modeling head.
+    """
+    config_class = ESMplusplusConfig
+    def __init__(self, config: ESMplusplusConfig, **kwargs):
+        super().__init__(config, **kwargs)
+        self.config = config
+        self.vocab_size = config.vocab_size
+        self.embed = nn.Embedding(self.vocab_size, config.hidden_size)
+        self.transformer = TransformerStack(config.hidden_size, config.num_attention_heads, config.num_hidden_layers, config.dropout)
+        self.sequence_head = RegressionHead(config.hidden_size, self.vocab_size)
+        self.ce_loss = nn.CrossEntropyLoss()
+        self.tokenizer = EsmSequenceTokenizer()
+        self.init_weights()
+
     def forward(
         self,
         input_ids: Optional[torch.Tensor] = None,
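The head-less ESMplusplusModel introduced above simply embeds the tokens and runs the TransformerStack, so it pairs naturally with the pooling helpers inherited from PreTrainedESMplusplusModel. A rough usage sketch; it assumes a default ESMplusplusConfig, that EsmSequenceTokenizer behaves like a standard Hugging Face tokenizer (padding, return_tensors="pt"), and that the TransformerOutput defined earlier in this file exposes last_hidden_state — none of which is shown in this hunk:

# Rough sketch: the head-less encoder plus the inherited mean pooling.
# Assumptions: EsmSequenceTokenizer returns input_ids/attention_mask tensors,
# and TransformerOutput carries last_hidden_state.
import torch
from modeling_esm_plusplus import ESMplusplusConfig, ESMplusplusModel

model = ESMplusplusModel(ESMplusplusConfig()).eval()
batch = model.tokenizer(["MSEQVENCE", "MKTAYIAK"], padding=True, return_tensors="pt")

with torch.no_grad():
    out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])

pooled = model.mean_pooling(out.last_hidden_state, batch["attention_mask"])  # (batch, hidden)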
 
@@ -695,9 +792,9 @@ class ESMplusplusForMaskedLM(PreTrainedModel):
         )
 
 
-class ESMplusplusForSequenceClassification(ESMplusplusForMaskedLM):
-    """ESM++ model for sequence classification.
-
+class ESMplusplusForSequenceClassification(PreTrainedESMplusplusModel):
+    """
+    ESM++ model for sequence classification.
     Extends the base ESM++ model with a classification head.
     """
     def __init__(self, config: ESMplusplusConfig, **kwargs):
@@ -709,6 +806,7 @@ class ESMplusplusForSequenceClassification(ESMplusplusForMaskedLM):
         self.mse = nn.MSELoss()
         self.ce = nn.CrossEntropyLoss()
         self.bce = nn.BCEWithLogitsLoss()
+        self.init_weights()
 
     def forward(
         self,
@@ -775,9 +873,9 @@ class ESMplusplusForSequenceClassification(ESMplusplusForMaskedLM):
         )
 
 
-class ESMplusplusForTokenClassification(ESMplusplusForMaskedLM):
-    """ESM++ model for token classification.
-
+class ESMplusplusForTokenClassification(PreTrainedESMplusplusModel):
+    """
+    ESM++ model for token classification.
     Extends the base ESM++ model with a token classification head.
     """
     def __init__(self, config: ESMplusplusConfig):
@@ -787,6 +885,7 @@ class ESMplusplusForTokenClassification(ESMplusplusForMaskedLM):
         self.classifier = RegressionHead(config.hidden_size, config.num_labels, config.hidden_size * 4)
         # Large intermediate projections help with sequence classification tasks (*4)
         self.loss_fct = nn.CrossEntropyLoss()
+        self.init_weights()
 
     def forward(
         self,
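Finally, a sketch of how a Hub checkpoint that ships this modeling file would typically be consumed; the repo id below is a placeholder and the auto-class mapping is assumed to be configured in the checkpoint's config:

# Sketch: loading a checkpoint that bundles modeling_esm_plusplus.py as remote code.
# "your-org/ESMplusplus-checkpoint" is a placeholder, not a real repo id.
from transformers import AutoModelForMaskedLM

model = AutoModelForMaskedLM.from_pretrained(
    "your-org/ESMplusplus-checkpoint",  # placeholder — substitute an actual ESM++ checkpoint
    trust_remote_code=True,             # the architecture lives in this file, not in transformers itself
)
model.gradient_checkpointing_enable()   # available now that the base class opts in above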