# coding=utf-8
# Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for ESM."""
import os
from typing import Optional

from transformers import EsmTokenizer, PreTrainedTokenizer

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}


def load_vocab_file(vocab_file):
    """Read a vocabulary file into a list of stripped tokens, one token per line."""
    with open(vocab_file, "r") as f:
        return [line.strip() for line in f.read().splitlines()]


class IsoformerTokenizer(PreTrainedTokenizer):
    """
    Constructs an Isoformer tokenizer: a thin wrapper around three `EsmTokenizer`
    instances, one each for DNA, RNA, and protein sequences.
    """

    def __init__(self, config, **kwargs):
        dna_hf_tokenizer = EsmTokenizer("dna_vocab_list.txt", model_max_length=196608)
        dna_hf_tokenizer.eos_token = None  # Stops the tokenizer from adding an EOS/SEP token at the end
        dna_hf_tokenizer.init_kwargs["eos_token"] = None  # Ensures it doesn't come back when reloading
        dna_hf_tokenizer.bos_token = None  # Stops the tokenizer from adding a BOS/CLS token at the start
        dna_hf_tokenizer.init_kwargs["bos_token"] = None  # Ensures it doesn't come back when reloading

        rna_hf_tokenizer = EsmTokenizer("rna_vocab_list.txt", model_max_length=1024)
        rna_hf_tokenizer.eos_token = None  # Stops the tokenizer from adding an EOS/SEP token at the end
        rna_hf_tokenizer.init_kwargs["eos_token"] = None  # Ensures it doesn't come back when reloading

        # Unlike DNA/RNA, the protein tokenizer keeps ESM's default special tokens.
        protein_hf_tokenizer = EsmTokenizer("protein_vocab_list.txt", model_max_length=1024)

        self.num_tokens_per_seq_nuctf = config.num_tokens_per_seq_nuctf
        self.num_tokens_per_seq_nuctf_rna = config.num_tokens_per_seq_nuctf_rna
        self.num_protein_tokens_per_seq = config.num_protein_tokens_per_seq

        self.dna_tokenizer = dna_hf_tokenizer
        self.rna_tokenizer = rna_hf_tokenizer
        self.protein_tokenizer = protein_hf_tokenizer

        # Keep the raw vocabularies so save_vocabulary() can write them back out.
        self.dna_tokens = load_vocab_file("dna_vocab_list.txt")
        self.rna_tokens = load_vocab_file("rna_vocab_list.txt")
        self.protein_tokens = load_vocab_file("protein_vocab_list.txt")

        self.config = config

        super().__init__(**kwargs)

    def __call__(self, dna_input, rna_input, protein_input):
        dna_output = self.dna_tokenizer(dna_input)  # , max_length=196608, padding="max_length")
        rna_output = self.rna_tokenizer(rna_input, max_length=1024, padding="max_length")
        protein_output = self.protein_tokenizer(protein_input, max_length=1024, padding="max_length")
        return dna_output, rna_output, protein_output

    def _add_tokens(self, *args, **kwargs):
        pass  # Overridden with a no-op to stop errors from the parent class at init

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        prefix = filename_prefix + "-" if filename_prefix else ""
        vocab_file_dna = os.path.join(save_directory, prefix + "dna_vocab_list.txt")
        vocab_file_rna = os.path.join(save_directory, prefix + "rna_vocab_list.txt")
        vocab_file_protein = os.path.join(save_directory, prefix + "protein_vocab_list.txt")
        with open(vocab_file_dna, "w") as f:
            f.write("\n".join(self.dna_tokens))
        with open(vocab_file_rna, "w") as f:
            f.write("\n".join(self.rna_tokens))
        with open(vocab_file_protein, "w") as f:
            f.write("\n".join(self.protein_tokens))
        return (vocab_file_dna, vocab_file_rna, vocab_file_protein)