|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Tokenization classes for ESM.""" |
|
import os |
|
from typing import List, Optional |
|
|
|
|
|
from transformers import EsmTokenizer, PreTrainedTokenizer |
|
|
|
|
|
# Hugging Face tokenizer convention: maps the logical vocab-file key to its
# on-disk filename (used by the save/load machinery of PreTrainedTokenizer).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
|
|
|
|
|
def load_vocab_file(vocab_file: str) -> List[str]:
    """Read a vocabulary file and return one stripped token per line.

    Args:
        vocab_file: Path to a plain-text vocabulary file, one token per line.

    Returns:
        List of tokens with surrounding whitespace removed. Empty lines are
        preserved as empty strings (matching the original behavior).
    """
    # Explicit UTF-8 so decoding does not depend on the platform locale.
    with open(vocab_file, "r", encoding="utf-8") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]
|
|
|
|
|
class IsoformerTokenizer(PreTrainedTokenizer):
    """
    Constructs the Isoformer tokenizer.

    Wraps three independent `EsmTokenizer` instances — one each for DNA, RNA
    and protein sequences — built from vocabulary files expected in the
    current working directory ("dna_vocab_list.txt", "rna_vocab_list.txt",
    "protein_vocab_list.txt").
    """

    def __init__(
        self,
        **kwargs
    ):
        # DNA tokenizer: very long context (196608); BOS/EOS disabled both on
        # the live attribute and in init_kwargs so the setting survives
        # save/reload round-trips.
        dna_hf_tokenizer = EsmTokenizer("dna_vocab_list.txt", model_max_length=196608)
        dna_hf_tokenizer.eos_token = None
        dna_hf_tokenizer.init_kwargs["eos_token"] = None
        dna_hf_tokenizer.bos_token = None
        dna_hf_tokenizer.init_kwargs["bos_token"] = None

        # RNA tokenizer: EOS disabled, BOS kept.
        rna_hf_tokenizer = EsmTokenizer("rna_vocab_list.txt", model_max_length=1024)
        rna_hf_tokenizer.eos_token = None
        rna_hf_tokenizer.init_kwargs["eos_token"] = None

        # Protein tokenizer: default ESM special tokens.
        protein_hf_tokenizer = EsmTokenizer("protein_vocab_list.txt", model_max_length=1024)

        self.dna_tokenizer = dna_hf_tokenizer
        self.rna_tokenizer = rna_hf_tokenizer
        self.protein_tokenizer = protein_hf_tokenizer

        # Keep the raw vocab lines so save_vocabulary can round-trip them.
        # Fix: the original used bare open(...).read() chains, leaking the
        # file handles; use context managers and an explicit encoding.
        # Entries are deliberately NOT stripped (plain split("\n")) to
        # preserve the original lists byte-for-byte, trailing empties included.
        with open("dna_vocab_list.txt", "r", encoding="utf-8") as f:
            self.dna_tokens = f.read().split("\n")
        with open("rna_vocab_list.txt", "r", encoding="utf-8") as f:
            self.rna_tokens = f.read().split("\n")
        with open("protein_vocab_list.txt", "r", encoding="utf-8") as f:
            self.protein_tokens = f.read().split("\n")

        super().__init__(**kwargs)

    def __call__(self, dna_input, rna_input, protein_input):
        """Tokenize one (DNA, RNA, protein) input triple.

        The DNA input is tokenized with no padding or truncation; RNA and
        protein inputs are padded out to their fixed 1024-token context.

        Returns:
            Tuple of the three tokenizer outputs:
            (dna_output, rna_output, protein_output).
        """
        dna_output = self.dna_tokenizer(dna_input)
        rna_output = self.rna_tokenizer(rna_input, max_length=1024, padding="max_length")
        protein_output = self.protein_tokenizer(protein_input, max_length=1024, padding="max_length")
        return dna_output, rna_output, protein_output

    def _add_tokens(self, *args, **kwargs):
        # Intentional no-op: the three sub-tokenizers own their vocabularies,
        # so the base-class token-adding machinery must not mutate this wrapper.
        pass

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        """Write the three vocabulary files into `save_directory`.

        Args:
            save_directory: Target directory (must already exist).
            filename_prefix: Optional prefix joined with "-", per the Hugging
                Face convention. Fix: made optional (default None) to match
                the `PreTrainedTokenizer.save_vocabulary` base signature;
                existing positional callers are unaffected.

        Returns:
            Tuple of the three written file paths (DNA, RNA, protein).
        """
        prefix = filename_prefix + "-" if filename_prefix else ""
        vocab_file_dna = os.path.join(save_directory, prefix + "dna_vocab_list.txt")
        vocab_file_rna = os.path.join(save_directory, prefix + "rna_vocab_list.txt")
        vocab_file_protein = os.path.join(save_directory, prefix + "protein_vocab_list.txt")

        with open(vocab_file_dna, "w", encoding="utf-8") as f:
            f.write("\n".join(self.dna_tokens))
        with open(vocab_file_rna, "w", encoding="utf-8") as f:
            f.write("\n".join(self.rna_tokens))
        with open(vocab_file_protein, "w", encoding="utf-8") as f:
            f.write("\n".join(self.protein_tokens))
        return (vocab_file_dna, vocab_file_rna, vocab_file_protein)