from transformers import RobertaTokenizerFast

# Split a DNA sequence into k-mers of length k_mer, advancing by stride.
# e.g. with k_mer=2 and stride=2: AAGTGGCAGA -> AA, GT, GG, CA, GA
class DNATokenizerFast(RobertaTokenizerFast):
    def __init__(self, vocab_file=None, merges_file=None, k_mer=2, stride=1,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs
    ):
        self.k_mer = k_mer
        self.stride = stride
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Set after super().__init__() so the base class does not overwrite it.
        self.model_max_length = 1000000

    def cut_and_encode(self, sequence, add_special_tokens):
        # Last valid k-mer start index, rounded down to a multiple of stride.
        # Floor division also yields an empty token list when the sequence is
        # shorter than k_mer, instead of emitting a truncated k-mer.
        last_start = ((len(sequence) - self.k_mer) // self.stride) * self.stride
        tokens = [sequence[i:i + self.k_mer] for i in range(0, last_start + 1, self.stride)]
        token_ids = [self._convert_token_to_id(token) for token in tokens]
        if add_special_tokens:
            token_ids = [self.cls_token_id] + token_ids + [self.eos_token_id]
        return tokens, token_ids

    def _convert_token_to_id(self, token):
        index = self._tokenizer.token_to_id(token)
        # token_to_id returns None for out-of-vocabulary tokens; 0 is a valid
        # id, so compare against None instead of relying on truthiness.
        if index is not None:
            return index
        if token == '':
            return self.pad_token_id
        return self.unk_token_id

    def __call__(self, seq_list, add_special_tokens=False):
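        # Batch-encode: tokenize each sequence and collect only input_ids.
        # No attention mask or padding is produced here.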
        token_ids_list = []
        for seq in seq_list:
            _, token_ids = self.cut_and_encode(seq, add_special_tokens)
            token_ids_list.append(token_ids)
        return {"input_ids": token_ids_list}
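

# -----------------------------------------------------------------------------
# Minimal usage sketch (illustration only). It assumes a BPE-style vocab.json /
# merges.txt pair whose vocabulary covers the 2-mers below; those file names
# are hypothetical and not part of this module.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = DNATokenizerFast(
        vocab_file="vocab.json",    # hypothetical path
        merges_file="merges.txt",   # hypothetical path
        k_mer=2,
        stride=2,
    )
    # cut_and_encode splits AAGTGGCAGA into AA, GT, GG, CA, GA and, with
    # add_special_tokens=True, wraps the ids in <s> ... </s>.
    batch = tokenizer(["AAGTGGCAGA"], add_special_tokens=True)
    print(batch["input_ids"])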