HexYang committed on
Commit 91a838e · verified · 1 Parent(s): 64bd4a3

Upload dna_tokenizer_fast.py

Files changed (1):
  1. dna_tokenizer_fast.py +57 -0
dna_tokenizer_fast.py ADDED
@@ -0,0 +1,57 @@
+from transformers import RobertaTokenizerFast
+
+# Split a DNA sequence into k-mers of length k_mer, stepping by stride.
+# e.g. with k_mer=2 and stride=2: AAGTGGCAGA -> AA, GT, GG, CA, GA
+class DNATokenizerFast(RobertaTokenizerFast):
+    def __init__(self, vocab_file=None, merges_file=None, k_mer=2, stride=1,
+                 errors="replace",
+                 bos_token="<s>",
+                 eos_token="</s>",
+                 sep_token="</s>",
+                 cls_token="<s>",
+                 unk_token="<unk>",
+                 pad_token="<pad>",
+                 mask_token="<mask>",
+                 add_prefix_space=False,
+                 **kwargs
+                 ):
+        self.k_mer = k_mer
+        self.stride = stride
+        self.model_max_length = 1000000
+        super().__init__(
+            vocab_file=vocab_file,
+            merges_file=merges_file,
+            errors=errors,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            sep_token=sep_token,
+            cls_token=cls_token,
+            pad_token=pad_token,
+            mask_token=mask_token,
+            add_prefix_space=add_prefix_space,
+            **kwargs,
+        )
+
+    def cut_and_encode(self, sequence, add_special_tokens):
+        seq_len = int((len(sequence) - self.k_mer) / self.stride) * self.stride  # last stride-aligned start index of a full k-mer
+        tokens = [sequence[i:i + self.k_mer] for i in range(0, seq_len + 1, self.stride)]
+        token_ids = [self._convert_token_to_id(token) for token in tokens]
+        if add_special_tokens:
+            token_ids = [self.cls_token_id] + token_ids + [self.eos_token_id]
+        return tokens, token_ids
+
+    def _convert_token_to_id(self, token):
+        index = self._tokenizer.token_to_id(token)
+        if index is not None:  # id 0 is a valid vocab id, so check for None explicitly
+            return index
+        if token == '':
+            return self.pad_token_id
+        return self.unk_token_id
+
+    def __call__(self, seq_list, add_special_tokens=False):
+        token_ids_list = []
+        for seq in seq_list:
+            _, token_ids = self.cut_and_encode(seq, add_special_tokens)
+            token_ids_list.append(token_ids)
+        return {"input_ids": token_ids_list}
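A minimal usage sketch for the uploaded tokenizer; vocab.json and merges.txt are placeholder names for the vocabulary and merges files that ship with the model repository:

from dna_tokenizer_fast import DNATokenizerFast

# Hypothetical file names; substitute the actual vocab/merges files from the repo.
tokenizer = DNATokenizerFast(vocab_file="vocab.json", merges_file="merges.txt",
                             k_mer=2, stride=1)

# __call__ takes a list of sequences and returns plain Python lists of token ids,
# one list per input sequence, wrapped in a dict under "input_ids".
batch = tokenizer(["AAGTGGCAGA", "TTGCA"], add_special_tokens=True)
print(batch["input_ids"])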