mgelard committed
Commit f11f568 · verified · Parent: ee13054

Upload tokenizer

Files changed (4)
  1. special_tokens_map.json +1 -0
  2. tokenizer.py +154 -0
  3. tokenizer_config.json +12 -0
  4. vocab.json +1 -0
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {}
tokenizer.py ADDED
@@ -0,0 +1,154 @@
+ import json
+ import os
+ from typing import List, Optional, Union
+
+ import numpy as np
+ import torch
+ from transformers import PretrainedConfig, PreTrainedTokenizer
+
+
+ class BinnedOmicTokenizerConfig(PretrainedConfig):
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+         self.n_expressions_bins = kwargs.get("n_expressions_bins", 64)
+         self.min_omic_value = kwargs.get("min_omic_value", 0.0)
+         self.max_omic_value = kwargs.get("max_omic_value", 1.0)
+         self.use_max_normalization = kwargs.get("use_max_normalization", True)
+         self.normalization_factor = kwargs.get(
+             "normalization_factor", 5.547176906585117
+         )
+         self.prepend_cls_token = kwargs.get("prepend_cls_token", False)
+         self.fixed_sequence_length = kwargs.get("fixed_sequence_length", None)
+         self.unpadded_length = kwargs.get("unpadded_length", None)
+
+
+ class BinnedOmicTokenizer(PreTrainedTokenizer):
+     def __init__(
+         self,
+         n_expressions_bins: int = 64,
+         min_omic_value: float = 0.0,
+         max_omic_value: float = 1.0,
+         use_max_normalization: bool = True,
+         normalization_factor: float = 1.0,
+         prepend_cls_token: bool = False,
+         fixed_sequence_length: Optional[int] = None,
+         unpadded_length: Optional[int] = None,
+         **kwargs,
+     ):
+         bin_tokens = [str(i) for i in range(n_expressions_bins)]
+         special_tokens = ["<pad>", "<mask>", "<cls>"]
+
+         vocab = {tok: i for i, tok in enumerate(bin_tokens)}
+         offset = len(vocab)
+         for i, tok in enumerate(special_tokens):
+             vocab[tok] = offset + i
+
+         ids_to_tokens = {i: tok for tok, i in vocab.items()}
+
+         self.vocab = vocab
+         self.ids_to_tokens = ids_to_tokens
+
+         self.n_expressions_bins = n_expressions_bins
+         self.min_omic_value = min_omic_value
+         self.max_omic_value = max_omic_value
+         self.use_max_normalization = use_max_normalization
+         self.normalization_factor = normalization_factor
+         self.prepend_cls_token = prepend_cls_token
+         self.fixed_sequence_length = fixed_sequence_length
+         self.unpadded_length = unpadded_length
+
+         self.bin_edges = np.linspace(min_omic_value, max_omic_value, n_expressions_bins)
+
+         self.pad_token = "<pad>"
+         self.mask_token = "<mask>"
+         self.cls_token = "<cls>"
+
+         super().__init__(**kwargs)
+
+     def _convert_token_to_id(self, token: str) -> int:
+         return self.vocab.get(token, self.vocab[self.unk_token])
+
+     def _convert_id_to_token(self, index: int) -> str:
+         return self.ids_to_tokens.get(index, self.unk_token)
+
+     def get_vocab(self) -> dict:
+         return self.vocab
+
+     def _tokenize(self, text, **kwargs):
+         raise NotImplementedError("Use `encode` or `batch_encode_plus` methods.")
+
+     def encode(
+         self,
+         gene_expr: Union[np.ndarray, List[float]],
+         pad_to_fixed_length: bool = False,
+         max_length: Optional[int] = None,
+         return_tensors: Optional[str] = None,
+         **kwargs,
+     ) -> Union[List[int], torch.Tensor]:
+         gene_expr = np.array(gene_expr)
+
+         if self.use_max_normalization:
+             gene_expr = gene_expr / self.normalization_factor
+
+         token_ids = np.digitize(gene_expr, self.bin_edges).astype(int)
+         token_ids[gene_expr == 0.0] = 0
+
+         if self.prepend_cls_token:
+             token_ids = np.concatenate([[self.cls_token_id], token_ids])
+
+         if pad_to_fixed_length:
+             current_max_length = self.fixed_sequence_length or max_length
+             if current_max_length is None:
+                 raise ValueError("fixed_sequence_length or max_length must be set.")
+             pad_len = current_max_length - len(token_ids)
+             if pad_len > 0:
+                 token_ids = np.concatenate([token_ids, [self.pad_token_id] * pad_len])
+             else:
+                 token_ids = token_ids[:current_max_length]
+
+         if return_tensors == "pt":
+             return torch.tensor(token_ids).unsqueeze(0)
+         return token_ids.tolist()  # type: ignore
+
+     def batch_encode_plus(
+         self,
+         batch_gene_expr: Union[np.ndarray, List[np.ndarray]],
+         pad_to_fixed_length: bool = False,
+         max_length: Optional[int] = None,
+         return_tensors: Optional[str] = None,
+         **kwargs,
+     ):
+         if isinstance(batch_gene_expr, list):
+             batch_gene_expr = np.array(batch_gene_expr)
+
+         encoded = [
+             self.encode(
+                 gene_expr,
+                 pad_to_fixed_length=pad_to_fixed_length,
+                 max_length=max_length,
+                 return_tensors=None,
+                 **kwargs,
+             )
+             for gene_expr in batch_gene_expr
+         ]
+
+         encoded = np.array(encoded, dtype=np.int64)
+
+         if return_tensors == "pt":
+             return {"input_ids": torch.tensor(encoded)}
+         return {"input_ids": encoded}
+
+     @property
+     def vocab_size(self) -> int:
+         return len(self.vocab)
+
+     def save_vocabulary(
+         self, save_directory: str, filename_prefix: Optional[str] = None
+     ):
+         vocab_file = os.path.join(
+             save_directory,
+             (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
+         )
+         with open(vocab_file, "w") as f:
+             json.dump(self.vocab, f)
+         return (vocab_file,)
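
For context, a minimal usage sketch of the BinnedOmicTokenizer added above, assuming the file is importable as tokenizer.py from the working directory; the expression values and keyword arguments below are illustrative, not taken from this repository:

import numpy as np
from tokenizer import BinnedOmicTokenizer  # the file added in this commit

# Illustrative settings; the shipped config may use different values.
tok = BinnedOmicTokenizer(
    n_expressions_bins=64,
    use_max_normalization=True,
    normalization_factor=5.547176906585117,
    prepend_cls_token=False,
)

gene_expr = np.array([0.0, 0.3, 1.2, 5.5])  # made-up expression vector
ids = tok.encode(gene_expr)                 # normalize, then bin via np.digitize
batch = tok.batch_encode_plus([gene_expr, gene_expr], return_tensors="pt")
print(ids, batch["input_ids"].shape)

With use_max_normalization enabled, values are first divided by normalization_factor, np.digitize then assigns each value to one of the 64 bins, and exact zeros are forced to bin 0.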
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "added_tokens_decoder": {},
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenizer.BinnedOmicTokenizer",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "tokenizer_class": "BinnedOmicTokenizer"
+ }
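
Because auto_map registers tokenizer.BinnedOmicTokenizer for AutoTokenizer, the custom class can be loaded with remote code enabled. A sketch, with the repository id as a placeholder:

from transformers import AutoTokenizer

# "user/repo" is a placeholder for wherever this tokenizer is hosted; the
# auto_map entry above resolves it to tokenizer.BinnedOmicTokenizer.
tok = AutoTokenizer.from_pretrained("user/repo", trust_remote_code=True)
print(type(tok).__name__)  # BinnedOmicTokenizer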
vocab.json ADDED
@@ -0,0 +1 @@
+ {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19, "20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "26": 26, "27": 27, "28": 28, "29": 29, "30": 30, "31": 31, "32": 32, "33": 33, "34": 34, "35": 35, "36": 36, "37": 37, "38": 38, "39": 39, "40": 40, "41": 41, "42": 42, "43": 43, "44": 44, "45": 45, "46": 46, "47": 47, "48": 48, "49": 49, "50": 50, "51": 51, "52": 52, "53": 53, "54": 54, "55": 55, "56": 56, "57": 57, "58": 58, "59": 59, "60": 60, "61": 61, "62": 62, "63": 63, "<pad>": 64, "<mask>": 65, "<cls>": 66}