manueldeprada (HF Staff) committed
Commit 47784f5 · 1 Parent(s): 7ff9178
README.md ADDED
@@ -0,0 +1,92 @@
---
library_name: transformers
tags:
- custom_generate
- sampling
- kvcache
---

# Sampling with KV Cache

## Description
A clean, hackable implementation of sampling (also called ancestral or multinomial sampling) with full KV cache support. It is a simplified alternative to the complex generation mixin in `transformers`, designed for readability and ease of modification while maintaining full performance.

The implementation supports both sampling and greedy decoding, with optional temperature scaling and top-k/top-p filtering.

## Base model
- [HuggingFaceTB/SmolLM2-135M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct)

## Model compatibility
Most transformer LLM/VLM models trained for causal language modeling.

## Relevant Arguments
- `temperature` (float): Sampling temperature (default: 1.0; higher = more random)
- `top_k` (int): Consider only the `top_k` most probable tokens (default: None)
- `top_p` (float): Consider only the smallest set of tokens whose cumulative probability exceeds `top_p` (nucleus sampling; default: None)
- `do_sample` (bool): Use sampling (True, default) or greedy decoding (False)
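
These arguments can be combined in a single call. An illustrative sketch (it assumes the `model` and `inputs` objects created in the usage example below):

```py
gen_out = model.generate(
    **inputs,
    custom_generate="manueldeprada/sampling_with_kvcache",
    temperature=0.7,
    top_k=50,
    top_p=0.95,
    trust_remote_code=True,
)
```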

### Logits Processing Order
Logits processors are applied in sequence: `temperature → softmax → top_k → top_p`, matching the effective order of Hugging Face's `LogitsProcessor` pipeline. Temperature scaling happens before top-p filtering, so it changes the probability distribution that top-p operates on.

For example, with `temperature=1.0`, `top_p=0.9` might include tokens A, B, and C. With `temperature=0.5`, the probability mass is much more concentrated, so `top_p=0.9` might include only token A.
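
The following toy snippet illustrates this effect (illustrative only; the four logit values stand in for tokens A, B, C, D and are made up):

```py
import torch

logits = torch.tensor([2.5, 1.0, 0.5, -1.0])  # hypothetical logits for tokens A, B, C, D
top_p = 0.9

for temperature in (1.0, 0.5):
    probs = (logits / temperature).softmax(-1)
    sorted_probs, sorted_idx = probs.sort(descending=True)
    # Exclusive cumulative sum: keep a token while the mass before it is still below top_p,
    # mirroring the shifted mask used by the top-p filter in custom_generate/generate.py.
    exclusive_cum = sorted_probs.cumsum(-1) - sorted_probs
    kept = sorted_idx[exclusive_cum < top_p]
    print(f"temperature={temperature}: keeps token indices {kept.tolist()}")  # [0, 1, 2], then [0]
```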

## Outputs
When `return_dict_in_generate=True`, a dictionary is returned with:
- `sequences`: Generated token IDs
- `scores`: Log probabilities of the sampled tokens (with temperature/sampling modifications applied)
- `logprobs`: Original model log probabilities (temperature 1, no modifications)

Otherwise, a tensor of generated token IDs is returned. A short sketch after the usage example below shows how to turn the per-token values into sequence-level log probabilities.

## Example usage

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct", device_map="auto")

inputs = tokenizer(["The quick brown"], return_tensors="pt").to(model.device)

# Basic sampling
gen_out = model.generate(**inputs, custom_generate="manueldeprada/sampling_with_kvcache", trust_remote_code=True)

# With temperature
gen_out = model.generate(**inputs, custom_generate="manueldeprada/sampling_with_kvcache", temperature=0.8, trust_remote_code=True)

# With top-k
gen_out = model.generate(**inputs, custom_generate="manueldeprada/sampling_with_kvcache", top_k=50, trust_remote_code=True)

# With top-p (nucleus sampling)
gen_out = model.generate(**inputs, custom_generate="manueldeprada/sampling_with_kvcache", top_p=0.9, trust_remote_code=True)

# Greedy decoding (no sampling)
gen_out = model.generate(**inputs, custom_generate="manueldeprada/sampling_with_kvcache", do_sample=False, trust_remote_code=True)

# Get detailed output with probabilities
gen_out = model.generate(
    **inputs,
    custom_generate="manueldeprada/sampling_with_kvcache",
    return_dict_in_generate=True,
    trust_remote_code=True
)
print(f"Generated text: {tokenizer.batch_decode(gen_out['sequences'], skip_special_tokens=True)}")
print(f"Sampling scores: {gen_out['scores']}")
print(f"Model log probabilities: {gen_out['logprobs']}")
```
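
Building on the detailed-output example above, the per-token values can be reduced to sequence-level quantities. A sketch (it assumes the `gen_out` dictionary from the previous block; finished positions hold a score of 0, so a plain sum works):

```py
# Total log probability of each generated continuation under the model (temperature=1)
model_seq_logprob = gen_out["logprobs"].sum(dim=-1)
# Total log probability under the modified sampling distribution (temperature/top-k/top-p)
sampling_seq_logprob = gen_out["scores"].sum(dim=-1)
print(model_seq_logprob, sampling_seq_logprob)
```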

## Algorithm
1. Initialize the KV cache and prepare the input sequences
2. For each generation step:
   - Get the logits for the last position from the model
   - Apply temperature scaling and softmax to obtain probabilities
   - Optionally apply top-k filtering (keep only the top-k tokens)
   - Optionally apply top-p filtering (nucleus sampling)
   - Sample the next token from the resulting distribution (or take the argmax for greedy decoding)
   - Append the selected token to the sequence
   - Update the KV cache and track which sequences have finished
3. Return the generated sequences and probability information
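
A minimal sketch of a single decoding step (a simplified illustration that assumes the last-position `logits` are already available and omits top-k for brevity; the full cache-aware loop lives in `custom_generate/generate.py`):

```py
import torch

def sample_next_token(logits, temperature=1.0, top_p=0.9, do_sample=True):
    # Temperature scaling followed by softmax, matching the order described above.
    probs = (logits / temperature).softmax(-1)
    # Top-p (nucleus) filtering on the probabilities.
    sorted_probs, sorted_idx = probs.sort(descending=True, dim=-1)
    exclusive_cum = sorted_probs.cumsum(-1) - sorted_probs
    sorted_probs[exclusive_cum >= top_p] = 0.0
    probs = torch.zeros_like(probs).scatter(-1, sorted_idx, sorted_probs)
    probs = probs / probs.sum(-1, keepdim=True)
    # Sample from the filtered distribution, or take the argmax for greedy decoding.
    if do_sample:
        return torch.multinomial(probs, num_samples=1)
    return probs.argmax(-1, keepdim=True)

next_ids = sample_next_token(torch.randn(2, 10))  # one step for a batch of two toy distributions
```
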
config.json ADDED
@@ -0,0 +1,29 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "hidden_act": "silu",
  "hidden_size": 576,
  "initializer_range": 0.041666666666666664,
  "intermediate_size": 1536,
  "is_llama_config": true,
  "max_position_embeddings": 8192,
  "model_type": "llama",
  "num_attention_heads": 9,
  "num_hidden_layers": 30,
  "num_key_value_heads": 3,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_interleaved": false,
  "rope_scaling": null,
  "rope_theta": 100000,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.40.1",
  "use_cache": true,
  "vocab_size": 49152
}
custom_generate/generate.py ADDED
@@ -0,0 +1,204 @@
import torch


def next_logits_with_cache_update(model, model_kwargs, input_ids):
    """
    Gets the next-token logits and updates the KV cache:
    - Runs the model forward pass
    - Extracts the logits for the last token
    - Updates the KV cache
    - Returns updated `model_kwargs` and `logits`

    Args:
        model: The language model
        model_kwargs: Model keyword arguments, including the KV cache
        input_ids: Current input token IDs

    Returns:
        Updated model_kwargs and the logits for the next token
    """
    model_inputs = model.prepare_inputs_for_generation(input_ids, **model_kwargs)
    with torch.no_grad():
        outputs = model(**model_inputs, return_dict=True)

    logits = outputs.logits[:, -1].detach()
    model_kwargs = model._update_model_kwargs_for_generation(
        outputs, model_kwargs, is_encoder_decoder=model.config.is_encoder_decoder
    )
    del outputs
    return model_kwargs, logits


def init_gen(model_kwargs, model, max_new_tokens, bos_token_id):
    """
    Initializes the generation process and prepares the KV cache:
    - Sets up input sequences and model inputs
    - Prepares the KV cache for generation
    - Returns updated `model_kwargs` and `input_ids`

    Args:
        model_kwargs: Model keyword arguments
        model: The language model
        max_new_tokens: Maximum number of new tokens to generate
        bos_token_id: Beginning-of-sequence token ID

    Returns:
        Model keyword arguments and input token IDs
    """
    input_ids, model_input_name, model_kwargs = model._prepare_model_inputs(
        None, bos_token_id, model_kwargs
    )

    batch_size = input_ids.shape[0]
    model._prepare_cache_for_generation(
        model.generation_config, model_kwargs, None, batch_size,
        max_cache_length=max_new_tokens, device=input_ids.device
    )

    # Get the initial cache position
    model_kwargs = model._get_initial_cache_position(input_ids.shape[1], input_ids.device, model_kwargs)
    return model_kwargs, input_ids


def _apply_top_k(ps, model):
    """Apply top-k filtering to a probability distribution."""
    if not hasattr(model, 'generation_config') or not hasattr(model.generation_config, 'top_k'):
        return ps

    top_k = model.generation_config.top_k
    if top_k is None or top_k >= ps.size(-1):
        return ps

    # Zero out every probability below the k-th largest, then renormalize.
    indices_to_remove = ps < torch.topk(ps, top_k)[0][..., -1, None]
    ps[indices_to_remove] = 0.0
    return ps / ps.sum(dim=-1, keepdim=True)


def _apply_top_p(ps, model):
    """Apply top-p (nucleus) filtering to a probability distribution."""
    if not hasattr(model, 'generation_config') or not hasattr(model.generation_config, 'top_p'):
        return ps

    top_p = model.generation_config.top_p
    if top_p is None or top_p >= 1.0:
        return ps

    sorted_probs, sorted_indices = torch.sort(ps, descending=True)
    cumulative_probs = torch.cumsum(sorted_probs, dim=-1)

    # Remove tokens once the cumulative probability exceeds top_p, always keeping the most probable token.
    sorted_indices_to_remove = cumulative_probs > top_p
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0

    indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
    ps[indices_to_remove] = 0.0
    return ps / ps.sum(dim=-1, keepdim=True)


def sampling_with_kvcache(model_kwargs, model, eos_token_ids, pad_token_id, bos_token_id, do_sample=True, max_new_tokens=20, temperature=1.0):
    """
    Sampling implementation with proper KV caching.

    Args:
        model_kwargs: Model keyword arguments (tokenized inputs, etc.)
        model: The language model
        eos_token_ids: Tensor of end-of-sequence token IDs
        pad_token_id: Padding token ID
        bos_token_id: Beginning-of-sequence token ID
        do_sample: Whether to sample (True) or decode greedily (False)
        max_new_tokens: Maximum number of new tokens to generate
        temperature: Sampling temperature

    Returns:
        Generated sequences, sampling scores, and model log probabilities
    """
    # Initialize the generation process and prepare the KV cache
    model_kwargs, input_ids = init_gen(model_kwargs, model, max_new_tokens, bos_token_id)
    batch_size, _ = input_ids.shape

    # Keeps track of which sequences are still unfinished
    active_seqs = input_ids.new_ones((batch_size, 1), dtype=torch.bool)
    # Log probabilities of the sampled tokens under the modified (temperature/top-k/top-p) distribution
    scores = torch.zeros((batch_size, max_new_tokens), dtype=model.dtype)
    # Unmodified model log probabilities (temperature=1, no sampling processors applied)
    logprobs = torch.zeros((batch_size, max_new_tokens), dtype=model.dtype)

    for i in range(max_new_tokens):
        # Get the next-token logits and update the KV cache
        model_kwargs, logits = next_logits_with_cache_update(model, model_kwargs, input_ids)
        # Store the original model probabilities (temperature=1, no sampling processors applied)
        model_ps = logits.softmax(-1)

        # Logits processors (temperature, top-k, top-p). These can be chained!
        ps = (logits / temperature).softmax(-1)
        ps = _apply_top_k(ps, model)
        ps = _apply_top_p(ps, model)

        # Sample the next token; finished sequences keep emitting the pad token
        if do_sample:  # Sampling
            next_token_ids = torch.multinomial(ps, 1) * active_seqs + pad_token_id * ~active_seqs
        else:  # Greedy decoding
            next_token_ids = torch.argmax(ps, dim=-1).unsqueeze(-1) * active_seqs + pad_token_id * ~active_seqs
        next_token_logprobs = ps.gather(-1, next_token_ids).log()
        next_token_model_logprobs = model_ps.gather(-1, next_token_ids).log()

        input_ids = torch.cat([input_ids, next_token_ids], dim=-1)
        scores[:, i] = (next_token_logprobs * active_seqs).squeeze()
        logprobs[:, i] = (next_token_model_logprobs * active_seqs).squeeze()

        # Mark sequences as finished once they emit an EOS token
        active_seqs &= ~torch.isin(next_token_ids, eos_token_ids)
        if active_seqs.sum() == 0:
            break
    return input_ids.detach().cpu(), scores[:, :i + 1], logprobs[:, :i + 1]


def generate(model, **kwargs):
    """
    Sampling strategy: multinomial sampling with temperature and optional top-k/top-p filtering.
    Simple implementation with proper KV caching support.

    Args:
        model: The language model
        **kwargs: Model inputs from the tokenizer plus generation options, e.g.
            max_new_tokens, do_sample, temperature, top_k, top_p, eos_token_ids,
            pad_token_id, bos_token_id, return_dict_in_generate

    Returns:
        Generated token IDs, or a dict with sequences, scores, and logprobs
        when `return_dict_in_generate=True`
    """
    generation_config = model.generation_config
    max_new_tokens = kwargs.get('max_new_tokens', generation_config.max_new_tokens)
    max_new_tokens = 512 if max_new_tokens is None else max_new_tokens
    do_sample = kwargs.get('do_sample', True)
    eos_token_ids = kwargs.get('eos_token_ids', generation_config.eos_token_id)
    if eos_token_ids is None:
        raise ValueError("Model generation config does not have an EOS token id. You must provide it to generate() with the eos_token_ids argument.")
    eos_token_ids = torch.as_tensor(eos_token_ids, device=model.device)
    if eos_token_ids.ndim == 0:
        eos_token_ids = eos_token_ids.unsqueeze(0)

    pad_token_id = kwargs.get('pad_token_id', generation_config.pad_token_id if generation_config.pad_token_id is not None else eos_token_ids[0])
    bos_token_id = kwargs.get('bos_token_id', generation_config.bos_token_id)
    if bos_token_id is None:
        raise ValueError("Model generation config does not have a BOS token id. You must provide it to generate() with the bos_token_id argument.")
    temperature = kwargs.get('temperature', 1.0)
    return_dict = kwargs.get('return_dict_in_generate', False)

    generated_ids, scores, logprobs = sampling_with_kvcache(
        model_kwargs=kwargs,
        model=model,
        eos_token_ids=eos_token_ids,
        pad_token_id=pad_token_id,
        bos_token_id=bos_token_id,
        do_sample=do_sample,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
    )

    if return_dict:
        return {
            "sequences": generated_ids,
            "scores": scores,
            "logprobs": logprobs,
        }
    else:
        return generated_ids

generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "transformers_version": "4.40.1"
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:80521b40281d6ce74e35c9282c22539e75aa0ac8578892b2a59955ef78d55da1
size 269060552
special_tokens_map.json ADDED
@@ -0,0 +1,42 @@
{
  "additional_special_tokens": [
    "<|endoftext|>",
    "<|im_start|>",
    "<|im_end|>",
    "<repo_name>",
    "<reponame>",
    "<file_sep>",
    "<filename>",
    "<gh_stars>",
    "<issue_start>",
    "<issue_comment>",
    "<issue_closed>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<jupyter_script>",
    "<empty_output>"
  ],
  "bos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,167 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<repo_name>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<reponame>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<file_sep>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "6": {
      "content": "<filename>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "7": {
      "content": "<gh_stars>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "8": {
      "content": "<issue_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "9": {
      "content": "<issue_comment>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "10": {
      "content": "<issue_closed>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "11": {
      "content": "<jupyter_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "12": {
      "content": "<jupyter_text>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "13": {
      "content": "<jupyter_code>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "14": {
      "content": "<jupyter_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "15": {
      "content": "<jupyter_script>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "16": {
      "content": "<empty_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|endoftext|>",
    "<|im_start|>",
    "<|im_end|>",
    "<repo_name>",
    "<reponame>",
    "<file_sep>",
    "<filename>",
    "<gh_stars>",
    "<issue_start>",
    "<issue_comment>",
    "<issue_closed>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<jupyter_script>",
    "<empty_output>"
  ],
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "model_max_length": 8192,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>",
  "vocab_size": 49152
}
vocab.json ADDED
The diff for this file is too large to render. See raw diff