Ttimofeyka
committed on
Upload folder using huggingface_hub
- README.md +36 -0
- config.json +27 -0
- mergekit_config.yml +13 -0
- model-00001-of-00014.safetensors +3 -0
- model-00002-of-00014.safetensors +3 -0
- model-00003-of-00014.safetensors +3 -0
- model-00004-of-00014.safetensors +3 -0
- model-00005-of-00014.safetensors +3 -0
- model-00006-of-00014.safetensors +3 -0
- model-00007-of-00014.safetensors +3 -0
- model-00008-of-00014.safetensors +3 -0
- model-00009-of-00014.safetensors +3 -0
- model-00010-of-00014.safetensors +3 -0
- model-00011-of-00014.safetensors +3 -0
- model-00012-of-00014.safetensors +3 -0
- model-00013-of-00014.safetensors +3 -0
- model-00014-of-00014.safetensors +3 -0
- model.safetensors.index.json +1 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
README.md
ADDED
@@ -0,0 +1,36 @@
+---
+base_model:
+- TheDrummer/Cydonia-22B-v1.2
+tags:
+- merge
+- mergekit
+- lazymergekit
+- TheDrummer/Cydonia-22B-v1.2
+---
+
+# Cydonia-Mini-18B
+
+## 💻 Usage
+
+```python
+# pip install -qU transformers accelerate
+
+from transformers import AutoTokenizer
+import transformers
+import torch
+
+model = "Ttimofeyka/Cydonia-Mini-18B"
+messages = [{"role": "user", "content": "What is a large language model?"}]
+
+tokenizer = AutoTokenizer.from_pretrained(model)
+prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=model,
+    torch_dtype=torch.float16,
+    device_map="auto",
+)
+
+outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+print(outputs[0]["generated_text"])
+```
config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "TheDrummer/Cydonia-22B-v1.2",
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 6144,
+  "initializer_range": 0.02,
+  "intermediate_size": 16384,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 48,
+  "num_hidden_layers": 16,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 1000000.0,
+  "sliding_window": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.1",
+  "use_cache": true,
+  "vocab_size": 32768
+}
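As a sanity check, the `total_size` recorded in model.safetensors.index.json further below can be reproduced from these hyperparameters alone. A minimal sketch (plain Python, assuming the standard Mistral decoder layout with untied embeddings, grouped-query attention, and a gated MLP):

```python
# Parameter count implied by config.json above.
hidden, inter, vocab = 6144, 16384, 32768
layers, heads, kv_heads, head_dim = 16, 48, 8, 128

embed = vocab * hidden                    # model.embed_tokens
lm_head = vocab * hidden                  # untied: "tie_word_embeddings": false
attn = 2 * hidden * heads * head_dim \
     + 2 * hidden * kv_heads * head_dim   # q/o plus k/v projections
mlp = 3 * hidden * inter                  # gate, up, down projections
norms = 2 * hidden                        # input + post-attention RMSNorm

params = embed + lm_head + layers * (attn + mlp + norms) + hidden  # + final norm
print(params)      # 6643980288
print(params * 2)  # 13287960576 bytes in bfloat16
```

The 13,287,960,576-byte total matches the `total_size` in the shard index below exactly.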
mergekit_config.yml
ADDED
@@ -0,0 +1,13 @@
+
+slices:
+- sources:
+  - model: TheDrummer/Cydonia-22B-v1.2
+    layer_range: [0, 16]
+  - model: TheDrummer/Cydonia-22B-v1.2
+    layer_range: [20, 36]
+  - model: TheDrummer/Cydonia-22B-v1.2
+    layer_range: [38, 54]
+
+merge_method: passthrough
+base_model: TheDrummer/Cydonia-22B-v1.2
+dtype: bfloat16
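Note the merge method: passthrough does not average weights, it stacks the listed `layer_range` slices of the donor model in order. The layer mapping this recipe describes can be read off directly; a small illustrative sketch in plain Python, assuming mergekit's half-open `[start, end)` convention for `layer_range`:

```python
# Which donor layer of TheDrummer/Cydonia-22B-v1.2 lands at each merged
# position, following the slice order in mergekit_config.yml.
slices = [(0, 16), (20, 36), (38, 54)]
layer_map = [src for start, end in slices for src in range(start, end)]

print(len(layer_map))                   # 48 donor layers referenced by the recipe
print(layer_map[:3], layer_map[16:19])  # [0, 1, 2] [20, 21, 22]
```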
model-00001-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4e28915b07cf7ebb42391800a106b9bcea379a2122a0cd1e372d67841125c7b
+size 805318992
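This entry, like the other shards below, is a Git LFS pointer rather than the weights themselves: `oid` is the SHA-256 of the real file and `size` is its byte count. A minimal integrity check for a downloaded shard, using only the standard library (the local path is a placeholder for wherever the file was fetched):

```python
import hashlib

# Values copied from the LFS pointer above.
expected_oid = "b4e28915b07cf7ebb42391800a106b9bcea379a2122a0cd1e372d67841125c7b"
expected_size = 805318992

path = "model-00001-of-00014.safetensors"  # hypothetical local copy
digest, size = hashlib.sha256(), 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
        size += len(chunk)

assert size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "checksum mismatch"
print("shard verified")
```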
model-00002-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70d6f2bc8a99a7f2d8e4dc0a0c93e04946875e0e06dd2e97113812799565c16f
+size 981492888
model-00003-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7b94d64be142bf540c02b79e613e3cc2656f7d76a2e55d7bbe5add0136b4b5e
+size 981492888
model-00004-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c07d792955d7f3525c30a4ed3cc0b85b634505b61ef1b72d1f755bd548c7a42b
+size 994088312
model-00005-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0776ca00023260b3e8534b7b35061798e68bf5ff39f9e2724b83daf723da4dee
+size 943756776
model-00006-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:983660312bc9e33b872c317a6d12dcad479b0706152271cfd13f0db8ce1d88cd
+size 981492888
model-00007-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b38ccd7f5aed4a31861a9a978dc46081f6357e615c0e128bfc82dc41ca9e91d
+size 981492888
model-00008-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb266c5316cc972107c2cd78a7aba47a36b7dcbe2f58a511e52a96d737eb6e92
+size 994088312
model-00009-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9e9402992fb1e75a715667f54b4082f6b3c8eecc2342645c4fddb33c01b2909
+size 943756768
model-00010-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:387eed6cb5129bc4be3229fcf76310ce0f8745457d4f7c044a966580e90fabfe
+size 981492896
model-00011-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dad9e814ab05cd3ba5934fed40b31d7237ed04657fd70c6e08aa8754913e0d9
+size 981492896
model-00012-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8349904866d7954c59eba544341f5fc6be6ae765a8b586eea93023a6f6ed431f
+size 994088328
model-00013-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b42dcc668b1487bfdb2e815392caf31e5a8408f639eb6368b76e52561f125ab
+size 943756792
model-00014-of-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb6dd76bea44841d657be6667d9aa243150f46a9e4882e24bb2adcfed51b247d
+size 780166168
model.safetensors.index.json
ADDED
@@ -0,0 +1 @@
+{"metadata": {"mergekit_version": "0.0.5.1", "total_size": 13287960576}, "weight_map": {"lm_head.weight": "model-00001-of-00014.safetensors", "model.embed_tokens.weight": "model-00001-of-00014.safetensors", "model.layers.0.input_layernorm.weight": "model-00001-of-00014.safetensors", "model.layers.0.mlp.down_proj.weight": "model-00002-of-00014.safetensors", "model.layers.0.mlp.gate_proj.weight": "model-00002-of-00014.safetensors", "model.layers.0.mlp.up_proj.weight": "model-00002-of-00014.safetensors", "model.layers.0.post_attention_layernorm.weight": "model-00002-of-00014.safetensors", "model.layers.0.self_attn.k_proj.weight": "model-00002-of-00014.safetensors", "model.layers.0.self_attn.o_proj.weight": "model-00002-of-00014.safetensors", "model.layers.0.self_attn.q_proj.weight": "model-00002-of-00014.safetensors", "model.layers.0.self_attn.v_proj.weight": "model-00002-of-00014.safetensors", "model.layers.1.input_layernorm.weight": "model-00002-of-00014.safetensors", "model.layers.1.mlp.down_proj.weight": "model-00002-of-00014.safetensors", "model.layers.1.mlp.gate_proj.weight": "model-00003-of-00014.safetensors", "model.layers.1.mlp.up_proj.weight": "model-00003-of-00014.safetensors", "model.layers.1.post_attention_layernorm.weight": "model-00003-of-00014.safetensors", "model.layers.1.self_attn.k_proj.weight": "model-00003-of-00014.safetensors", "model.layers.1.self_attn.o_proj.weight": "model-00003-of-00014.safetensors", "model.layers.1.self_attn.q_proj.weight": "model-00003-of-00014.safetensors", "model.layers.1.self_attn.v_proj.weight": "model-00003-of-00014.safetensors", "model.layers.2.input_layernorm.weight": "model-00003-of-00014.safetensors", "model.layers.2.mlp.down_proj.weight": "model-00003-of-00014.safetensors", "model.layers.2.mlp.gate_proj.weight": "model-00003-of-00014.safetensors", "model.layers.2.mlp.up_proj.weight": "model-00004-of-00014.safetensors", "model.layers.2.post_attention_layernorm.weight": "model-00004-of-00014.safetensors", "model.layers.2.self_attn.k_proj.weight": "model-00004-of-00014.safetensors", "model.layers.2.self_attn.o_proj.weight": "model-00004-of-00014.safetensors", "model.layers.2.self_attn.q_proj.weight": "model-00004-of-00014.safetensors", "model.layers.2.self_attn.v_proj.weight": "model-00004-of-00014.safetensors", "model.layers.3.input_layernorm.weight": "model-00004-of-00014.safetensors", "model.layers.3.mlp.down_proj.weight": "model-00004-of-00014.safetensors", "model.layers.3.mlp.gate_proj.weight": "model-00004-of-00014.safetensors", "model.layers.3.mlp.up_proj.weight": "model-00004-of-00014.safetensors", "model.layers.3.post_attention_layernorm.weight": "model-00004-of-00014.safetensors", "model.layers.3.self_attn.k_proj.weight": "model-00004-of-00014.safetensors", "model.layers.3.self_attn.o_proj.weight": "model-00005-of-00014.safetensors", "model.layers.3.self_attn.q_proj.weight": "model-00005-of-00014.safetensors", "model.layers.3.self_attn.v_proj.weight": "model-00005-of-00014.safetensors", "model.layers.4.input_layernorm.weight": "model-00005-of-00014.safetensors", "model.layers.4.mlp.down_proj.weight": "model-00005-of-00014.safetensors", "model.layers.4.mlp.gate_proj.weight": "model-00005-of-00014.safetensors", "model.layers.4.mlp.up_proj.weight": "model-00005-of-00014.safetensors", "model.layers.4.post_attention_layernorm.weight": "model-00005-of-00014.safetensors", "model.layers.4.self_attn.k_proj.weight": "model-00005-of-00014.safetensors", "model.layers.4.self_attn.o_proj.weight": "model-00005-of-00014.safetensors", "model.layers.4.self_attn.q_proj.weight": "model-00005-of-00014.safetensors", "model.layers.4.self_attn.v_proj.weight": "model-00005-of-00014.safetensors", "model.layers.5.input_layernorm.weight": "model-00005-of-00014.safetensors", "model.layers.5.mlp.down_proj.weight": "model-00006-of-00014.safetensors", "model.layers.5.mlp.gate_proj.weight": "model-00006-of-00014.safetensors", "model.layers.5.mlp.up_proj.weight": "model-00006-of-00014.safetensors", "model.layers.5.post_attention_layernorm.weight": "model-00006-of-00014.safetensors", "model.layers.5.self_attn.k_proj.weight": "model-00006-of-00014.safetensors", "model.layers.5.self_attn.o_proj.weight": "model-00006-of-00014.safetensors", "model.layers.5.self_attn.q_proj.weight": "model-00006-of-00014.safetensors", "model.layers.5.self_attn.v_proj.weight": "model-00006-of-00014.safetensors", "model.layers.6.input_layernorm.weight": "model-00006-of-00014.safetensors", "model.layers.6.mlp.down_proj.weight": "model-00006-of-00014.safetensors", "model.layers.6.mlp.gate_proj.weight": "model-00007-of-00014.safetensors", "model.layers.6.mlp.up_proj.weight": "model-00007-of-00014.safetensors", "model.layers.6.post_attention_layernorm.weight": "model-00007-of-00014.safetensors", "model.layers.6.self_attn.k_proj.weight": "model-00007-of-00014.safetensors", "model.layers.6.self_attn.o_proj.weight": "model-00007-of-00014.safetensors", "model.layers.6.self_attn.q_proj.weight": "model-00007-of-00014.safetensors", "model.layers.6.self_attn.v_proj.weight": "model-00007-of-00014.safetensors", "model.layers.7.input_layernorm.weight": "model-00007-of-00014.safetensors", "model.layers.7.mlp.down_proj.weight": "model-00007-of-00014.safetensors", "model.layers.7.mlp.gate_proj.weight": "model-00007-of-00014.safetensors", "model.layers.7.mlp.up_proj.weight": "model-00008-of-00014.safetensors", "model.layers.7.post_attention_layernorm.weight": "model-00008-of-00014.safetensors", "model.layers.7.self_attn.k_proj.weight": "model-00008-of-00014.safetensors", "model.layers.7.self_attn.o_proj.weight": "model-00008-of-00014.safetensors", "model.layers.7.self_attn.q_proj.weight": "model-00008-of-00014.safetensors", "model.layers.7.self_attn.v_proj.weight": "model-00008-of-00014.safetensors", "model.layers.8.input_layernorm.weight": "model-00008-of-00014.safetensors", "model.layers.8.mlp.down_proj.weight": "model-00008-of-00014.safetensors", "model.layers.8.mlp.gate_proj.weight": "model-00008-of-00014.safetensors", "model.layers.8.mlp.up_proj.weight": "model-00008-of-00014.safetensors", "model.layers.8.post_attention_layernorm.weight": "model-00008-of-00014.safetensors", "model.layers.8.self_attn.k_proj.weight": "model-00008-of-00014.safetensors", "model.layers.8.self_attn.o_proj.weight": "model-00009-of-00014.safetensors", "model.layers.8.self_attn.q_proj.weight": "model-00009-of-00014.safetensors", "model.layers.8.self_attn.v_proj.weight": "model-00009-of-00014.safetensors", "model.layers.9.input_layernorm.weight": "model-00009-of-00014.safetensors", "model.layers.9.mlp.down_proj.weight": "model-00009-of-00014.safetensors", "model.layers.9.mlp.gate_proj.weight": "model-00009-of-00014.safetensors", "model.layers.9.mlp.up_proj.weight": "model-00009-of-00014.safetensors", "model.layers.9.post_attention_layernorm.weight": "model-00009-of-00014.safetensors", "model.layers.9.self_attn.k_proj.weight": "model-00009-of-00014.safetensors", "model.layers.9.self_attn.o_proj.weight": "model-00009-of-00014.safetensors", "model.layers.9.self_attn.q_proj.weight": "model-00009-of-00014.safetensors", "model.layers.9.self_attn.v_proj.weight": "model-00009-of-00014.safetensors", "model.layers.10.input_layernorm.weight": "model-00009-of-00014.safetensors", "model.layers.10.mlp.down_proj.weight": "model-00010-of-00014.safetensors", "model.layers.10.mlp.gate_proj.weight": "model-00010-of-00014.safetensors", "model.layers.10.mlp.up_proj.weight": "model-00010-of-00014.safetensors", "model.layers.10.post_attention_layernorm.weight": "model-00010-of-00014.safetensors", "model.layers.10.self_attn.k_proj.weight": "model-00010-of-00014.safetensors", "model.layers.10.self_attn.o_proj.weight": "model-00010-of-00014.safetensors", "model.layers.10.self_attn.q_proj.weight": "model-00010-of-00014.safetensors", "model.layers.10.self_attn.v_proj.weight": "model-00010-of-00014.safetensors", "model.layers.11.input_layernorm.weight": "model-00010-of-00014.safetensors", "model.layers.11.mlp.down_proj.weight": "model-00010-of-00014.safetensors", "model.layers.11.mlp.gate_proj.weight": "model-00011-of-00014.safetensors", "model.layers.11.mlp.up_proj.weight": "model-00011-of-00014.safetensors", "model.layers.11.post_attention_layernorm.weight": "model-00011-of-00014.safetensors", "model.layers.11.self_attn.k_proj.weight": "model-00011-of-00014.safetensors", "model.layers.11.self_attn.o_proj.weight": "model-00011-of-00014.safetensors", "model.layers.11.self_attn.q_proj.weight": "model-00011-of-00014.safetensors", "model.layers.11.self_attn.v_proj.weight": "model-00011-of-00014.safetensors", "model.layers.12.input_layernorm.weight": "model-00011-of-00014.safetensors", "model.layers.12.mlp.down_proj.weight": "model-00011-of-00014.safetensors", "model.layers.12.mlp.gate_proj.weight": "model-00011-of-00014.safetensors", "model.layers.12.mlp.up_proj.weight": "model-00012-of-00014.safetensors", "model.layers.12.post_attention_layernorm.weight": "model-00012-of-00014.safetensors", "model.layers.12.self_attn.k_proj.weight": "model-00012-of-00014.safetensors", "model.layers.12.self_attn.o_proj.weight": "model-00012-of-00014.safetensors", "model.layers.12.self_attn.q_proj.weight": "model-00012-of-00014.safetensors", "model.layers.12.self_attn.v_proj.weight": "model-00012-of-00014.safetensors", "model.layers.13.input_layernorm.weight": "model-00012-of-00014.safetensors", "model.layers.13.mlp.down_proj.weight": "model-00012-of-00014.safetensors", "model.layers.13.mlp.gate_proj.weight": "model-00012-of-00014.safetensors", "model.layers.13.mlp.up_proj.weight": "model-00012-of-00014.safetensors", "model.layers.13.post_attention_layernorm.weight": "model-00012-of-00014.safetensors", "model.layers.13.self_attn.k_proj.weight": "model-00012-of-00014.safetensors", "model.layers.13.self_attn.o_proj.weight": "model-00013-of-00014.safetensors", "model.layers.13.self_attn.q_proj.weight": "model-00013-of-00014.safetensors", "model.layers.13.self_attn.v_proj.weight": "model-00013-of-00014.safetensors", "model.layers.14.input_layernorm.weight": "model-00013-of-00014.safetensors", "model.layers.14.mlp.down_proj.weight": "model-00013-of-00014.safetensors", "model.layers.14.mlp.gate_proj.weight": "model-00013-of-00014.safetensors", "model.layers.14.mlp.up_proj.weight": "model-00013-of-00014.safetensors", "model.layers.14.post_attention_layernorm.weight": "model-00013-of-00014.safetensors", "model.layers.14.self_attn.k_proj.weight": "model-00013-of-00014.safetensors", "model.layers.14.self_attn.o_proj.weight": "model-00013-of-00014.safetensors", "model.layers.14.self_attn.q_proj.weight": "model-00013-of-00014.safetensors", "model.layers.14.self_attn.v_proj.weight": "model-00013-of-00014.safetensors", "model.layers.15.input_layernorm.weight": "model-00013-of-00014.safetensors", "model.layers.15.mlp.down_proj.weight": "model-00014-of-00014.safetensors", "model.layers.15.mlp.gate_proj.weight": "model-00014-of-00014.safetensors", "model.layers.15.mlp.up_proj.weight": "model-00014-of-00014.safetensors", "model.layers.15.post_attention_layernorm.weight": "model-00014-of-00014.safetensors", "model.layers.15.self_attn.k_proj.weight": "model-00014-of-00014.safetensors", "model.layers.15.self_attn.o_proj.weight": "model-00014-of-00014.safetensors", "model.layers.15.self_attn.q_proj.weight": "model-00014-of-00014.safetensors", "model.layers.15.self_attn.v_proj.weight": "model-00014-of-00014.safetensors", "model.norm.weight": "model-00014-of-00014.safetensors"}}
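The `weight_map` is how `from_pretrained` finds each tensor without opening all 14 shards. A sketch of loading a single tensor by hand through the index, assuming the repo files sit in the working directory and the `safetensors` package is installed:

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # "model-00002-of-00014.safetensors"
with safe_open(shard, framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)

print(shard, tuple(tensor.shape))  # expect (6144, 6144) given config.json
```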
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
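These definitions are what `AutoTokenizer` exposes as its special-token attributes; note that `pad_token` reuses `</s>`. A quick check, assuming the tokenizer files are available from the hub:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Ttimofeyka/Cydonia-Mini-18B")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.pad_token == tok.eos_token)               # True: pad reuses </s>
```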
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59f95e28944c062244741268596badc900df86c7f5ded05088d2da22a7379e06
+size 587583
tokenizer_config.json
ADDED
The diff for this file is too large to render.
See raw diff