update model
- config.json +30 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +9 -0
- spm.model +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +16 -0
- train_result.log +107 -0
config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "architectures": [
+    "DebertaV2ForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 5,
+  "eos_token_id": 4,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-07,
+  "max_position_embeddings": 256,
+  "max_relative_positions": -1,
+  "model_type": "deberta-v2",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 6,
+  "pad_token_id": 0,
+  "pooler_dropout": 0,
+  "pooler_hidden_act": "gelu",
+  "pooler_hidden_size": 768,
+  "pos_att_type": null,
+  "position_biased_input": true,
+  "relative_attention": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.20.1",
+  "type_vocab_size": 0,
+  "vocab_size": 80000
+}
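For reference, a minimal sketch (assuming transformers is installed, roughly matching the 4.20.1 version recorded above) of rebuilding this 6-layer DeBERTa-v2 masked-LM architecture from the committed config:

```python
from transformers import DebertaV2Config, DebertaV2ForMaskedLM

# Path assumes config.json from this commit sits in the working directory.
config = DebertaV2Config.from_json_file("config.json")
model = DebertaV2ForMaskedLM(config)  # randomly initialized; trained weights live in pytorch_model.bin

# A few of the committed hyperparameters, for orientation.
print(config.num_hidden_layers, config.hidden_size, config.vocab_size)  # 6 768 80000
```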
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b39953d7570d3b0611c3b17ae2e487f97112e0e9376f57f2453312a59e13876
+size 419387373
special_tokens_map.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
spm.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d13781724629b8e0fda5545b9ddd32267f634323cd62ac71876f2ceb9dc8a308
+size 1600285
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "add_prefix_space": true,
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "name_or_path": "hieule/vie-spm-uncased-80k",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "sp_model_kwargs": {},
+  "special_tokens_map_file": "/home/viet/.cache/huggingface/transformers/9e82dafff602fe591d6dcb10539a07b733b6ba6d1da0ad0413ea069c6cf96a45.33ea9968fa7ac107543fcd0be7da0a5b475c882cb5390b8ce2fc4a0c34fe4d6b",
+  "split_by_punct": false,
+  "tokenizer_class": "DebertaV2Tokenizer",
+  "unk_token": "[UNK]"
+}
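Together with spm.model, tokenizer.json, and special_tokens_map.json above, this config lets the tokenizer be loaded directly with transformers. A minimal sketch, assuming the files from this commit are in the current directory (the sample sentence is illustrative only):

```python
from transformers import AutoTokenizer

# "." assumes this repo has been cloned and is the working directory.
tokenizer = AutoTokenizer.from_pretrained(".")
print(type(tokenizer).__name__)  # DebertaV2Tokenizer (or its fast variant), per tokenizer_class

# Special tokens come from special_tokens_map.json; their ids should line up
# with pad_token_id / bos_token_id / eos_token_id in config.json.
print(tokenizer.pad_token, tokenizer.pad_token_id)
print(tokenizer.cls_token, tokenizer.cls_token_id)
print(tokenizer.sep_token, tokenizer.sep_token_id)

# Illustrative Vietnamese input; the exact ids depend on the committed spm.model.
print(tokenizer("xin chào")["input_ids"])
```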
train_result.log
ADDED
@@ -0,0 +1,107 @@
+Before load dataset, RAM used: 0.38 GB | Avaiable: 51.32 GB | Left: 50.94 GB
+Dataset({
+    features: ['text'],
+    num_rows: 7625355
+})
+After load dataset, RAM used: 1.78 GB | Avaiable: 50.99 GB | Left: 49.22 GB
+After Prepare Dataloader, RAM used: 2.30 GB | Avaiable: 46.91 GB | Left: 44.61 GB
+After epoch 1, RAM used: 3.57 GB | Avaiable: 51.76 GB | Left: 48.19 GB
+
+>>> Epoch 1: Perplexity: 21.590424136414132 Loss: 2.6214573416621008
+Loss improved inf -> 2.6214573416621008
+Saved training checkpoint
+After epoch 2, RAM used: 3.53 GB | Avaiable: 57.51 GB | Left: 53.98 GB
+
+>>> Epoch 2: Perplexity: 14.599685600614704 Loss: 2.4090022345374362
+Loss improved 2.6214573416621008 -> 2.4090022345374362
+Saved training checkpoint
+After epoch 3, RAM used: 3.57 GB | Avaiable: 57.47 GB | Left: 53.89 GB
+
+>>> Epoch 3: Perplexity: 11.945347969346386 Loss: 2.3160537090725533
+Loss improved 2.4090022345374362 -> 2.3160537090725533
+Saved training checkpoint
+After epoch 4, RAM used: 3.58 GB | Avaiable: 57.47 GB | Left: 53.90 GB
+
+>>> Epoch 4: Perplexity: 11.61526404278414 Loss: 2.2554892432450773
+Loss improved 2.3160537090725533 -> 2.2554892432450773
+Saved training checkpoint
+After epoch 5, RAM used: 3.58 GB | Avaiable: 52.30 GB | Left: 48.72 GB
+
+>>> Epoch 5: Perplexity: 10.940166697585614 Loss: 2.211717551305325
+Loss improved 2.2554892432450773 -> 2.211717551305325
+Saved training checkpoint
+After epoch 6, RAM used: 3.58 GB | Avaiable: 57.48 GB | Left: 53.90 GB
+
+>>> Epoch 6: Perplexity: 9.703375475135896 Loss: 2.1727879395655756
+Loss improved 2.211717551305325 -> 2.1727879395655756
+Saved training checkpoint
+After epoch 7, RAM used: 3.58 GB | Avaiable: 57.46 GB | Left: 53.89 GB
+
+>>> Epoch 7: Perplexity: 9.611460056753156 Loss: 2.139744746335077
+Loss improved 2.1727879395655756 -> 2.139744746335077
+Saved training checkpoint
+After epoch 8, RAM used: 3.58 GB | Avaiable: 57.45 GB | Left: 53.88 GB
+
+>>> Epoch 8: Perplexity: 9.253905521907615 Loss: 2.112046389352141
+Loss improved 2.139744746335077 -> 2.112046389352141
+Saved training checkpoint
+After epoch 9, RAM used: 3.58 GB | Avaiable: 57.45 GB | Left: 53.87 GB
+
+>>> Epoch 9: Perplexity: 8.987782076156853 Loss: 2.088010660353402
+Loss improved 2.112046389352141 -> 2.088010660353402
+Saved training checkpoint
+After epoch 10, RAM used: 3.58 GB | Avaiable: 57.46 GB | Left: 53.89 GB
+
+>>> Epoch 10: Perplexity: 8.989515812427134 Loss: 2.0606724782881045
+Loss improved 2.088010660353402 -> 2.0606724782881045
+Saved training checkpoint
+After epoch 11, RAM used: 3.58 GB | Avaiable: 57.48 GB | Left: 53.91 GB
+
+>>> Epoch 11: Perplexity: 8.596087054176957 Loss: 2.0449221819049317
+Loss improved 2.0606724782881045 -> 2.0449221819049317
+Saved training checkpoint
+After epoch 12, RAM used: 3.58 GB | Avaiable: 57.43 GB | Left: 53.85 GB
+
+>>> Epoch 12: Perplexity: 8.133701984722778 Loss: 2.0227558158141825
+Loss improved 2.0449221819049317 -> 2.0227558158141825
+Saved training checkpoint
+After epoch 13, RAM used: 3.58 GB | Avaiable: 57.47 GB | Left: 53.89 GB
+
+>>> Epoch 13: Perplexity: 8.120926713195095 Loss: 1.9996797158441224
+Loss improved 2.0227558158141825 -> 1.9996797158441224
+Saved training checkpoint
+After epoch 14, RAM used: 3.58 GB | Avaiable: 57.47 GB | Left: 53.90 GB
+
+>>> Epoch 14: Perplexity: 7.887189857322398 Loss: 1.986047686118474
+Loss improved 1.9996797158441224 -> 1.986047686118474
+Saved training checkpoint
+After epoch 15, RAM used: 3.58 GB | Avaiable: 55.84 GB | Left: 52.26 GB
+
+>>> Epoch 15: Perplexity: 7.687336654327518 Loss: 1.9684794586581946
+Loss improved 1.986047686118474 -> 1.9684794586581946
+Saved training checkpoint
+After epoch 16, RAM used: 3.58 GB | Avaiable: 57.46 GB | Left: 53.89 GB
+
+>>> Epoch 16: Perplexity: 7.549170162424603 Loss: 1.9527537560472294
+Loss improved 1.9684794586581946 -> 1.9527537560472294
+Saved training checkpoint
+After epoch 17, RAM used: 3.58 GB | Avaiable: 57.46 GB | Left: 53.88 GB
+
+>>> Epoch 17: Perplexity: 7.614053464896555 Loss: 1.9367966973686213
+Loss improved 1.9527537560472294 -> 1.9367966973686213
+Saved training checkpoint
+After epoch 18, RAM used: 3.58 GB | Avaiable: 57.47 GB | Left: 53.89 GB
+
+>>> Epoch 18: Perplexity: 7.137354318384541 Loss: 1.9238058941700293
+Loss improved 1.9367966973686213 -> 1.9238058941700293
+Saved training checkpoint
+After epoch 19, RAM used: 3.58 GB | Avaiable: 51.08 GB | Left: 47.50 GB
+
+>>> Epoch 19: Perplexity: 7.229089215165024 Loss: 1.9130641898584901
+Loss improved 1.9238058941700293 -> 1.9130641898584901
+Saved training checkpoint
+After epoch 20, RAM used: 3.56 GB | Avaiable: 51.37 GB | Left: 47.80 GB
+
+>>> Epoch 20: Perplexity: 7.165172113154145 Loss: 1.9069529029687642
+Loss improved 1.9130641898584901 -> 1.9069529029687642
+Saved training checkpoint
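The log tracks RAM usage and, per epoch, a perplexity and mean loss, with a checkpoint saved whenever the loss improves (here, every epoch, from 2.62 down to 1.91 over 20 epochs). The training script itself is not part of this commit; the sketch below only illustrates the usual way such a perplexity figure is produced, as the exponential of the mean cross-entropy loss over an evaluation dataloader (all names are assumptions, not the author's code):

```python
import math
import torch

def evaluate(model, eval_dataloader, device):
    """Return (perplexity, mean loss) over a held-out dataloader of masked-LM batches."""
    model.eval()
    losses = []
    with torch.no_grad():
        for batch in eval_dataloader:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)           # batches must include "labels" for a loss
            losses.append(outputs.loss.item())
    mean_loss = sum(losses) / len(losses)
    return math.exp(mean_loss), mean_loss
```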