bobox committed · verified
Commit 344222a · 1 parent: 8f5f1de

Training in progress, step 438, checkpoint

checkpoint-438/1_AdvancedWeightedPooling/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "embed_dim": 1024,
+ "num_heads": 8,
+ "dropout": 0.05,
+ "bias": true,
+ "use_layernorm": false,
+ "use_MLP": false,
+ "MLP_h_size": 1024,
+ "use_residual": false
+ }
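
For context, a minimal sketch of how this pooling-head config might be consumed. It assumes the head wraps a standard multi-head attention layer sized by these fields; the actual AdvancedWeightedPooling class lives in the training script, not in this commit, so the construction below is illustrative only.

```python
import json
import torch

# Sketch only: the real AdvancedWeightedPooling class is defined in the
# training script (__main__). Here we just mirror its config with a plain
# torch.nn.MultiheadAttention of the same shape (an assumption).
with open("checkpoint-438/1_AdvancedWeightedPooling/config.json") as f:
    cfg = json.load(f)

attention = torch.nn.MultiheadAttention(
    embed_dim=cfg["embed_dim"],  # 1024, matches the backbone hidden_size
    num_heads=cfg["num_heads"],  # 8
    dropout=cfg["dropout"],      # 0.05
    bias=cfg["bias"],            # true
    batch_first=True,
)
```
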
checkpoint-438/1_AdvancedWeightedPooling/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21b456a8c72f1889e87a06cae425b1efd8812bc3f1ee5250ea98bb0a52d9d083
+ size 16795776
checkpoint-438/README.md ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-438/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "BAAI/bge-m3",
+ "architectures": [
+ "XLMRobertaModel"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+ }
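
The backbone config above is a stock BAAI/bge-m3 setup (XLM-RoBERTa, 24 layers, hidden size 1024). A short, hedged example of loading just this backbone and its tokenizer from the checkpoint directory with the transformers Auto classes (the local path is assumed to be wherever this checkpoint was downloaded); note this does not include the custom pooling head:

```python
from transformers import AutoModel, AutoTokenizer

# Assumes the checkpoint-438 directory has been downloaded locally.
checkpoint_dir = "checkpoint-438"

tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)  # XLMRobertaTokenizer
backbone = AutoModel.from_pretrained(checkpoint_dir)       # XLMRobertaModel

inputs = tokenizer("a quick smoke test", return_tensors="pt")
token_embeddings = backbone(**inputs).last_hidden_state    # shape: (1, seq_len, 1024)
```
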
checkpoint-438/config_sentence_transformers.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "__version__": {
+ "sentence_transformers": "3.3.1",
+ "transformers": "4.47.0",
+ "pytorch": "2.5.1+cu121"
+ },
+ "prompts": {},
+ "default_prompt_name": null,
+ "similarity_fn_name": "cosine"
+ }
checkpoint-438/modules.json ADDED
@@ -0,0 +1,14 @@
+ [
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_AdvancedWeightedPooling",
+ "type": "__main__.AdvancedWeightedPooling"
+ }
+ ]
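
modules.json wires the backbone (module 0) to the custom pooling head in 1_AdvancedWeightedPooling (module 1), whose type is "__main__.AdvancedWeightedPooling". Loading the full SentenceTransformer therefore requires that class to be resolvable in the top-level script doing the loading. A hedged sketch, where `my_training_script` is a hypothetical module containing the original class definition:

```python
from sentence_transformers import SentenceTransformer

# The class must be importable as "__main__.AdvancedWeightedPooling", i.e.
# defined (or imported) at the top level of the script that loads the
# checkpoint. Its definition is not part of this commit, so this only works
# once the original training code is available.
from my_training_script import AdvancedWeightedPooling  # hypothetical import

model = SentenceTransformer("checkpoint-438")
embeddings = model.encode(["first sentence", "second sentence"])
scores = model.similarity(embeddings, embeddings)  # cosine, per config_sentence_transformers.json
```
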
checkpoint-438/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a282a7cab76b925a0b1fa9fde5249fcd7fc34b00745c368338e9fe225d1e4ad
+ size 33591506
checkpoint-438/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e969b03be79a3f792216f1febef058e86c2b7f27374fb1e044d87938ceea1ba
+ size 2271151270
checkpoint-438/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63b4d9d674854b1885bcc0cd458d694df71630ad65a46be2f43987f0d38042b8
+ size 14244
checkpoint-438/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1542c13060cbf11d63740168f06810d9c42ab089663f77ceda24a3db37e35a80
+ size 1000
checkpoint-438/sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "max_seq_length": 8192,
+ "do_lower_case": false
+ }
checkpoint-438/sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
checkpoint-438/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-438/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4f7e21bec3fb0044ca0bb2d50eb5d4d8c596273c422baef84466d2c73748b9c
+ size 17083053
checkpoint-438/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "extra_special_tokens": {},
+ "mask_token": "<mask>",
+ "model_max_length": 8192,
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": "<unk>"
+ }
checkpoint-438/trainer_state.json ADDED
@@ -0,0 +1,669 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.6008230452674898,
+ "eval_steps": 110,
+ "global_step": 438,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.015089163237311385,
+ "grad_norm": 3.8793580532073975,
+ "learning_rate": 1.8281535648994517e-05,
+ "loss": 0.7558,
+ "step": 11
+ },
+ {
+ "epoch": 0.03017832647462277,
+ "grad_norm": 3.666482448577881,
+ "learning_rate": 3.839122486288849e-05,
+ "loss": 0.7776,
+ "step": 22
+ },
+ {
+ "epoch": 0.04526748971193416,
+ "grad_norm": 3.493319511413574,
+ "learning_rate": 5.850091407678245e-05,
+ "loss": 0.379,
+ "step": 33
+ },
+ {
+ "epoch": 0.06035665294924554,
+ "grad_norm": 2.435302972793579,
+ "learning_rate": 7.861060329067642e-05,
+ "loss": 0.4947,
+ "step": 44
+ },
+ {
+ "epoch": 0.07544581618655692,
+ "grad_norm": 2.2073612213134766,
+ "learning_rate": 9.872029250457039e-05,
+ "loss": 0.3406,
+ "step": 55
+ },
+ {
+ "epoch": 0.09053497942386832,
+ "grad_norm": 0.5107505917549133,
+ "learning_rate": 0.00011882998171846434,
+ "loss": 0.2391,
+ "step": 66
+ },
+ {
+ "epoch": 0.1056241426611797,
+ "grad_norm": 2.4110350608825684,
+ "learning_rate": 0.00013893967093235832,
+ "loss": 0.2154,
+ "step": 77
+ },
+ {
+ "epoch": 0.12071330589849108,
+ "grad_norm": 3.4400408267974854,
+ "learning_rate": 0.00015904936014625229,
+ "loss": 0.2662,
+ "step": 88
+ },
+ {
+ "epoch": 0.13580246913580246,
+ "grad_norm": 3.31318998336792,
+ "learning_rate": 0.00017915904936014626,
+ "loss": 0.283,
+ "step": 99
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "grad_norm": 2.624321699142456,
+ "learning_rate": 0.00019926873857404023,
+ "loss": 0.2391,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_Qnli-dev_cosine_accuracy": 0.7353515625,
+ "eval_Qnli-dev_cosine_accuracy_threshold": 0.641769528388977,
+ "eval_Qnli-dev_cosine_ap": 0.7934694922676566,
+ "eval_Qnli-dev_cosine_f1": 0.7255734919286321,
+ "eval_Qnli-dev_cosine_f1_threshold": 0.533623456954956,
+ "eval_Qnli-dev_cosine_precision": 0.6170520231213873,
+ "eval_Qnli-dev_cosine_recall": 0.8804123711340206,
+ "eval_allNLI-dev_cosine_accuracy": 0.7451171875,
+ "eval_allNLI-dev_cosine_accuracy_threshold": 0.7058684229850769,
+ "eval_allNLI-dev_cosine_ap": 0.6358738534384165,
+ "eval_allNLI-dev_cosine_f1": 0.6633039092055485,
+ "eval_allNLI-dev_cosine_f1_threshold": 0.6644865274429321,
+ "eval_allNLI-dev_cosine_precision": 0.579295154185022,
+ "eval_allNLI-dev_cosine_recall": 0.775811209439528,
+ "eval_sequential_score": 0.7934694922676566,
+ "eval_sts-test_pearson_cosine": 0.8508165029528609,
+ "eval_sts-test_spearman_cosine": 0.8665992028008191,
+ "eval_vitaminc-pairs_loss": 1.550615668296814,
+ "eval_vitaminc-pairs_runtime": 24.2459,
+ "eval_vitaminc-pairs_samples_per_second": 10.558,
+ "eval_vitaminc-pairs_steps_per_second": 0.041,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_negation-triplets_loss": 0.8121126294136047,
+ "eval_negation-triplets_runtime": 4.2821,
+ "eval_negation-triplets_samples_per_second": 59.784,
+ "eval_negation-triplets_steps_per_second": 0.234,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_scitail-pairs-pos_loss": 0.05080736428499222,
+ "eval_scitail-pairs-pos_runtime": 3.0909,
+ "eval_scitail-pairs-pos_samples_per_second": 82.824,
+ "eval_scitail-pairs-pos_steps_per_second": 0.324,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_scitail-pairs-qa_loss": 0.011191274970769882,
+ "eval_scitail-pairs-qa_runtime": 2.3574,
+ "eval_scitail-pairs-qa_samples_per_second": 108.596,
+ "eval_scitail-pairs-qa_steps_per_second": 0.424,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_xsum-pairs_loss": 0.2039160132408142,
+ "eval_xsum-pairs_runtime": 12.8416,
+ "eval_xsum-pairs_samples_per_second": 19.935,
+ "eval_xsum-pairs_steps_per_second": 0.078,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_sciq_pairs_loss": 0.023365184664726257,
+ "eval_sciq_pairs_runtime": 20.6659,
+ "eval_sciq_pairs_samples_per_second": 12.388,
+ "eval_sciq_pairs_steps_per_second": 0.048,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_qasc_pairs_loss": 0.558290421962738,
+ "eval_qasc_pairs_runtime": 3.009,
+ "eval_qasc_pairs_samples_per_second": 85.077,
+ "eval_qasc_pairs_steps_per_second": 0.332,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_openbookqa_pairs_loss": 1.253723382949829,
+ "eval_openbookqa_pairs_runtime": 2.2457,
+ "eval_openbookqa_pairs_samples_per_second": 113.993,
+ "eval_openbookqa_pairs_steps_per_second": 0.445,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_nq_pairs_loss": 0.10213108360767365,
+ "eval_nq_pairs_runtime": 18.094,
+ "eval_nq_pairs_samples_per_second": 14.148,
+ "eval_nq_pairs_steps_per_second": 0.055,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_trivia_pairs_loss": 0.4372706711292267,
+ "eval_trivia_pairs_runtime": 16.9326,
+ "eval_trivia_pairs_samples_per_second": 15.119,
+ "eval_trivia_pairs_steps_per_second": 0.059,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_gooaq_pairs_loss": 0.2727060914039612,
+ "eval_gooaq_pairs_runtime": 3.6277,
+ "eval_gooaq_pairs_samples_per_second": 70.568,
+ "eval_gooaq_pairs_steps_per_second": 0.276,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_paws-pos_loss": 0.040396444499492645,
+ "eval_paws-pos_runtime": 2.9381,
+ "eval_paws-pos_samples_per_second": 87.132,
+ "eval_paws-pos_steps_per_second": 0.34,
+ "step": 110
+ },
+ {
+ "epoch": 0.15089163237311384,
+ "eval_global_dataset_loss": 0.1820984184741974,
+ "eval_global_dataset_runtime": 125.361,
+ "eval_global_dataset_samples_per_second": 9.764,
+ "eval_global_dataset_steps_per_second": 0.04,
+ "step": 110
+ },
+ {
+ "epoch": 0.16598079561042525,
+ "grad_norm": 3.345057725906372,
+ "learning_rate": 0.00021937842778793417,
+ "loss": 0.2456,
+ "step": 121
+ },
+ {
+ "epoch": 0.18106995884773663,
+ "grad_norm": 2.2555935382843018,
+ "learning_rate": 0.00023948811700182814,
+ "loss": 0.4199,
+ "step": 132
+ },
+ {
+ "epoch": 0.19615912208504802,
+ "grad_norm": 2.3139288425445557,
+ "learning_rate": 0.0002595978062157221,
+ "loss": 0.2809,
+ "step": 143
+ },
+ {
+ "epoch": 0.2112482853223594,
+ "grad_norm": 3.585463047027588,
+ "learning_rate": 0.0002797074954296161,
+ "loss": 0.5773,
+ "step": 154
+ },
+ {
+ "epoch": 0.22633744855967078,
+ "grad_norm": 2.4281251430511475,
+ "learning_rate": 0.00029981718464351003,
+ "loss": 0.3734,
+ "step": 165
+ },
+ {
+ "epoch": 0.24142661179698216,
+ "grad_norm": 0.2383209466934204,
+ "learning_rate": 0.000319926873857404,
+ "loss": 0.2348,
+ "step": 176
+ },
+ {
+ "epoch": 0.25651577503429357,
+ "grad_norm": 2.4634456634521484,
+ "learning_rate": 0.00034003656307129797,
+ "loss": 0.4421,
+ "step": 187
+ },
+ {
+ "epoch": 0.2716049382716049,
+ "grad_norm": 3.1270384788513184,
+ "learning_rate": 0.00036014625228519197,
+ "loss": 0.5076,
+ "step": 198
+ },
+ {
+ "epoch": 0.28669410150891633,
+ "grad_norm": 0.7871516346931458,
+ "learning_rate": 0.0003802559414990859,
+ "loss": 0.211,
+ "step": 209
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "grad_norm": 2.159247636795044,
+ "learning_rate": 0.00040036563071297986,
+ "loss": 0.3514,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_Qnli-dev_cosine_accuracy": 0.736328125,
+ "eval_Qnli-dev_cosine_accuracy_threshold": 0.6637322902679443,
+ "eval_Qnli-dev_cosine_ap": 0.7960534826633536,
+ "eval_Qnli-dev_cosine_f1": 0.7298050139275766,
+ "eval_Qnli-dev_cosine_f1_threshold": 0.5937396287918091,
+ "eval_Qnli-dev_cosine_precision": 0.6638513513513513,
+ "eval_Qnli-dev_cosine_recall": 0.8103092783505155,
+ "eval_allNLI-dev_cosine_accuracy": 0.755859375,
+ "eval_allNLI-dev_cosine_accuracy_threshold": 0.728554904460907,
+ "eval_allNLI-dev_cosine_ap": 0.6433680273177467,
+ "eval_allNLI-dev_cosine_f1": 0.661818181818182,
+ "eval_allNLI-dev_cosine_f1_threshold": 0.6872978210449219,
+ "eval_allNLI-dev_cosine_precision": 0.5617283950617284,
+ "eval_allNLI-dev_cosine_recall": 0.8053097345132744,
+ "eval_sequential_score": 0.7960534826633536,
+ "eval_sts-test_pearson_cosine": 0.8381104761555598,
+ "eval_sts-test_spearman_cosine": 0.8624525294470655,
+ "eval_vitaminc-pairs_loss": 1.5675894021987915,
+ "eval_vitaminc-pairs_runtime": 24.2649,
+ "eval_vitaminc-pairs_samples_per_second": 10.55,
+ "eval_vitaminc-pairs_steps_per_second": 0.041,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_negation-triplets_loss": 0.923573911190033,
+ "eval_negation-triplets_runtime": 4.311,
+ "eval_negation-triplets_samples_per_second": 59.384,
+ "eval_negation-triplets_steps_per_second": 0.232,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_scitail-pairs-pos_loss": 0.0530293844640255,
+ "eval_scitail-pairs-pos_runtime": 3.1182,
+ "eval_scitail-pairs-pos_samples_per_second": 82.098,
+ "eval_scitail-pairs-pos_steps_per_second": 0.321,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_scitail-pairs-qa_loss": 0.008582310751080513,
+ "eval_scitail-pairs-qa_runtime": 2.402,
+ "eval_scitail-pairs-qa_samples_per_second": 106.58,
+ "eval_scitail-pairs-qa_steps_per_second": 0.416,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_xsum-pairs_loss": 0.19049452245235443,
+ "eval_xsum-pairs_runtime": 12.8763,
+ "eval_xsum-pairs_samples_per_second": 19.882,
+ "eval_xsum-pairs_steps_per_second": 0.078,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_sciq_pairs_loss": 0.02437273971736431,
+ "eval_sciq_pairs_runtime": 20.6537,
+ "eval_sciq_pairs_samples_per_second": 12.395,
+ "eval_sciq_pairs_steps_per_second": 0.048,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_qasc_pairs_loss": 0.6957117915153503,
+ "eval_qasc_pairs_runtime": 3.0168,
+ "eval_qasc_pairs_samples_per_second": 84.858,
+ "eval_qasc_pairs_steps_per_second": 0.331,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_openbookqa_pairs_loss": 1.2585959434509277,
+ "eval_openbookqa_pairs_runtime": 2.2495,
+ "eval_openbookqa_pairs_samples_per_second": 113.801,
+ "eval_openbookqa_pairs_steps_per_second": 0.445,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_nq_pairs_loss": 0.12953564524650574,
+ "eval_nq_pairs_runtime": 18.127,
+ "eval_nq_pairs_samples_per_second": 14.123,
+ "eval_nq_pairs_steps_per_second": 0.055,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_trivia_pairs_loss": 0.46085307002067566,
+ "eval_trivia_pairs_runtime": 16.9635,
+ "eval_trivia_pairs_samples_per_second": 15.091,
+ "eval_trivia_pairs_steps_per_second": 0.059,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_gooaq_pairs_loss": 0.2918424606323242,
+ "eval_gooaq_pairs_runtime": 3.6275,
+ "eval_gooaq_pairs_samples_per_second": 70.573,
+ "eval_gooaq_pairs_steps_per_second": 0.276,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_paws-pos_loss": 0.041661862283945084,
+ "eval_paws-pos_runtime": 2.9518,
+ "eval_paws-pos_samples_per_second": 86.727,
+ "eval_paws-pos_steps_per_second": 0.339,
+ "step": 220
+ },
+ {
+ "epoch": 0.3017832647462277,
+ "eval_global_dataset_loss": 0.3610426187515259,
+ "eval_global_dataset_runtime": 125.4174,
+ "eval_global_dataset_samples_per_second": 9.759,
+ "eval_global_dataset_steps_per_second": 0.04,
+ "step": 220
+ },
+ {
+ "epoch": 0.3168724279835391,
+ "grad_norm": 2.7020387649536133,
+ "learning_rate": 0.00042047531992687385,
+ "loss": 0.2981,
+ "step": 231
+ },
+ {
+ "epoch": 0.3319615912208505,
+ "grad_norm": 2.0000767707824707,
+ "learning_rate": 0.0004405850091407678,
+ "loss": 0.3045,
+ "step": 242
+ },
+ {
+ "epoch": 0.34705075445816186,
+ "grad_norm": 2.539660692214966,
+ "learning_rate": 0.00046069469835466185,
+ "loss": 0.3126,
+ "step": 253
+ },
+ {
+ "epoch": 0.36213991769547327,
+ "grad_norm": 2.418445348739624,
+ "learning_rate": 0.0004808043875685558,
+ "loss": 0.7813,
+ "step": 264
+ },
+ {
+ "epoch": 0.3772290809327846,
+ "grad_norm": 0.13016735017299652,
+ "learning_rate": 0.0005009140767824497,
+ "loss": 0.547,
+ "step": 275
+ },
+ {
+ "epoch": 0.39231824417009603,
+ "grad_norm": 2.2323102951049805,
+ "learning_rate": 0.0005210237659963437,
+ "loss": 0.4698,
+ "step": 286
+ },
+ {
+ "epoch": 0.4074074074074074,
+ "grad_norm": 3.097975492477417,
+ "learning_rate": 0.0005411334552102377,
+ "loss": 0.5427,
+ "step": 297
+ },
+ {
+ "epoch": 0.4224965706447188,
+ "grad_norm": 0.7084994316101074,
+ "learning_rate": 0.0005612431444241316,
+ "loss": 0.3151,
+ "step": 308
+ },
+ {
+ "epoch": 0.4375857338820302,
+ "grad_norm": 1.7643369436264038,
+ "learning_rate": 0.0005813528336380256,
+ "loss": 0.4687,
+ "step": 319
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "grad_norm": 1.7608978748321533,
+ "learning_rate": 0.0006014625228519196,
+ "loss": 0.3769,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_Qnli-dev_cosine_accuracy": 0.7255859375,
+ "eval_Qnli-dev_cosine_accuracy_threshold": 0.6892818212509155,
+ "eval_Qnli-dev_cosine_ap": 0.7884120709809157,
+ "eval_Qnli-dev_cosine_f1": 0.7228464419475655,
+ "eval_Qnli-dev_cosine_f1_threshold": 0.6395477056503296,
+ "eval_Qnli-dev_cosine_precision": 0.6620926243567753,
+ "eval_Qnli-dev_cosine_recall": 0.7958762886597938,
+ "eval_allNLI-dev_cosine_accuracy": 0.7421875,
+ "eval_allNLI-dev_cosine_accuracy_threshold": 0.7702663540840149,
+ "eval_allNLI-dev_cosine_ap": 0.6347046378974335,
+ "eval_allNLI-dev_cosine_f1": 0.6455542021924483,
+ "eval_allNLI-dev_cosine_f1_threshold": 0.7014378309249878,
+ "eval_allNLI-dev_cosine_precision": 0.549792531120332,
+ "eval_allNLI-dev_cosine_recall": 0.7817109144542773,
+ "eval_sequential_score": 0.7884120709809157,
+ "eval_sts-test_pearson_cosine": 0.8389808770066287,
+ "eval_sts-test_spearman_cosine": 0.861226668384837,
+ "eval_vitaminc-pairs_loss": 1.5675371885299683,
+ "eval_vitaminc-pairs_runtime": 24.2072,
+ "eval_vitaminc-pairs_samples_per_second": 10.575,
+ "eval_vitaminc-pairs_steps_per_second": 0.041,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_negation-triplets_loss": 0.7654371857643127,
+ "eval_negation-triplets_runtime": 4.2588,
+ "eval_negation-triplets_samples_per_second": 60.111,
+ "eval_negation-triplets_steps_per_second": 0.235,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_scitail-pairs-pos_loss": 0.04239173233509064,
+ "eval_scitail-pairs-pos_runtime": 3.0875,
+ "eval_scitail-pairs-pos_samples_per_second": 82.915,
+ "eval_scitail-pairs-pos_steps_per_second": 0.324,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_scitail-pairs-qa_loss": 0.010665436275303364,
+ "eval_scitail-pairs-qa_runtime": 2.3433,
+ "eval_scitail-pairs-qa_samples_per_second": 109.246,
+ "eval_scitail-pairs-qa_steps_per_second": 0.427,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_xsum-pairs_loss": 0.20898626744747162,
+ "eval_xsum-pairs_runtime": 12.8471,
+ "eval_xsum-pairs_samples_per_second": 19.927,
+ "eval_xsum-pairs_steps_per_second": 0.078,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_sciq_pairs_loss": 0.03412973880767822,
+ "eval_sciq_pairs_runtime": 20.7279,
+ "eval_sciq_pairs_samples_per_second": 12.351,
+ "eval_sciq_pairs_steps_per_second": 0.048,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_qasc_pairs_loss": 0.7678776383399963,
+ "eval_qasc_pairs_runtime": 3.0154,
+ "eval_qasc_pairs_samples_per_second": 84.896,
+ "eval_qasc_pairs_steps_per_second": 0.332,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_openbookqa_pairs_loss": 1.3723315000534058,
+ "eval_openbookqa_pairs_runtime": 2.2474,
+ "eval_openbookqa_pairs_samples_per_second": 113.91,
+ "eval_openbookqa_pairs_steps_per_second": 0.445,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_nq_pairs_loss": 0.15752817690372467,
+ "eval_nq_pairs_runtime": 18.1484,
+ "eval_nq_pairs_samples_per_second": 14.106,
+ "eval_nq_pairs_steps_per_second": 0.055,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_trivia_pairs_loss": 0.6312745213508606,
+ "eval_trivia_pairs_runtime": 16.9738,
+ "eval_trivia_pairs_samples_per_second": 15.082,
+ "eval_trivia_pairs_steps_per_second": 0.059,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_gooaq_pairs_loss": 0.47655048966407776,
+ "eval_gooaq_pairs_runtime": 3.6272,
+ "eval_gooaq_pairs_samples_per_second": 70.579,
+ "eval_gooaq_pairs_steps_per_second": 0.276,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_paws-pos_loss": 0.04226630926132202,
+ "eval_paws-pos_runtime": 2.9441,
+ "eval_paws-pos_samples_per_second": 86.954,
+ "eval_paws-pos_steps_per_second": 0.34,
+ "step": 330
+ },
+ {
+ "epoch": 0.45267489711934156,
+ "eval_global_dataset_loss": 0.23270446062088013,
+ "eval_global_dataset_runtime": 125.3499,
+ "eval_global_dataset_samples_per_second": 9.765,
+ "eval_global_dataset_steps_per_second": 0.04,
+ "step": 330
+ },
+ {
+ "epoch": 0.46776406035665297,
+ "grad_norm": 0.37502893805503845,
+ "learning_rate": 0.0006215722120658135,
+ "loss": 0.3832,
+ "step": 341
+ },
+ {
+ "epoch": 0.4828532235939643,
+ "grad_norm": 2.488353967666626,
+ "learning_rate": 0.0006416819012797075,
+ "loss": 0.4392,
+ "step": 352
+ },
+ {
+ "epoch": 0.49794238683127573,
+ "grad_norm": 0.0037633629981428385,
+ "learning_rate": 0.0006617915904936015,
+ "loss": 0.3929,
+ "step": 363
+ },
+ {
+ "epoch": 0.5130315500685871,
+ "grad_norm": 0.467970073223114,
+ "learning_rate": 0.0006819012797074955,
+ "loss": 0.2611,
+ "step": 374
+ },
+ {
+ "epoch": 0.5281207133058985,
+ "grad_norm": 2.020796537399292,
+ "learning_rate": 0.0007020109689213894,
+ "loss": 0.3528,
+ "step": 385
+ },
+ {
+ "epoch": 0.5432098765432098,
+ "grad_norm": 2.0242817401885986,
+ "learning_rate": 0.0007221206581352834,
+ "loss": 0.738,
+ "step": 396
+ },
+ {
+ "epoch": 0.5582990397805213,
+ "grad_norm": 2.9996001720428467,
+ "learning_rate": 0.0007422303473491774,
+ "loss": 0.4016,
+ "step": 407
+ },
+ {
+ "epoch": 0.5733882030178327,
+ "grad_norm": 1.8861972093582153,
+ "learning_rate": 0.0007623400365630713,
+ "loss": 0.3589,
+ "step": 418
+ },
+ {
+ "epoch": 0.588477366255144,
+ "grad_norm": 0.24432632327079773,
+ "learning_rate": 0.0007824497257769653,
+ "loss": 0.3057,
+ "step": 429
+ }
+ ],
+ "logging_steps": 11,
+ "max_steps": 2187,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 219,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 128,
+ "trial_name": null,
+ "trial_params": null
+ }
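
trainer_state.json holds the step-level training log (loss, grad_norm, and learning_rate every 11 steps, plus the per-dataset eval blocks every 110 steps). A small sketch, using only the standard library and assuming the checkpoint has been downloaded locally, for pulling the train-loss curve back out of it:

```python
import json

with open("checkpoint-438/trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are training logs; entries with eval_* keys are eval blocks.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
for entry in train_logs:
    print(f'step {entry["step"]:>4}  loss {entry["loss"]:.4f}  lr {entry["learning_rate"]:.2e}')
```
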
checkpoint-438/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc4e2f1b0b6ec0b954c31a7a2d7392e2f2f817d60f3db0f6f0a80bdf431c25bc
+ size 5880