brettbbb committed
Commit 9c4b5cb
Parent: e06c1c7

End of training

README.md ADDED
@@ -0,0 +1,54 @@
+ ---
+ license: llama2
+ base_model: lmsys/vicuna-7b-v1.5
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: cs_cot_32
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # cs_cot_32
+
+ This model is a fine-tuned version of [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 5
+ - num_epochs: 20
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.36.0.dev0
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.13.1
+ - Tokenizers 0.14.1
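
These settings correspond to a standard `transformers` Trainer run. A minimal sketch of the equivalent `TrainingArguments`, assuming default Adam betas/epsilon (which match the values listed above) and a hypothetical local output directory:

```python
# Minimal sketch of how the hyperparameters above map onto
# transformers.TrainingArguments; the output_dir is hypothetical.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="cs_cot_32",       # hypothetical local path
    learning_rate=1e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=5,
    num_train_epochs=20,
    fp16=True,                    # "Native AMP" mixed precision
    logging_steps=1,              # matches args.txt below
)
```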
adapter_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "lmsys/vicuna-7b-v1.5",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 64,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "gate_proj",
+     "up_proj",
+     "down_proj",
+     "k_proj",
+     "v_proj",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
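
The config targets every attention and MLP projection of the base model with rank 16 and alpha 64. A minimal sketch of attaching the adapter with the `peft` library, assuming it is loaded from the `brettbbb/cs_cot_32` repo named in args.txt below:

```python
# Minimal sketch: attach the LoRA adapter to the base model with peft.
# The adapter repo id is assumed from args.txt (output_dir: brettbbb/cs_cot_32).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.5")
tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5")
model = PeftModel.from_pretrained(base, "brettbbb/cs_cot_32")
model.eval()
```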
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17af2117ec8742464ad725e49fa3ceeb13886b570b4592d6979b299bc9e55305
+ size 160069834
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0899289ca8b47dfa6a853be5d9934f1ed627571acb1a1be08c3fc12a5459394
+ size 159967880
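
Both weight files are Git LFS pointers; the actual payload is fetched on checkout. A minimal sketch of verifying a downloaded copy against the pointer's hash and size (the local file path is assumed):

```python
# Minimal sketch: verify a downloaded adapter_model.safetensors against the
# LFS pointer above; the expected hash and size are taken from the pointer.
import hashlib

expected = "f0899289ca8b47dfa6a853be5d9934f1ed627571acb1a1be08c3fc12a5459394"
with open("adapter_model.safetensors", "rb") as f:
    data = f.read()
assert len(data) == 159967880, "size mismatch"
assert hashlib.sha256(data).hexdigest() == expected, "hash mismatch"
```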
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 20.0,
+   "train_loss": 0.4629385274136439,
+   "train_runtime": 260.0227,
+   "train_samples_per_second": 2.461,
+   "train_steps_per_second": 0.615
+ }
args.txt ADDED
@@ -0,0 +1,14 @@
+ base_model_name: lmsys/vicuna-7b-v1.5
+ batch_size: 4
+ cot: true
+ dataset_name: BENBENBENb/CommonsenseQA1000COT
+ epochs: 20
+ eval_strategy: epoch
+ evaluation: false
+ learning_rate: 0.0001
+ logging_steps: 1
+ output_dir: brettbbb/cs_cot_32
+ seed: 42
+ skip_example: true
+ train_size: 32
+ warmup_steps: 5
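
Note: with train_size 32 and batch_size 4, each epoch is 8 optimizer steps, so 20 epochs yield the 160 global steps recorded in trainer_state.json below.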
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 20.0,
+   "train_loss": 0.4629385274136439,
+   "train_runtime": 260.0227,
+   "train_samples_per_second": 2.461,
+   "train_steps_per_second": 0.615
+ }
trainer_state.json ADDED
@@ -0,0 +1,988 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 20.0,
+   "eval_steps": 500,
+   "global_step": 160,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.12, "learning_rate": 2e-05, "loss": 2.6536, "step": 1},
+     {"epoch": 0.25, "learning_rate": 4e-05, "loss": 3.3995, "step": 2},
+     {"epoch": 0.38, "learning_rate": 6e-05, "loss": 4.281, "step": 3},
+     {"epoch": 0.5, "learning_rate": 8e-05, "loss": 2.4605, "step": 4},
+     {"epoch": 0.62, "learning_rate": 0.0001, "loss": 2.538, "step": 5},
+     {"epoch": 0.75, "learning_rate": 9.935483870967742e-05, "loss": 2.629, "step": 6},
+     {"epoch": 0.88, "learning_rate": 9.870967741935484e-05, "loss": 2.1875, "step": 7},
+     {"epoch": 1.0, "learning_rate": 9.806451612903226e-05, "loss": 1.9768, "step": 8},
+     {"epoch": 1.12, "learning_rate": 9.741935483870968e-05, "loss": 2.0199, "step": 9},
+     {"epoch": 1.25, "learning_rate": 9.677419354838711e-05, "loss": 2.0266, "step": 10},
+     {"epoch": 1.38, "learning_rate": 9.612903225806452e-05, "loss": 1.7383, "step": 11},
+     {"epoch": 1.5, "learning_rate": 9.548387096774195e-05, "loss": 1.7494, "step": 12},
+     {"epoch": 1.62, "learning_rate": 9.483870967741936e-05, "loss": 1.8509, "step": 13},
+     {"epoch": 1.75, "learning_rate": 9.419354838709677e-05, "loss": 1.8364, "step": 14},
+     {"epoch": 1.88, "learning_rate": 9.35483870967742e-05, "loss": 1.6808, "step": 15},
+     {"epoch": 2.0, "learning_rate": 9.290322580645162e-05, "loss": 1.5642, "step": 16},
+     {"epoch": 2.12, "learning_rate": 9.225806451612904e-05, "loss": 1.6336, "step": 17},
+     {"epoch": 2.25, "learning_rate": 9.161290322580646e-05, "loss": 1.6452, "step": 18},
+     {"epoch": 2.38, "learning_rate": 9.096774193548387e-05, "loss": 1.1714, "step": 19},
+     {"epoch": 2.5, "learning_rate": 9.096774193548387e-05, "loss": 1.9018, "step": 20},
+     {"epoch": 2.62, "learning_rate": 9.032258064516129e-05, "loss": 1.6711, "step": 21},
+     {"epoch": 2.75, "learning_rate": 8.967741935483871e-05, "loss": 1.1646, "step": 22},
+     {"epoch": 2.88, "learning_rate": 8.903225806451614e-05, "loss": 1.0608, "step": 23},
+     {"epoch": 3.0, "learning_rate": 8.838709677419355e-05, "loss": 1.2916, "step": 24},
+     {"epoch": 3.12, "learning_rate": 8.774193548387098e-05, "loss": 0.9801, "step": 25},
+     {"epoch": 3.25, "learning_rate": 8.709677419354839e-05, "loss": 1.0964, "step": 26},
+     {"epoch": 3.38, "learning_rate": 8.645161290322581e-05, "loss": 1.1112, "step": 27},
+     {"epoch": 3.5, "learning_rate": 8.580645161290323e-05, "loss": 1.2538, "step": 28},
+     {"epoch": 3.62, "learning_rate": 8.516129032258064e-05, "loss": 1.0811, "step": 29},
+     {"epoch": 3.75, "learning_rate": 8.451612903225808e-05, "loss": 0.9926, "step": 30},
+     {"epoch": 3.88, "learning_rate": 8.387096774193549e-05, "loss": 1.3067, "step": 31},
+     {"epoch": 4.0, "learning_rate": 8.32258064516129e-05, "loss": 0.9473, "step": 32},
+     {"epoch": 4.12, "learning_rate": 8.258064516129033e-05, "loss": 0.8545, "step": 33},
+     {"epoch": 4.25, "learning_rate": 8.193548387096774e-05, "loss": 0.704, "step": 34},
+     {"epoch": 4.38, "learning_rate": 8.129032258064517e-05, "loss": 0.8111, "step": 35},
+     {"epoch": 4.5, "learning_rate": 8.064516129032258e-05, "loss": 0.9651, "step": 36},
+     {"epoch": 4.62, "learning_rate": 8e-05, "loss": 0.6063, "step": 37},
+     {"epoch": 4.75, "learning_rate": 7.935483870967743e-05, "loss": 0.5452, "step": 38},
+     {"epoch": 4.88, "learning_rate": 7.870967741935484e-05, "loss": 0.8213, "step": 39},
+     {"epoch": 5.0, "learning_rate": 7.806451612903226e-05, "loss": 0.7832, "step": 40},
+     {"epoch": 5.12, "learning_rate": 7.741935483870968e-05, "loss": 0.3778, "step": 41},
+     {"epoch": 5.25, "learning_rate": 7.67741935483871e-05, "loss": 0.3637, "step": 42},
+     {"epoch": 5.38, "learning_rate": 7.612903225806451e-05, "loss": 0.6275, "step": 43},
+     {"epoch": 5.5, "learning_rate": 7.548387096774195e-05, "loss": 0.6516, "step": 44},
+     {"epoch": 5.62, "learning_rate": 7.483870967741936e-05, "loss": 0.3812, "step": 45},
+     {"epoch": 5.75, "learning_rate": 7.419354838709677e-05, "loss": 0.3557, "step": 46},
+     {"epoch": 5.88, "learning_rate": 7.35483870967742e-05, "loss": 0.2963, "step": 47},
+     {"epoch": 6.0, "learning_rate": 7.290322580645161e-05, "loss": 0.2674, "step": 48},
+     {"epoch": 6.12, "learning_rate": 7.225806451612904e-05, "loss": 0.1111, "step": 49},
+     {"epoch": 6.25, "learning_rate": 7.161290322580646e-05, "loss": 0.1296, "step": 50},
+     {"epoch": 6.38, "learning_rate": 7.096774193548388e-05, "loss": 0.1281, "step": 51},
+     {"epoch": 6.5, "learning_rate": 7.03225806451613e-05, "loss": 0.238, "step": 52},
+     {"epoch": 6.62, "learning_rate": 6.967741935483871e-05, "loss": 0.1803, "step": 53},
+     {"epoch": 6.75, "learning_rate": 6.903225806451613e-05, "loss": 0.1911, "step": 54},
+     {"epoch": 6.88, "learning_rate": 6.838709677419355e-05, "loss": 0.2073, "step": 55},
+     {"epoch": 7.0, "learning_rate": 6.774193548387096e-05, "loss": 0.2415, "step": 56},
+     {"epoch": 7.12, "learning_rate": 6.709677419354839e-05, "loss": 0.1104, "step": 57},
+     {"epoch": 7.25, "learning_rate": 6.645161290322582e-05, "loss": 0.1154, "step": 58},
+     {"epoch": 7.38, "learning_rate": 6.580645161290323e-05, "loss": 0.0931, "step": 59},
+     {"epoch": 7.5, "learning_rate": 6.516129032258065e-05, "loss": 0.0671, "step": 60},
+     {"epoch": 7.62, "learning_rate": 6.451612903225807e-05, "loss": 0.1003, "step": 61},
+     {"epoch": 7.75, "learning_rate": 6.387096774193548e-05, "loss": 0.1646, "step": 62},
+     {"epoch": 7.88, "learning_rate": 6.32258064516129e-05, "loss": 0.1203, "step": 63},
+     {"epoch": 8.0, "learning_rate": 6.258064516129033e-05, "loss": 0.1888, "step": 64},
+     {"epoch": 8.12, "learning_rate": 6.193548387096774e-05, "loss": 0.0798, "step": 65},
+     {"epoch": 8.25, "learning_rate": 6.129032258064517e-05, "loss": 0.0921, "step": 66},
+     {"epoch": 8.38, "learning_rate": 6.064516129032258e-05, "loss": 0.1284, "step": 67},
+     {"epoch": 8.5, "learning_rate": 6e-05, "loss": 0.1008, "step": 68},
+     {"epoch": 8.62, "learning_rate": 5.935483870967742e-05, "loss": 0.0732, "step": 69},
+     {"epoch": 8.75, "learning_rate": 5.870967741935483e-05, "loss": 0.0628, "step": 70},
+     {"epoch": 8.88, "learning_rate": 5.8064516129032266e-05, "loss": 0.0694, "step": 71},
+     {"epoch": 9.0, "learning_rate": 5.7419354838709685e-05, "loss": 0.0856, "step": 72},
+     {"epoch": 9.12, "learning_rate": 5.67741935483871e-05, "loss": 0.0698, "step": 73},
+     {"epoch": 9.25, "learning_rate": 5.612903225806452e-05, "loss": 0.0619, "step": 74},
+     {"epoch": 9.38, "learning_rate": 5.5483870967741936e-05, "loss": 0.1077, "step": 75},
+     {"epoch": 9.5, "learning_rate": 5.4838709677419355e-05, "loss": 0.0524, "step": 76},
+     {"epoch": 9.62, "learning_rate": 5.419354838709678e-05, "loss": 0.0747, "step": 77},
+     {"epoch": 9.75, "learning_rate": 5.35483870967742e-05, "loss": 0.0805, "step": 78},
+     {"epoch": 9.88, "learning_rate": 5.290322580645162e-05, "loss": 0.0665, "step": 79},
+     {"epoch": 10.0, "learning_rate": 5.225806451612903e-05, "loss": 0.0688, "step": 80},
+     {"epoch": 10.12, "learning_rate": 5.161290322580645e-05, "loss": 0.0421, "step": 81},
+     {"epoch": 10.25, "learning_rate": 5.096774193548387e-05, "loss": 0.0548, "step": 82},
+     {"epoch": 10.38, "learning_rate": 5.032258064516129e-05, "loss": 0.0552, "step": 83},
+     {"epoch": 10.5, "learning_rate": 4.967741935483871e-05, "loss": 0.0777, "step": 84},
+     {"epoch": 10.62, "learning_rate": 4.903225806451613e-05, "loss": 0.0574, "step": 85},
+     {"epoch": 10.75, "learning_rate": 4.8387096774193554e-05, "loss": 0.1117, "step": 86},
+     {"epoch": 10.88, "learning_rate": 4.774193548387097e-05, "loss": 0.0512, "step": 87},
+     {"epoch": 11.0, "learning_rate": 4.7096774193548385e-05, "loss": 0.07, "step": 88},
+     {"epoch": 11.12, "learning_rate": 4.645161290322581e-05, "loss": 0.0381, "step": 89},
+     {"epoch": 11.25, "learning_rate": 4.580645161290323e-05, "loss": 0.0589, "step": 90},
+     {"epoch": 11.38, "learning_rate": 4.516129032258064e-05, "loss": 0.0407, "step": 91},
+     {"epoch": 11.5, "learning_rate": 4.451612903225807e-05, "loss": 0.0484, "step": 92},
+     {"epoch": 11.62, "learning_rate": 4.387096774193549e-05, "loss": 0.0487, "step": 93},
+     {"epoch": 11.75, "learning_rate": 4.322580645161291e-05, "loss": 0.0616, "step": 94},
+     {"epoch": 11.88, "learning_rate": 4.258064516129032e-05, "loss": 0.0739, "step": 95},
+     {"epoch": 12.0, "learning_rate": 4.1935483870967746e-05, "loss": 0.0951, "step": 96},
+     {"epoch": 12.12, "learning_rate": 4.1290322580645165e-05, "loss": 0.0978, "step": 97},
+     {"epoch": 12.25, "learning_rate": 4.0645161290322584e-05, "loss": 0.0436, "step": 98},
+     {"epoch": 12.38, "learning_rate": 4e-05, "loss": 0.0393, "step": 99},
+     {"epoch": 12.5, "learning_rate": 3.935483870967742e-05, "loss": 0.0601, "step": 100},
+     {"epoch": 12.62, "learning_rate": 3.870967741935484e-05, "loss": 0.0518, "step": 101},
+     {"epoch": 12.75, "learning_rate": 3.8064516129032254e-05, "loss": 0.0464, "step": 102},
+     {"epoch": 12.88, "learning_rate": 3.741935483870968e-05, "loss": 0.0483, "step": 103},
+     {"epoch": 13.0, "learning_rate": 3.67741935483871e-05, "loss": 0.0659, "step": 104},
+     {"epoch": 13.12, "learning_rate": 3.612903225806452e-05, "loss": 0.033, "step": 105},
+     {"epoch": 13.25, "learning_rate": 3.548387096774194e-05, "loss": 0.0395, "step": 106},
+     {"epoch": 13.38, "learning_rate": 3.483870967741936e-05, "loss": 0.0711, "step": 107},
+     {"epoch": 13.5, "learning_rate": 3.4193548387096776e-05, "loss": 0.051, "step": 108},
+     {"epoch": 13.62, "learning_rate": 3.3548387096774195e-05, "loss": 0.0633, "step": 109},
+     {"epoch": 13.75, "learning_rate": 3.2903225806451614e-05, "loss": 0.0406, "step": 110},
+     {"epoch": 13.88, "learning_rate": 3.2258064516129034e-05, "loss": 0.0562, "step": 111},
+     {"epoch": 14.0, "learning_rate": 3.161290322580645e-05, "loss": 0.0406, "step": 112},
+     {"epoch": 14.12, "learning_rate": 3.096774193548387e-05, "loss": 0.0346, "step": 113},
+     {"epoch": 14.25, "learning_rate": 3.032258064516129e-05, "loss": 0.0555, "step": 114},
+     {"epoch": 14.38, "learning_rate": 2.967741935483871e-05, "loss": 0.0435, "step": 115},
+     {"epoch": 14.5, "learning_rate": 2.9032258064516133e-05, "loss": 0.0486, "step": 116},
+     {"epoch": 14.62, "learning_rate": 2.838709677419355e-05, "loss": 0.0334, "step": 117},
+     {"epoch": 14.75, "learning_rate": 2.7741935483870968e-05, "loss": 0.0571, "step": 118},
+     {"epoch": 14.88, "learning_rate": 2.709677419354839e-05, "loss": 0.0437, "step": 119},
+     {"epoch": 15.0, "learning_rate": 2.645161290322581e-05, "loss": 0.0578, "step": 120},
+     {"epoch": 15.12, "learning_rate": 2.5806451612903226e-05, "loss": 0.0373, "step": 121},
+     {"epoch": 15.25, "learning_rate": 2.5161290322580645e-05, "loss": 0.0642, "step": 122},
+     {"epoch": 15.38, "learning_rate": 2.4516129032258064e-05, "loss": 0.0428, "step": 123},
+     {"epoch": 15.5, "learning_rate": 2.3870967741935486e-05, "loss": 0.0523, "step": 124},
+     {"epoch": 15.62, "learning_rate": 2.3225806451612906e-05, "loss": 0.0311, "step": 125},
+     {"epoch": 15.75, "learning_rate": 2.258064516129032e-05, "loss": 0.038, "step": 126},
+     {"epoch": 15.88, "learning_rate": 2.1935483870967744e-05, "loss": 0.0385, "step": 127},
+     {"epoch": 16.0, "learning_rate": 2.129032258064516e-05, "loss": 0.0699, "step": 128},
+     {"epoch": 16.12, "learning_rate": 2.0645161290322582e-05, "loss": 0.061, "step": 129},
+     {"epoch": 16.25, "learning_rate": 2e-05, "loss": 0.0431, "step": 130},
+     {"epoch": 16.38, "learning_rate": 1.935483870967742e-05, "loss": 0.0665, "step": 131},
+     {"epoch": 16.5, "learning_rate": 1.870967741935484e-05, "loss": 0.032, "step": 132},
+     {"epoch": 16.62, "learning_rate": 1.806451612903226e-05, "loss": 0.045, "step": 133},
+     {"epoch": 16.75, "learning_rate": 1.741935483870968e-05, "loss": 0.0533, "step": 134},
+     {"epoch": 16.88, "learning_rate": 1.6774193548387098e-05, "loss": 0.0393, "step": 135},
+     {"epoch": 17.0, "learning_rate": 1.6129032258064517e-05, "loss": 0.0462, "step": 136},
+     {"epoch": 17.12, "learning_rate": 1.5483870967741936e-05, "loss": 0.042, "step": 137},
+     {"epoch": 17.25, "learning_rate": 1.4838709677419355e-05, "loss": 0.0694, "step": 138},
+     {"epoch": 17.38, "learning_rate": 1.4193548387096774e-05, "loss": 0.0253, "step": 139},
+     {"epoch": 17.5, "learning_rate": 1.3548387096774195e-05, "loss": 0.0367, "step": 140},
+     {"epoch": 17.62, "learning_rate": 1.2903225806451613e-05, "loss": 0.0391, "step": 141},
+     {"epoch": 17.75, "learning_rate": 1.2258064516129032e-05, "loss": 0.0508, "step": 142},
+     {"epoch": 17.88, "learning_rate": 1.1612903225806453e-05, "loss": 0.0516, "step": 143},
+     {"epoch": 18.0, "learning_rate": 1.0967741935483872e-05, "loss": 0.0562, "step": 144},
+     {"epoch": 18.12, "learning_rate": 1.0322580645161291e-05, "loss": 0.0404, "step": 145},
+     {"epoch": 18.25, "learning_rate": 9.67741935483871e-06, "loss": 0.0427, "step": 146},
+     {"epoch": 18.38, "learning_rate": 9.03225806451613e-06, "loss": 0.0318, "step": 147},
+     {"epoch": 18.5, "learning_rate": 8.387096774193549e-06, "loss": 0.052, "step": 148},
+     {"epoch": 18.62, "learning_rate": 7.741935483870968e-06, "loss": 0.0278, "step": 149},
+     {"epoch": 18.75, "learning_rate": 7.096774193548387e-06, "loss": 0.095, "step": 150},
+     {"epoch": 18.88, "learning_rate": 6.451612903225806e-06, "loss": 0.0408, "step": 151},
+     {"epoch": 19.0, "learning_rate": 5.806451612903226e-06, "loss": 0.0509, "step": 152},
+     {"epoch": 19.12, "learning_rate": 5.161290322580646e-06, "loss": 0.0297, "step": 153},
+     {"epoch": 19.25, "learning_rate": 4.516129032258065e-06, "loss": 0.0493, "step": 154},
+     {"epoch": 19.38, "learning_rate": 3.870967741935484e-06, "loss": 0.1001, "step": 155},
+     {"epoch": 19.5, "learning_rate": 3.225806451612903e-06, "loss": 0.0401, "step": 156},
+     {"epoch": 19.62, "learning_rate": 2.580645161290323e-06, "loss": 0.0434, "step": 157},
+     {"epoch": 19.75, "learning_rate": 1.935483870967742e-06, "loss": 0.0363, "step": 158},
+     {"epoch": 19.88, "learning_rate": 1.2903225806451614e-06, "loss": 0.0432, "step": 159},
+     {"epoch": 20.0, "learning_rate": 6.451612903225807e-07, "loss": 0.0328, "step": 160},
+     {"epoch": 20.0, "step": 160, "total_flos": 7453507615457280.0, "train_loss": 0.4629385274136439, "train_runtime": 260.0227, "train_samples_per_second": 2.461, "train_steps_per_second": 0.615}
+   ],
+   "logging_steps": 1,
+   "max_steps": 160,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "total_flos": 7453507615457280.0,
+   "trial_name": null,
+   "trial_params": null
+ }
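
A minimal sketch of extracting the loss curve from this file, assuming it has been downloaded locally as `trainer_state.json`:

```python
# Minimal sketch: pull the per-step loss curve out of trainer_state.json.
# Assumes the file has been downloaded locally under this name.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Summary entries (e.g., the final one) lack a "loss" key, so filter on it.
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
for step, loss in points[:5]:
    print(f"step {step}: loss {loss}")
```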
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0289c8be0a154caa6331c59ea2fa13fa4f0a694d07634ccc5808360a502d707
+ size 4600