saqidr committed (verified)
Commit 2fab01a · 1 Parent(s): b86a880

Training in progress, step 500

model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:71e0adabe57431758e6a472d31246e096a12256d0c48319058002b04b0696af1
+ oid sha256:1df6a2e095aa6bbb5d360e7f012cbe4a7929871fd4d4183142dd1f5f93bff5b9
  size 268290900
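
Each of the pointer entries in this commit is a Git LFS stub: the repository itself stores only the spec version, a sha256 object id, and the byte size, while the ~268 MB of weights live in LFS storage, so a changed oid is what marks a new set of weights. A minimal sketch of checking a downloaded blob against its pointer; the paths are placeholders, not part of this commit:

    import hashlib

    POINTER_PATH = "model.safetensors"           # local copy of the 3-line LFS pointer shown above (hypothetical)
    BLOB_PATH = "downloads/model.safetensors"    # the resolved weights file (hypothetical)

    def parse_lfs_pointer(path):
        # Each pointer line is "key value": version, oid, size.
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    def sha256_of(path, chunk_size=1 << 20):
        # Stream in chunks so large checkpoints never sit fully in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    pointer = parse_lfs_pointer(POINTER_PATH)
    expected = pointer["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    print("match" if sha256_of(BLOB_PATH) == expected else "mismatch")
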
run-0/checkpoint-1000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2a6bfc0d04fa062aafe14fdad65aae2ea6adae5c8469541e9b7863a1faad3e9a
+ oid sha256:c7ee2b76ad26fda27a87269b8487a8982728f6da76312664825a4e1c5c739251
  size 268290900
run-0/checkpoint-1000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:80c0e2557f9299a203cad2cf29f7a6e92288b2b70263361fd6952fed79531eed
+ oid sha256:61d1902e0f419d4bd955d1ea59a4f898f832344652150ef307c6b77d64babc3a
  size 536643898
run-0/checkpoint-1000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ed9da1cbe846856af516cf037dd225e7f48e2737ef84c42a9bee0753b8c140ef
+ oid sha256:9f182501c34e4ea3ebc7617d27edab7e1367582b147e518cd90295ec7f2eaa0f
  size 1064
run-0/checkpoint-1000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3995b387f2d0291be709cdbf61f96ec0665c9f1f556146e986cb9b22d69b84bd
+ oid sha256:6765f5cb88961cbc2387298f0752026d7dd9093834e5e5d36d91152176870a41
  size 5176
run-0/checkpoint-1500/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c12548a0e1d2999b1e89a6451858c1ba19278e932f73f0ec0a958da3b98809ec
+ oid sha256:4485ebbdc70232cda43f157c3a0ba5fcb5086e8bea10a25ff4364587e27d9117
  size 268290900
run-0/checkpoint-1500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e508a0180bc2432169987e3671ecfea4c7f7399055995ca577258a0697775a3d
+ oid sha256:f0309665e784927ec40732c3414ab14cf17bfcb720e1ccbaad3f6ef39737f826
  size 536643898
run-0/checkpoint-1500/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ad4d7d251acf36e559c362893a1fb310c9f46b20e8a330025a14b6829ce4ab07
+ oid sha256:91d97f7c1be085852e2ffd2fe2f8a493a080e2b2858e2f3baf642fe214e035a0
  size 1064
run-0/checkpoint-1500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3995b387f2d0291be709cdbf61f96ec0665c9f1f556146e986cb9b22d69b84bd
+ oid sha256:6765f5cb88961cbc2387298f0752026d7dd9093834e5e5d36d91152176870a41
  size 5176
run-1/checkpoint-1000/trainer_state.json CHANGED
@@ -10,50 +10,50 @@
  "log_history": [
  {
  "epoch": 1.0,
- "eval_accuracy": 0.577741935483871,
- "eval_loss": 0.20344002544879913,
- "eval_runtime": 1.3567,
- "eval_samples_per_second": 2284.973,
- "eval_steps_per_second": 47.911,
+ "eval_accuracy": 0.5951612903225807,
+ "eval_loss": 0.20783506333827972,
+ "eval_runtime": 1.3226,
+ "eval_samples_per_second": 2343.937,
+ "eval_steps_per_second": 49.147,
  "step": 318
  },
  {
  "epoch": 1.5723270440251573,
- "grad_norm": 0.494202196598053,
- "learning_rate": 1.4758909853249476e-05,
- "loss": 0.3218,
+ "grad_norm": 0.5312509536743164,
+ "learning_rate": 1.650593990216632e-05,
+ "loss": 0.3341,
  "step": 500
  },
  {
  "epoch": 2.0,
- "eval_accuracy": 0.8129032258064516,
- "eval_loss": 0.10371456295251846,
- "eval_runtime": 1.3471,
- "eval_samples_per_second": 2301.291,
- "eval_steps_per_second": 48.253,
+ "eval_accuracy": 0.8338709677419355,
+ "eval_loss": 0.09971419721841812,
+ "eval_runtime": 1.3325,
+ "eval_samples_per_second": 2326.534,
+ "eval_steps_per_second": 48.782,
  "step": 636
  },
  {
  "epoch": 3.0,
- "eval_accuracy": 0.8709677419354839,
- "eval_loss": 0.07233396172523499,
- "eval_runtime": 1.3503,
- "eval_samples_per_second": 2295.728,
- "eval_steps_per_second": 48.136,
+ "eval_accuracy": 0.882258064516129,
+ "eval_loss": 0.06562549620866776,
+ "eval_runtime": 1.3417,
+ "eval_samples_per_second": 2310.56,
+ "eval_steps_per_second": 48.447,
  "step": 954
  },
  {
  "epoch": 3.1446540880503147,
- "grad_norm": 0.4743537902832031,
- "learning_rate": 9.517819706498952e-06,
- "loss": 0.1194,
+ "grad_norm": 0.4943838119506836,
+ "learning_rate": 1.3011879804332637e-05,
+ "loss": 0.1156,
  "step": 1000
  }
  ],
  "logging_steps": 500,
- "max_steps": 1908,
+ "max_steps": 2862,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 6,
+ "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -71,8 +71,8 @@
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
- "alpha": 0.6325268278134527,
- "num_train_epochs": 6,
- "temperature": 12
+ "alpha": 0.7941624119034844,
+ "num_train_epochs": 9,
+ "temperature": 8
  }
  }
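
The bumped max_steps agrees with the rest of this state file: the end-of-epoch evaluations land at steps 318, 636, and 954, so one epoch is 318 optimizer steps, and the new trial asks for 9 epochs. A quick arithmetic check, not part of the commit:

    steps_per_epoch = 318        # step logged at "epoch": 1.0
    num_train_epochs = 9         # from the new trial_params
    assert steps_per_epoch * num_train_epochs == 2862   # the new "max_steps"
    # the old values line up the same way: 318 * 6 == 1908
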
run-1/checkpoint-1500/trainer_state.json CHANGED
@@ -10,66 +10,66 @@
  "log_history": [
  {
  "epoch": 1.0,
- "eval_accuracy": 0.577741935483871,
- "eval_loss": 0.20344002544879913,
- "eval_runtime": 1.3567,
- "eval_samples_per_second": 2284.973,
- "eval_steps_per_second": 47.911,
+ "eval_accuracy": 0.5951612903225807,
+ "eval_loss": 0.20783506333827972,
+ "eval_runtime": 1.3226,
+ "eval_samples_per_second": 2343.937,
+ "eval_steps_per_second": 49.147,
  "step": 318
  },
  {
  "epoch": 1.5723270440251573,
- "grad_norm": 0.494202196598053,
- "learning_rate": 1.4758909853249476e-05,
- "loss": 0.3218,
+ "grad_norm": 0.5312509536743164,
+ "learning_rate": 1.650593990216632e-05,
+ "loss": 0.3341,
  "step": 500
  },
  {
  "epoch": 2.0,
- "eval_accuracy": 0.8129032258064516,
- "eval_loss": 0.10371456295251846,
- "eval_runtime": 1.3471,
- "eval_samples_per_second": 2301.291,
- "eval_steps_per_second": 48.253,
+ "eval_accuracy": 0.8338709677419355,
+ "eval_loss": 0.09971419721841812,
+ "eval_runtime": 1.3325,
+ "eval_samples_per_second": 2326.534,
+ "eval_steps_per_second": 48.782,
  "step": 636
  },
  {
  "epoch": 3.0,
- "eval_accuracy": 0.8709677419354839,
- "eval_loss": 0.07233396172523499,
- "eval_runtime": 1.3503,
- "eval_samples_per_second": 2295.728,
- "eval_steps_per_second": 48.136,
+ "eval_accuracy": 0.882258064516129,
+ "eval_loss": 0.06562549620866776,
+ "eval_runtime": 1.3417,
+ "eval_samples_per_second": 2310.56,
+ "eval_steps_per_second": 48.447,
  "step": 954
  },
  {
  "epoch": 3.1446540880503147,
- "grad_norm": 0.4743537902832031,
- "learning_rate": 9.517819706498952e-06,
- "loss": 0.1194,
+ "grad_norm": 0.4943838119506836,
+ "learning_rate": 1.3011879804332637e-05,
+ "loss": 0.1156,
  "step": 1000
  },
  {
  "epoch": 4.0,
- "eval_accuracy": 0.8848387096774194,
- "eval_loss": 0.05824179947376251,
- "eval_runtime": 1.3553,
- "eval_samples_per_second": 2287.256,
- "eval_steps_per_second": 47.959,
+ "eval_accuracy": 0.8958064516129032,
+ "eval_loss": 0.04985633119940758,
+ "eval_runtime": 1.3449,
+ "eval_samples_per_second": 2305.032,
+ "eval_steps_per_second": 48.331,
  "step": 1272
  },
  {
  "epoch": 4.716981132075472,
- "grad_norm": 0.3467690646648407,
- "learning_rate": 4.276729559748428e-06,
- "loss": 0.0813,
+ "grad_norm": 0.3221791386604309,
+ "learning_rate": 9.517819706498952e-06,
+ "loss": 0.0725,
  "step": 1500
  }
  ],
  "logging_steps": 500,
- "max_steps": 1908,
+ "max_steps": 2862,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 6,
+ "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -87,8 +87,8 @@
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
- "alpha": 0.6325268278134527,
- "num_train_epochs": 6,
- "temperature": 12
+ "alpha": 0.7941624119034844,
+ "num_train_epochs": 9,
+ "temperature": 8
  }
  }
run-1/checkpoint-2000/config.json CHANGED
@@ -326,6 +326,6 @@
  "sinusoidal_pos_embds": false,
  "tie_weights_": true,
  "torch_dtype": "float32",
- "transformers_version": "4.37.2",
+ "transformers_version": "4.41.1",
  "vocab_size": 30522
  }
run-1/checkpoint-2000/tokenizer.json CHANGED
@@ -1,11 +1,6 @@
  {
  "version": "1.0",
- "truncation": {
- "direction": "Right",
- "max_length": 512,
- "strategy": "LongestFirst",
- "stride": 0
- },
+ "truncation": null,
  "padding": null,
  "added_tokens": [
  {
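
The tokenizer.json change collapses the serialized truncation settings (right-side truncation at 512 tokens, LongestFirst strategy, stride 0) back to null. Assuming the tokenizers library wrote this file, a minimal sketch of how that field is toggled; the file path is illustrative:

    from tokenizers import Tokenizer

    tok = Tokenizer.from_file("tokenizer.json")

    # Equivalent of the removed block: truncate from the right at 512 tokens.
    tok.enable_truncation(max_length=512, strategy="longest_first", stride=0, direction="right")

    # Equivalent of the new state: disabling truncation serializes "truncation": null.
    tok.no_truncation()
    tok.save("tokenizer.json")
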
run-1/checkpoint-2000/trainer_state.json CHANGED
@@ -10,94 +10,110 @@
  "log_history": [
  {
  "epoch": 1.0,
- "eval_accuracy": 0.6412903225806451,
- "eval_loss": 0.230123370885849,
- "eval_runtime": 1.3595,
- "eval_samples_per_second": 2280.291,
- "eval_steps_per_second": 47.813,
+ "eval_accuracy": 0.5951612903225807,
+ "eval_loss": 0.20783506333827972,
+ "eval_runtime": 1.3226,
+ "eval_samples_per_second": 2343.937,
+ "eval_steps_per_second": 49.147,
  "step": 318
  },
  {
- "epoch": 1.57,
- "learning_rate": 1.685534591194969e-05,
- "loss": 0.3693,
+ "epoch": 1.5723270440251573,
+ "grad_norm": 0.5312509536743164,
+ "learning_rate": 1.650593990216632e-05,
+ "loss": 0.3341,
  "step": 500
  },
  {
  "epoch": 2.0,
- "eval_accuracy": 0.8358064516129032,
- "eval_loss": 0.10448037087917328,
- "eval_runtime": 1.3653,
- "eval_samples_per_second": 2270.569,
- "eval_steps_per_second": 47.609,
+ "eval_accuracy": 0.8338709677419355,
+ "eval_loss": 0.09971419721841812,
+ "eval_runtime": 1.3325,
+ "eval_samples_per_second": 2326.534,
+ "eval_steps_per_second": 48.782,
  "step": 636
  },
  {
  "epoch": 3.0,
- "eval_accuracy": 0.8851612903225806,
- "eval_loss": 0.06413500756025314,
- "eval_runtime": 1.3667,
- "eval_samples_per_second": 2268.292,
- "eval_steps_per_second": 47.561,
+ "eval_accuracy": 0.882258064516129,
+ "eval_loss": 0.06562549620866776,
+ "eval_runtime": 1.3417,
+ "eval_samples_per_second": 2310.56,
+ "eval_steps_per_second": 48.447,
  "step": 954
  },
  {
- "epoch": 3.14,
- "learning_rate": 1.371069182389937e-05,
- "loss": 0.1204,
+ "epoch": 3.1446540880503147,
+ "grad_norm": 0.4943838119506836,
+ "learning_rate": 1.3011879804332637e-05,
+ "loss": 0.1156,
  "step": 1000
  },
  {
  "epoch": 4.0,
- "eval_accuracy": 0.9041935483870968,
- "eval_loss": 0.04693836718797684,
- "eval_runtime": 1.3284,
- "eval_samples_per_second": 2333.641,
- "eval_steps_per_second": 48.931,
+ "eval_accuracy": 0.8958064516129032,
+ "eval_loss": 0.04985633119940758,
+ "eval_runtime": 1.3449,
+ "eval_samples_per_second": 2305.032,
+ "eval_steps_per_second": 48.331,
  "step": 1272
  },
  {
- "epoch": 4.72,
- "learning_rate": 1.0566037735849058e-05,
- "loss": 0.0726,
+ "epoch": 4.716981132075472,
+ "grad_norm": 0.3221791386604309,
+ "learning_rate": 9.517819706498952e-06,
+ "loss": 0.0725,
  "step": 1500
  },
  {
  "epoch": 5.0,
- "eval_accuracy": 0.9180645161290323,
- "eval_loss": 0.03833732381463051,
- "eval_runtime": 1.2898,
- "eval_samples_per_second": 2403.423,
- "eval_steps_per_second": 50.394,
+ "eval_accuracy": 0.9096774193548387,
+ "eval_loss": 0.04061922803521156,
+ "eval_runtime": 1.3564,
+ "eval_samples_per_second": 2285.502,
+ "eval_steps_per_second": 47.922,
  "step": 1590
  },
  {
  "epoch": 6.0,
- "eval_accuracy": 0.9235483870967742,
- "eval_loss": 0.03355677053332329,
- "eval_runtime": 1.2765,
- "eval_samples_per_second": 2428.541,
- "eval_steps_per_second": 50.921,
+ "eval_accuracy": 0.9187096774193548,
+ "eval_loss": 0.03560462221503258,
+ "eval_runtime": 1.3515,
+ "eval_samples_per_second": 2293.716,
+ "eval_steps_per_second": 48.094,
  "step": 1908
  },
  {
- "epoch": 6.29,
- "learning_rate": 7.421383647798742e-06,
- "loss": 0.0554,
+ "epoch": 6.289308176100629,
+ "grad_norm": 0.2550632953643799,
+ "learning_rate": 6.02375960866527e-06,
+ "loss": 0.0569,
  "step": 2000
  }
  ],
  "logging_steps": 500,
- "max_steps": 3180,
+ "max_steps": 2862,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 10,
+ "num_train_epochs": 9,
  "save_steps": 500,
- "total_flos": 519271419317532.0,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 520991326672152.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
- "alpha": 0.7207200745295966,
- "num_train_epochs": 10,
- "temperature": 5
+ "alpha": 0.7941624119034844,
+ "num_train_epochs": 9,
+ "temperature": 8
  }
  }
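
The alpha and temperature entries in trial_params are the usual knobs of a soft-label distillation objective being searched over. The commit does not include the training code, so the following is only a hedged sketch of the kind of loss those two numbers would typically parameterize; every name here is hypothetical:

    import torch.nn.functional as F

    def distillation_loss(student_logits, teacher_logits, labels,
                          alpha=0.7941624119034844, temperature=8.0):
        # Hypothetical objective: alpha-weighted KL between temperature-softened
        # teacher and student distributions (scaled by T^2), plus (1 - alpha)
        # ordinary cross-entropy on the hard labels.
        soft = F.kl_div(
            F.log_softmax(student_logits / temperature, dim=-1),
            F.softmax(teacher_logits / temperature, dim=-1),
            reduction="batchmean",
        ) * (temperature ** 2)
        hard = F.cross_entropy(student_logits, labels)
        return alpha * soft + (1.0 - alpha) * hard
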
run-1/checkpoint-500/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d1992bc8cbd8cfc6c88b1963eedb9aa5c6b4583be00c396662c3f6a358ddb1ad
+ oid sha256:1df6a2e095aa6bbb5d360e7f012cbe4a7929871fd4d4183142dd1f5f93bff5b9
  size 268290900
run-1/checkpoint-500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:569559a6c1c61ada1010013edab7b010346f26be17388bea8473982069d749b5
+ oid sha256:f255b5aed36ba4abd626288657ee63762aafb521c202746c931ad4d10b96a61e
  size 536643898
run-1/checkpoint-500/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bb78b1e790f49c38375a9a03add3c9373ef72cf0138709ffb96775535ef084c6
+ oid sha256:98041bd7cae455426e290a1a0ee683bd5dd30893f7451fec3a464ae8995b17e4
  size 1064
run-1/checkpoint-500/trainer_state.json CHANGED
@@ -10,25 +10,25 @@
  "log_history": [
  {
  "epoch": 1.0,
- "eval_accuracy": 0.577741935483871,
- "eval_loss": 0.20344002544879913,
- "eval_runtime": 1.3567,
- "eval_samples_per_second": 2284.973,
- "eval_steps_per_second": 47.911,
+ "eval_accuracy": 0.5951612903225807,
+ "eval_loss": 0.20783506333827972,
+ "eval_runtime": 1.3226,
+ "eval_samples_per_second": 2343.937,
+ "eval_steps_per_second": 49.147,
  "step": 318
  },
  {
  "epoch": 1.5723270440251573,
- "grad_norm": 0.494202196598053,
- "learning_rate": 1.4758909853249476e-05,
- "loss": 0.3218,
+ "grad_norm": 0.5312509536743164,
+ "learning_rate": 1.650593990216632e-05,
+ "loss": 0.3341,
  "step": 500
  }
  ],
  "logging_steps": 500,
- "max_steps": 1908,
+ "max_steps": 2862,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 6,
+ "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -46,8 +46,8 @@
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
- "alpha": 0.6325268278134527,
- "num_train_epochs": 6,
- "temperature": 12
+ "alpha": 0.7941624119034844,
+ "num_train_epochs": 9,
+ "temperature": 8
  }
  }
run-1/checkpoint-500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:720882e08adee18bc186850828f02514ae1f0c4061921a151ad1b71291c1e3c8
+ oid sha256:48a44b718cd5e63b306a632a23ed7f69877efbab8daac27533495007e71fadba
  size 5176
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6765f5cb88961cbc2387298f0752026d7dd9093834e5e5d36d91152176870a41
+ oid sha256:48a44b718cd5e63b306a632a23ed7f69877efbab8daac27533495007e71fadba
  size 5176
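
The root training_args.bin is the pickled TrainingArguments saved by the Trainer, and its new oid matches the copy under run-1/checkpoint-500, consistent with the current trial's arguments being promoted to the repository root. A small sketch for inspecting it locally; this is a pickle load, so a trusted checkpoint is assumed:

    import torch

    args = torch.load("training_args.bin", weights_only=False)   # TrainingArguments object
    print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
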