Kira-Floris committed on
Commit 56a5fae
1 Parent(s): d4eb0eb

Training in progress, epoch 4

logs/events.out.tfevents.1719305887.852b1e905a9a.223.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ccce78a6de7b7ead06a2eaeb7095288a4e8dd2066b568e59b6f75294e9e29337
- size 6498
+ oid sha256:b2ac4d7fa81b6904905124ea42da5585b5330205bcf5bbe14f7557aa4d2fc4eb
+ size 7032
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc1ea81d68885299a8a06cce03ef9b210fe932d9c8ee4477cb9e366ba6369340
+ oid sha256:afb0fc63aa3b163fe161f0fc2e80b005f0c467955700d8a1e79b85ea43d3e170
  size 17549312
run-0/checkpoint-2108/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52e23161d13c2c0074293d23c46bb546af70686a45a81739153b9141d5aee932
+ oid sha256:afb0fc63aa3b163fe161f0fc2e80b005f0c467955700d8a1e79b85ea43d3e170
  size 17549312
run-0/checkpoint-2108/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6b231c0660c8389832990d020279e7e6bed45f149f650c87707b64d90e7e759f
+ oid sha256:03bfd9340b47f91e67617eca810211baa7689f51a4f5097ecd60622492ca1ec0
  size 35123898
run-0/checkpoint-2108/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5cff89e4666999e97f90d1645b64057419c7dc0028c34edc93cf14e1576ac0da
+ oid sha256:d78463cfbd46cd542a69701cf2c4f2244fad66669f0025f3033d82ed9ee45fb0
  size 1064
run-0/checkpoint-2108/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
  {
- "best_metric": 0.8256880733944955,
+ "best_metric": 0.7901376146788991,
  "best_model_checkpoint": "tiny-bert-sst2-distilled/run-0/checkpoint-2108",
  "epoch": 4.0,
  "eval_steps": 500,
@@ -10,73 +10,73 @@
  "log_history": [
  {
  "epoch": 1.0,
- "grad_norm": 16.55626678466797,
- "learning_rate": 5.1383294230414005e-05,
- "loss": 1.5907,
+ "grad_norm": 5.95961856842041,
+ "learning_rate": 9.55389368279823e-06,
+ "loss": 1.5369,
  "step": 527
  },
  {
  "epoch": 1.0,
- "eval_accuracy": 0.8061926605504587,
- "eval_loss": 1.2464169263839722,
- "eval_runtime": 2.4007,
- "eval_samples_per_second": 363.232,
- "eval_steps_per_second": 2.916,
+ "eval_accuracy": 0.7339449541284404,
+ "eval_loss": 1.2773902416229248,
+ "eval_runtime": 2.5957,
+ "eval_samples_per_second": 335.939,
+ "eval_steps_per_second": 2.697,
  "step": 527
  },
  {
  "epoch": 2.0,
- "grad_norm": Infinity,
- "learning_rate": 4.405675241279466e-05,
- "loss": 0.9038,
+ "grad_norm": 15.252978324890137,
+ "learning_rate": 8.492349940265094e-06,
+ "loss": 1.2159,
  "step": 1054
  },
  {
  "epoch": 2.0,
- "eval_accuracy": 0.8107798165137615,
- "eval_loss": 1.1235342025756836,
- "eval_runtime": 2.3533,
- "eval_samples_per_second": 370.54,
- "eval_steps_per_second": 2.975,
+ "eval_accuracy": 0.7786697247706422,
+ "eval_loss": 1.022659182548523,
+ "eval_runtime": 2.5741,
+ "eval_samples_per_second": 338.753,
+ "eval_steps_per_second": 2.719,
  "step": 1054
  },
  {
  "epoch": 3.0,
- "grad_norm": 48.55740737915039,
- "learning_rate": 3.67162818084498e-05,
- "loss": 0.6946,
+ "grad_norm": 18.01114845275879,
+ "learning_rate": 7.430806197731956e-06,
+ "loss": 1.0132,
  "step": 1581
  },
  {
  "epoch": 3.0,
- "eval_accuracy": 0.8176605504587156,
- "eval_loss": 1.1027159690856934,
- "eval_runtime": 2.3719,
- "eval_samples_per_second": 367.637,
- "eval_steps_per_second": 2.951,
+ "eval_accuracy": 0.7844036697247706,
+ "eval_loss": 0.9622328281402588,
+ "eval_runtime": 2.5865,
+ "eval_samples_per_second": 337.137,
+ "eval_steps_per_second": 2.706,
  "step": 1581
  },
  {
  "epoch": 4.0,
- "grad_norm": 16.267213821411133,
- "learning_rate": 2.9375811204104943e-05,
- "loss": 0.595,
+ "grad_norm": 15.912079811096191,
+ "learning_rate": 6.371276769700781e-06,
+ "loss": 0.9206,
  "step": 2108
  },
  {
  "epoch": 4.0,
- "eval_accuracy": 0.8256880733944955,
- "eval_loss": 1.0700539350509644,
- "eval_runtime": 2.3638,
- "eval_samples_per_second": 368.893,
- "eval_steps_per_second": 2.961,
+ "eval_accuracy": 0.7901376146788991,
+ "eval_loss": 0.9278557896614075,
+ "eval_runtime": 2.5808,
+ "eval_samples_per_second": 337.878,
+ "eval_steps_per_second": 2.712,
  "step": 2108
  }
  ],
  "logging_steps": 500,
- "max_steps": 4216,
+ "max_steps": 5270,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 8,
+ "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -94,9 +94,9 @@
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": {
- "alpha": 0.5167874928728581,
- "learning_rate": 5.872376483475886e-05,
- "num_train_epochs": 8,
- "temperature": 5
+ "alpha": 0.27608826195592573,
+ "learning_rate": 1.0615437425331367e-05,
+ "num_train_epochs": 10,
+ "temperature": 2
  }
  }
run-0/checkpoint-2108/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f28149fe21091b257234d7cbe1611ee6ca88e3a7cef675e40e6d90410e6fc1a6
+ oid sha256:c8b6a60f7b85b38fa45cddf1a417ee51250fe5822237403416bf2406ff2cdb84
  size 5176