shibajustfor committed (verified)
Commit 040ba9c · Parent: e2a18e8

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b474bd93c3206db3d88387f7f44736976b1523750858cd8cf527baa2232b7f9
+oid sha256:93022cc3213e06f275da22ae0d7c0b2e83448d3ea658694fe6f3c7ce92543a88
 size 125248064
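
The adapter weights are rewritten wholesale at each save: the pointer's size stays at 125248064 bytes and only the sha256 oid changes. A minimal sketch of loading this adapter for inference with PEFT, assuming the checkpoint comes from a PEFT/LoRA run and using a placeholder base-model id (the actual base model is not recorded in this diff):

# Sketch only: "base-model-name" is a placeholder, not taken from this repository.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-name")
tokenizer = AutoTokenizer.from_pretrained("base-model-name")
# Loads last-checkpoint/adapter_model.safetensors (plus its adapter config) on top of the base model.
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()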
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f03ca27aa3f217a2b689bb12700acd2b25af84d828f89a265fa2610cc2750ea
+oid sha256:30636835b81bc57d605076ab81c45af81a2327ddb0cbed89724a6f18b033e72a
 size 64219860
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d67cef9c988719cba8883d1990d1b0cdb6b0ce358c09847ec80740d69ae7a53d
+oid sha256:8a7f1807a79f764cdbf1638aa0e0db1154794499b1015a5180c25c186b8d6d94
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8148b27980adb9b0bdd5c04049e531bfefcc05c06612ee72169cfb9b9fd7ee7
+oid sha256:0fd8212ec5ec3406d74a7f927b717dd30ea8a06115ee6582e14976f7b84b4b58
 size 1064
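
All four files above are stored through Git LFS, so each diff only touches the three-line pointer (spec version, sha256 oid, byte size) rather than the binary payload itself. A minimal sketch of verifying a downloaded file against its pointer, assuming the real files have already been pulled locally (the path below is illustrative):

import hashlib
from pathlib import Path

def sha256_of(path: str) -> str:
    # Hash in 1 MiB chunks so large checkpoint files never have to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

path = "last-checkpoint/scheduler.pt"   # illustrative local path
print(sha256_of(path))                  # should equal the oid in the LFS pointer
print(Path(path).stat().st_size)        # should equal the pointer's size field (1064)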
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.10793308148947653,
+  "epoch": 0.21586616297895306,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 21.007,
       "eval_steps_per_second": 10.557,
       "step": 50
+    },
+    {
+      "epoch": 0.12951969778737182,
+      "grad_norm": 0.8435693979263306,
+      "learning_rate": 0.00016324453755953773,
+      "loss": 0.7173,
+      "step": 60
+    },
+    {
+      "epoch": 0.15110631408526715,
+      "grad_norm": 0.8595899939537048,
+      "learning_rate": 0.00015000000000000001,
+      "loss": 0.6499,
+      "step": 70
+    },
+    {
+      "epoch": 0.17269293038316244,
+      "grad_norm": 0.7316609025001526,
+      "learning_rate": 0.00013546048870425356,
+      "loss": 0.6147,
+      "step": 80
+    },
+    {
+      "epoch": 0.19427954668105774,
+      "grad_norm": 0.845934271812439,
+      "learning_rate": 0.00012000256937760445,
+      "loss": 0.6582,
+      "step": 90
+    },
+    {
+      "epoch": 0.21586616297895306,
+      "grad_norm": 0.7001475691795349,
+      "learning_rate": 0.00010402659401094152,
+      "loss": 0.6764,
+      "step": 100
+    },
+    {
+      "epoch": 0.21586616297895306,
+      "eval_loss": 0.633052408695221,
+      "eval_runtime": 9.2699,
+      "eval_samples_per_second": 21.036,
+      "eval_steps_per_second": 10.572,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -77,7 +120,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.5831020863488e+16,
+  "total_flos": 3.1662041726976e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null