besimray committed
Commit 5786147
1 parent: 68d922d

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b363d9b3d34db8babb2c2cd28ff9fcbe150e320d421d670b02824aac3fbbca9
+oid sha256:a0222c0047535996deb8c8cb559888e4d66b3038044de3ae8bf08f3bd010cc02
 size 22573704
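The checkpoint binaries in this commit are stored through Git LFS, so the diff only touches the pointer files: the oid sha256 line is the hash of the real object and size is its byte length. As a minimal sketch (assuming the actual adapter_model.safetensors has already been pulled into the working tree), the new pointer can be checked against the downloaded blob like this:

import hashlib
import os

# Assumed local path to the pulled file; values copied from the new pointer above.
blob_path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "a0222c0047535996deb8c8cb559888e4d66b3038044de3ae8bf08f3bd010cc02"
expected_size = 22573704

# Hash in chunks so a large checkpoint does not need to fit in memory.
sha = hashlib.sha256()
with open(blob_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(blob_path) == expected_size, "size mismatch with LFS pointer"
assert sha.hexdigest() == expected_oid, "sha256 mismatch with LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")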
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:58aa446747ed8a047799a4009220bfa51fd93f2d52c03e85d541e5fe0f01c58c
+oid sha256:9677e62c9c2ab535a7409c2bbd60fd7316b6431801418e2a7ba28e522edbfac2
 size 11710970
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd78934c835a4a69f293b31361858c5aba532565f0c899d40296121d5cbad7d7
+oid sha256:bd641d2c2a7f8e3282e19ce9e5df14c6531555bf46535a1b63e4e32f7c2a55e1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4849a6ac0a1d895740f1ab4eba9d346b8d898008d0cfe93dd108cd928d7c63e
+oid sha256:8f3583f6fb6381489c8d29ab09722642111b0d57df55aa7b4c72ce687a83cdee
 size 1064
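The optimizer, scheduler, and RNG files above are ordinary torch.save payloads written alongside the adapter weights, so a pulled checkpoint can be sanity-checked by loading them directly. A minimal sketch, assuming PyTorch is installed and the checkpoint directory has been fetched locally:

import torch

ckpt_dir = "last-checkpoint"  # assumed local path to a pulled copy of this checkpoint

# map_location="cpu" avoids needing a GPU; weights_only=False is needed on recent
# PyTorch because rng_state.pth also stores non-tensor objects (Python/NumPy RNG states).
optimizer_state = torch.load(f"{ckpt_dir}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load(f"{ckpt_dir}/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt_dir}/rng_state.pth", map_location="cpu", weights_only=False)

print(list(optimizer_state.keys()))  # an optimizer state_dict, usually ['state', 'param_groups']
print(scheduler_state)               # the LR scheduler state_dict (the ~1 KB scheduler.pt above)
print(list(rng_state.keys()))        # RNG states saved with the checkpoint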
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.9411152005195618,
-  "best_model_checkpoint": "miner_id_24/checkpoint-10",
-  "epoch": 0.5714285714285714,
+  "best_metric": 0.784389317035675,
+  "best_model_checkpoint": "miner_id_24/checkpoint-20",
+  "epoch": 1.1428571428571428,
   "eval_steps": 10,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -93,6 +93,84 @@
       "eval_samples_per_second": 7.199,
       "eval_steps_per_second": 1.92,
       "step": 10
+    },
+    {
+      "epoch": 0.6285714285714286,
+      "grad_norm": 0.33402150869369507,
+      "learning_rate": 9.986661418317759e-05,
+      "loss": 0.8844,
+      "step": 11
+    },
+    {
+      "epoch": 0.6857142857142857,
+      "grad_norm": 0.31261247396469116,
+      "learning_rate": 9.946716840375551e-05,
+      "loss": 0.8441,
+      "step": 12
+    },
+    {
+      "epoch": 0.7428571428571429,
+      "grad_norm": 0.33552902936935425,
+      "learning_rate": 9.880379387779637e-05,
+      "loss": 0.8498,
+      "step": 13
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 0.33501139283180237,
+      "learning_rate": 9.78800299954203e-05,
+      "loss": 0.8905,
+      "step": 14
+    },
+    {
+      "epoch": 0.8571428571428571,
+      "grad_norm": 0.33641278743743896,
+      "learning_rate": 9.67008054366274e-05,
+      "loss": 0.8753,
+      "step": 15
+    },
+    {
+      "epoch": 0.9142857142857143,
+      "grad_norm": 0.3439123332500458,
+      "learning_rate": 9.527241187465734e-05,
+      "loss": 0.8292,
+      "step": 16
+    },
+    {
+      "epoch": 0.9714285714285714,
+      "grad_norm": 0.3387420177459717,
+      "learning_rate": 9.360247040719039e-05,
+      "loss": 0.8047,
+      "step": 17
+    },
+    {
+      "epoch": 1.0285714285714285,
+      "grad_norm": 0.35765647888183594,
+      "learning_rate": 9.16998908944939e-05,
+      "loss": 0.8683,
+      "step": 18
+    },
+    {
+      "epoch": 1.0857142857142856,
+      "grad_norm": 0.33680498600006104,
+      "learning_rate": 8.957482442146272e-05,
+      "loss": 0.7337,
+      "step": 19
+    },
+    {
+      "epoch": 1.1428571428571428,
+      "grad_norm": 0.32664304971694946,
+      "learning_rate": 8.72386091371891e-05,
+      "loss": 0.7241,
+      "step": 20
+    },
+    {
+      "epoch": 1.1428571428571428,
+      "eval_loss": 0.784389317035675,
+      "eval_runtime": 2.086,
+      "eval_samples_per_second": 7.191,
+      "eval_steps_per_second": 1.918,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -121,7 +199,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3848728247009280.0,
+  "total_flos": 7697456494018560.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null