Training in progress, epoch 0
- all_results.json +9 -9
- eval_results.json +5 -5
- model.safetensors +1 -1
- runs/Jun10_05-30-11_e6d590d50f6e/events.out.tfevents.1717997741.e6d590d50f6e.461.7 +3 -0
- runs/Jun10_05-35-50_e6d590d50f6e/events.out.tfevents.1717997751.e6d590d50f6e.461.8 +3 -0
- train_results.json +4 -4
- trainer_state.json +101 -101
- training_args.bin +1 -1
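
Taken together, these are the artifacts a transformers Trainer writes and pushes at the end of a run: the metric summaries (all_results.json, eval_results.json, train_results.json), the weights and serialized arguments (model.safetensors, training_args.bin), the full log history (trainer_state.json), and TensorBoard event files under runs/. Commits titled "Training in progress, epoch N" are the ones the Trainer pushes automatically when push_to_hub is enabled. A minimal sketch of that workflow follows; the Swin-Tiny checkpoint name is the standard upstream one, while the label count and the random in-memory dataset are assumptions made only so the example runs, not details recovered from this repository.

# Hedged sketch of the Trainer workflow that writes the files in this commit.
# The label count and the random dataset below are assumptions, not repo facts.
import torch
from torch.utils.data import Dataset
from transformers import AutoModelForImageClassification, Trainer, TrainingArguments

class ToyImageDataset(Dataset):
    """Random stand-in for the real image dataset (assumption)."""
    def __init__(self, n=16, num_labels=5):
        self.pixel_values = torch.randn(n, 3, 224, 224)
        self.labels = torch.randint(0, num_labels, (n,))
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, i):
        return {"pixel_values": self.pixel_values[i], "labels": self.labels[i]}

model = AutoModelForImageClassification.from_pretrained(
    "microsoft/swin-tiny-patch4-window7-224",
    num_labels=5,                  # assumed; the real class count is not in this diff
    ignore_mismatched_sizes=True,  # swap the 1000-way ImageNet head for a fresh one
)

args = TrainingArguments(
    output_dir="swin-tiny-patch4-window7-224-finalterm",  # matches best_model_checkpoint
    num_train_epochs=1,            # the real run trained for roughly 10 epochs
    logging_steps=10,              # matches "logging_steps": 10 in trainer_state.json
    report_to=["tensorboard"],     # writes runs/**/events.out.tfevents.* files
)

trainer = Trainer(model=model, args=args,
                  train_dataset=ToyImageDataset(), eval_dataset=ToyImageDataset())

train_result = trainer.train()
trainer.save_model()                                 # model.safetensors + training_args.bin
trainer.save_metrics("train", train_result.metrics)  # train_results.json (+ all_results.json)
eval_metrics = trainer.evaluate()
trainer.save_metrics("eval", eval_metrics)           # eval_results.json (+ all_results.json)
trainer.save_state()                                 # trainer_state.json

The file-by-file diffs follow.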
all_results.json CHANGED
@@ -1,13 +1,13 @@
 {
     "epoch": 9.68421052631579,
-    "eval_accuracy": 0.
-    "eval_loss": 0.
-    "eval_runtime": 3.
-    "eval_samples_per_second":
-    "eval_steps_per_second": 7.
+    "eval_accuracy": 0.9352708058124174,
+    "eval_loss": 0.1843554824590683,
+    "eval_runtime": 3.1532,
+    "eval_samples_per_second": 240.077,
+    "eval_steps_per_second": 7.611,
     "total_flos": 7.291573574754632e+17,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second": 97.
-    "train_steps_per_second": 0.
+    "train_loss": 0.36177816701971965,
+    "train_runtime": 310.2655,
+    "train_samples_per_second": 97.594,
+    "train_steps_per_second": 0.741
 }
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 9.68421052631579,
-    "eval_accuracy": 0.
-    "eval_loss": 0.
-    "eval_runtime": 3.
-    "eval_samples_per_second":
-    "eval_steps_per_second": 7.
+    "eval_accuracy": 0.9352708058124174,
+    "eval_loss": 0.1843554824590683,
+    "eval_runtime": 3.1532,
+    "eval_samples_per_second": 240.077,
+    "eval_steps_per_second": 7.611
 }
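
The eval_accuracy value above is the kind of number supplied by a compute_metrics callback rather than by the Trainer itself; the callback actually used for this run is not part of the commit. A minimal sketch of the common pattern, assuming the evaluate library's accuracy metric:

# Hedged sketch of the usual compute_metrics hook behind "eval_accuracy".
import numpy as np
import evaluate

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    # Returned keys are reported with an "eval_" prefix, e.g. "eval_accuracy".
    return accuracy.compute(predictions=predictions, references=labels)

# Standalone check; Trainer(..., compute_metrics=compute_metrics) would call it
# during trainer.evaluate(), which also adds eval_loss, eval_runtime and throughput.
print(compute_metrics((np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0]))))  # {'accuracy': 1.0}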
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9b80417fbab649d6952234f47689d844e63a0c4201df74afdfcb488c360b85e7
 size 110355136
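
model.safetensors is tracked with Git LFS, so the repository itself stores only this three-line pointer: the spec version, the SHA-256 of the real blob (oid), and its size in bytes (about 110 MB). A small sketch of how the pointer relates to the resolved file, assuming the weights have already been pulled to a local model.safetensors:

# Hedged sketch: verify a locally resolved model.safetensors against the LFS pointer above.
# The local path is an assumption (e.g. after `git lfs pull` or a hub download).
import hashlib

def sha256_of(path, chunk=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

digest = sha256_of("model.safetensors")
print(digest == "9b80417fbab649d6952234f47689d844e63a0c4201df74afdfcb488c360b85e7")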
runs/Jun10_05-30-11_e6d590d50f6e/events.out.tfevents.1717997741.e6d590d50f6e.461.7 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:895742b41821b52486dd8476afe46a30078a32b0547b591caa9cb2fc4789694b
+size 411

runs/Jun10_05-35-50_e6d590d50f6e/events.out.tfevents.1717997751.e6d590d50f6e.461.8 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6f8f70de6a3163db263348d0591e69c1b2549d33da6a43c69bff695f8ce939b
+size 6066
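
The two added runs/ files are TensorBoard event logs (also stored as LFS pointers), written because the run reports to TensorBoard. Once the repository is cloned and the LFS blobs resolved, the logged scalars can be read back; a sketch, assuming the local runs/Jun10_05-35-50_e6d590d50f6e directory:

# Hedged sketch: read scalar history out of one of the uploaded event files.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jun10_05-35-50_e6d590d50f6e")  # a run directory, not the file itself
acc.Reload()
for tag in acc.Tags()["scalars"]:          # tag names depend on how the run logged
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)

Alternatively, `tensorboard --logdir runs` serves the same data interactively.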
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 9.68421052631579,
     "total_flos": 7.291573574754632e+17,
-    "train_loss": 0.
-    "train_runtime":
-    "train_samples_per_second": 97.
-    "train_steps_per_second": 0.
+    "train_loss": 0.36177816701971965,
+    "train_runtime": 310.2655,
+    "train_samples_per_second": 97.594,
+    "train_steps_per_second": 0.741
 }
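
The new train_results.json numbers are mutually consistent: 0.741 steps/s over 310.2655 s is about 230 optimizer steps, matching the final "step": 230 in trainer_state.json, and 97.594 samples/s works out to roughly 30,280 samples seen, i.e. about 131.7 samples per optimizer step. The check is plain arithmetic:

# Quick consistency check of the train_results.json numbers above (pure arithmetic).
train_runtime = 310.2655            # seconds
samples_per_second = 97.594
steps_per_second = 0.741

print(round(steps_per_second * train_runtime))           # ~230, matches the final "step": 230
print(round(samples_per_second * train_runtime))         # ~30280 samples over ~9.68 epochs
print(round(samples_per_second / steps_per_second, 1))   # ~131.7 samples per optimizer step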
trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.
+  "best_metric": 0.9352708058124174,
   "best_model_checkpoint": "swin-tiny-patch4-window7-224-finalterm/checkpoint-166",
   "epoch": 9.68421052631579,
   "eval_steps": 500,
@@ -10,263 +10,263 @@
   "log_history": [
     {
       "epoch": 0.42105263157894735,
-      "grad_norm":
+      "grad_norm": 6.019707679748535,
       "learning_rate": 2.173913043478261e-05,
-      "loss": 1.
+      "loss": 1.7902,
       "step": 10
     },
     {
       "epoch": 0.8421052631578947,
-      "grad_norm":
+      "grad_norm": 5.092822551727295,
       "learning_rate": 4.347826086956522e-05,
-      "loss": 1.
+      "loss": 1.2727,
       "step": 20
     },
     {
       "epoch": 0.968421052631579,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.8335535006605019,
+      "eval_loss": 0.5535598993301392,
+      "eval_runtime": 3.1512,
+      "eval_samples_per_second": 240.223,
+      "eval_steps_per_second": 7.616,
       "step": 23
     },
     {
       "epoch": 1.263157894736842,
-      "grad_norm":
+      "grad_norm": 6.4061150550842285,
       "learning_rate": 4.830917874396135e-05,
-      "loss": 0.
+      "loss": 0.5623,
       "step": 30
     },
     {
       "epoch": 1.6842105263157894,
-      "grad_norm":
+      "grad_norm": 4.992868900299072,
       "learning_rate": 4.589371980676328e-05,
-      "loss": 0.
+      "loss": 0.3845,
       "step": 40
     },
     {
       "epoch": 1.9789473684210526,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9114927344782034,
+      "eval_loss": 0.23858527839183807,
+      "eval_runtime": 3.1195,
+      "eval_samples_per_second": 242.671,
+      "eval_steps_per_second": 7.694,
       "step": 47
     },
     {
       "epoch": 2.1052631578947367,
-      "grad_norm":
+      "grad_norm": 5.19857120513916,
       "learning_rate": 4.347826086956522e-05,
-      "loss": 0.
+      "loss": 0.3797,
       "step": 50
     },
     {
       "epoch": 2.526315789473684,
-      "grad_norm":
+      "grad_norm": 6.890084743499756,
       "learning_rate": 4.106280193236715e-05,
-      "loss": 0.
+      "loss": 0.3025,
       "step": 60
     },
     {
       "epoch": 2.9473684210526314,
-      "grad_norm":
+      "grad_norm": 4.1722092628479,
       "learning_rate": 3.864734299516908e-05,
-      "loss": 0.
+      "loss": 0.2725,
       "step": 70
     },
     {
       "epoch": 2.9894736842105263,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9233817701453104,
+      "eval_loss": 0.21346069872379303,
+      "eval_runtime": 3.2718,
+      "eval_samples_per_second": 231.37,
+      "eval_steps_per_second": 7.335,
       "step": 71
     },
     {
       "epoch": 3.3684210526315788,
-      "grad_norm":
+      "grad_norm": 3.6963953971862793,
       "learning_rate": 3.6231884057971014e-05,
-      "loss": 0.
+      "loss": 0.2404,
       "step": 80
     },
     {
       "epoch": 3.7894736842105265,
-      "grad_norm":
+      "grad_norm": 4.915622711181641,
       "learning_rate": 3.381642512077295e-05,
-      "loss": 0.
+      "loss": 0.2442,
       "step": 90
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9075297225891678,
+      "eval_loss": 0.2290719598531723,
+      "eval_runtime": 3.1408,
+      "eval_samples_per_second": 241.02,
+      "eval_steps_per_second": 7.641,
       "step": 95
     },
     {
       "epoch": 4.2105263157894735,
-      "grad_norm": 3.
+      "grad_norm": 3.525477170944214,
       "learning_rate": 3.140096618357488e-05,
-      "loss": 0.
+      "loss": 0.265,
       "step": 100
     },
     {
       "epoch": 4.631578947368421,
-      "grad_norm":
+      "grad_norm": 4.751044750213623,
       "learning_rate": 2.8985507246376814e-05,
-      "loss": 0.
+      "loss": 0.2097,
       "step": 110
     },
     {
       "epoch": 4.968421052631579,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9207397622192867,
+      "eval_loss": 0.19642269611358643,
+      "eval_runtime": 3.1107,
+      "eval_samples_per_second": 243.355,
+      "eval_steps_per_second": 7.715,
       "step": 118
     },
     {
       "epoch": 5.052631578947368,
-      "grad_norm":
+      "grad_norm": 6.279447078704834,
       "learning_rate": 2.6570048309178748e-05,
-      "loss": 0.
+      "loss": 0.2359,
       "step": 120
     },
     {
       "epoch": 5.473684210526316,
-      "grad_norm":
+      "grad_norm": 5.107997894287109,
       "learning_rate": 2.4154589371980676e-05,
-      "loss": 0.
+      "loss": 0.2293,
       "step": 130
     },
     {
       "epoch": 5.894736842105263,
-      "grad_norm": 4.
+      "grad_norm": 4.217101097106934,
       "learning_rate": 2.173913043478261e-05,
-      "loss": 0.
+      "loss": 0.2237,
       "step": 140
     },
     {
       "epoch": 5.978947368421053,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second": 241.
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9286657859973579,
+      "eval_loss": 0.19202813506126404,
+      "eval_runtime": 3.1318,
+      "eval_samples_per_second": 241.714,
+      "eval_steps_per_second": 7.663,
       "step": 142
     },
     {
       "epoch": 6.315789473684211,
-      "grad_norm": 4.
+      "grad_norm": 4.676567077636719,
       "learning_rate": 1.932367149758454e-05,
-      "loss": 0.
+      "loss": 0.1789,
       "step": 150
     },
     {
       "epoch": 6.7368421052631575,
-      "grad_norm":
+      "grad_norm": 4.55054235458374,
       "learning_rate": 1.6908212560386476e-05,
-      "loss": 0.
+      "loss": 0.2199,
       "step": 160
     },
     {
       "epoch": 6.989473684210527,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9352708058124174,
+      "eval_loss": 0.1843554824590683,
+      "eval_runtime": 3.1189,
+      "eval_samples_per_second": 242.711,
+      "eval_steps_per_second": 7.695,
       "step": 166
     },
     {
       "epoch": 7.157894736842105,
-      "grad_norm":
+      "grad_norm": 4.944462776184082,
       "learning_rate": 1.4492753623188407e-05,
-      "loss": 0.
+      "loss": 0.2016,
       "step": 170
     },
     {
       "epoch": 7.578947368421053,
-      "grad_norm": 4.
+      "grad_norm": 4.52495813369751,
       "learning_rate": 1.2077294685990338e-05,
-      "loss": 0.
+      "loss": 0.1954,
       "step": 180
     },
     {
       "epoch": 8.0,
-      "grad_norm": 5.
+      "grad_norm": 5.276759624481201,
       "learning_rate": 9.66183574879227e-06,
-      "loss": 0.
+      "loss": 0.2209,
       "step": 190
     },
     {
       "epoch": 8.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9273447820343461,
+      "eval_loss": 0.18566328287124634,
+      "eval_runtime": 3.1379,
+      "eval_samples_per_second": 241.243,
+      "eval_steps_per_second": 7.648,
       "step": 190
     },
     {
       "epoch": 8.421052631578947,
-      "grad_norm":
+      "grad_norm": 4.787791728973389,
       "learning_rate": 7.246376811594203e-06,
-      "loss": 0.
+      "loss": 0.1914,
       "step": 200
     },
     {
       "epoch": 8.842105263157894,
-      "grad_norm":
+      "grad_norm": 4.165464401245117,
       "learning_rate": 4.830917874396135e-06,
-      "loss": 0.
+      "loss": 0.1717,
       "step": 210
     },
     {
       "epoch": 8.968421052631578,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9313077939233818,
+      "eval_loss": 0.18422812223434448,
+      "eval_runtime": 3.1181,
+      "eval_samples_per_second": 242.779,
+      "eval_steps_per_second": 7.697,
       "step": 213
     },
     {
       "epoch": 9.263157894736842,
-      "grad_norm":
+      "grad_norm": 2.85178279876709,
       "learning_rate": 2.4154589371980677e-06,
-      "loss": 0.
+      "loss": 0.1532,
       "step": 220
     },
     {
       "epoch": 9.68421052631579,
-      "grad_norm":
+      "grad_norm": 4.912642478942871,
       "learning_rate": 0.0,
-      "loss": 0.
+      "loss": 0.1754,
       "step": 230
     },
     {
       "epoch": 9.68421052631579,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 3.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 7.
+      "eval_accuracy": 0.9313077939233818,
+      "eval_loss": 0.18370747566223145,
+      "eval_runtime": 3.1098,
+      "eval_samples_per_second": 243.427,
+      "eval_steps_per_second": 7.718,
       "step": 230
     },
     {
       "epoch": 9.68421052631579,
       "step": 230,
       "total_flos": 7.291573574754632e+17,
-      "train_loss": 0.
-      "train_runtime":
-      "train_samples_per_second": 97.
-      "train_steps_per_second": 0.
+      "train_loss": 0.36177816701971965,
+      "train_runtime": 310.2655,
+      "train_samples_per_second": 97.594,
+      "train_steps_per_second": 0.741
     }
   ],
   "logging_steps": 10,
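
The updated best_metric (0.9352708…) equals the eval_accuracy logged at step 166, and best_model_checkpoint points at checkpoint-166, which is consistent with per-epoch evaluation and accuracy-based model selection. A hedged sketch of the TrainingArguments flags that produce this bookkeeping; the original run's exact arguments are not in the diff:

# Hedged sketch of model-selection flags consistent with best_metric / best_model_checkpoint.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="swin-tiny-patch4-window7-224-finalterm",
    eval_strategy="epoch",             # named evaluation_strategy in older transformers releases
    save_strategy="epoch",
    load_best_model_at_end=True,       # populates best_metric / best_model_checkpoint
    metric_for_best_model="accuracy",  # best_metric matches eval_accuracy at step 166
    greater_is_better=True,
)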
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:94ef88ce9caefe0cc757b7eb234413ab26e67b39e3200d8c43ac067a6246a1d2
 size 5176
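
training_args.bin is the TrainingArguments object serialized with torch.save, so it can be reloaded for inspection; a short sketch, assuming the file has been downloaded locally:

# Hedged sketch: reload the pickled TrainingArguments for inspection (local path assumed).
import torch

args = torch.load("training_args.bin", weights_only=False)  # a pickled dataclass, not a tensor file
print(type(args).__name__)   # TrainingArguments
print(args.logging_steps)    # expected to be 10, as recorded in trainer_state.json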