diagonalge committed (verified)
Commit: f8940a1
1 Parent(s): 0bff494

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:093ea47c410b0a745b8e93693ed47be4d6992211962e673eb93e4686c8651607
+oid sha256:492482922737f774b1c554ae6b0df707f45da2ec1fea952f8d3cdc663975eaf9
 size 101752088
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:831d62fc929998f1d4d1b816364318aa8561a879a882fb1e8697e381cc4a0ee4
+oid sha256:8c7677d437b82578dbb78df01b1f16713302d84d2d5beb0bcf6fc95926b4f2e6
 size 52046596
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:13938e18b9d99cf7006feae297577a361a1a7e3a9e73718318c8b0eb4669c15e
+oid sha256:49762064c622a61f788bab27148eaa124166994071a98ad304aeda08b72320f4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:79fd9663c22308e7cda458f2f27a3161480f323121be11ab10f5e1ea3f30fc6d
 size 1064
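
The four hunks above change only Git LFS pointer files: the checkpoint binaries themselves live in LFS storage, and each pointer records the object's sha256 and byte size. The sizes are unchanged between step 10 and step 20, consistent with the same tensors being overwritten with new values. As a quick integrity check, a locally downloaded file can be compared against its pointer; the following is a minimal Python sketch, with the expected oid and size copied from the adapter_model.safetensors hunk above and the local path assumed to be a checkout with LFS files materialized.

# Minimal sketch: verify a downloaded checkpoint file against its Git LFS pointer.
import hashlib
from pathlib import Path

def lfs_sha256(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream in chunks so a ~100 MB adapter (or a multi-GB file) is not read into memory at once.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

adapter = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "492482922737f774b1c554ae6b0df707f45da2ec1fea952f8d3cdc663975eaf9"
expected_size = 101752088

assert adapter.stat().st_size == expected_size, "size does not match the LFS pointer"
assert lfs_sha256(adapter) == expected_oid, "sha256 does not match the LFS pointer"
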
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0012587324564163887,
+  "epoch": 0.0025174649128327773,
   "eval_steps": 25,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -85,6 +85,76 @@
       "learning_rate": 0.0002,
       "loss": 3.3169,
       "step": 10
+    },
+    {
+      "epoch": 0.0013846057020580275,
+      "grad_norm": 21.377635955810547,
+      "learning_rate": 0.0001999390827019096,
+      "loss": 1.0058,
+      "step": 11
+    },
+    {
+      "epoch": 0.0015104789476996663,
+      "grad_norm": 11.985687255859375,
+      "learning_rate": 0.00019975640502598244,
+      "loss": 0.8173,
+      "step": 12
+    },
+    {
+      "epoch": 0.0016363521933413054,
+      "grad_norm": 17.24648666381836,
+      "learning_rate": 0.00019945218953682734,
+      "loss": 0.3288,
+      "step": 13
+    },
+    {
+      "epoch": 0.0017622254389829443,
+      "grad_norm": 38.9212532043457,
+      "learning_rate": 0.00019902680687415705,
+      "loss": 1.7752,
+      "step": 14
+    },
+    {
+      "epoch": 0.001888098684624583,
+      "grad_norm": 12.58133316040039,
+      "learning_rate": 0.00019848077530122083,
+      "loss": 1.7308,
+      "step": 15
+    },
+    {
+      "epoch": 0.002013971930266222,
+      "grad_norm": 5.521592617034912,
+      "learning_rate": 0.00019781476007338058,
+      "loss": 0.1023,
+      "step": 16
+    },
+    {
+      "epoch": 0.002139845175907861,
+      "grad_norm": 1.7692532539367676,
+      "learning_rate": 0.00019702957262759965,
+      "loss": 0.0166,
+      "step": 17
+    },
+    {
+      "epoch": 0.0022657184215494996,
+      "grad_norm": 9.756790161132812,
+      "learning_rate": 0.0001961261695938319,
+      "loss": 1.1932,
+      "step": 18
+    },
+    {
+      "epoch": 0.0023915916671911385,
+      "grad_norm": 8.156234741210938,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.3879,
+      "step": 19
+    },
+    {
+      "epoch": 0.0025174649128327773,
+      "grad_norm": 6.355808258056641,
+      "learning_rate": 0.00019396926207859084,
+      "loss": 0.4888,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -104,7 +174,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6585384016281600.0,
+  "total_flos": 1.31707680325632e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null