jialicheng committed on
Commit
8e9afeb
·
verified ·
1 Parent(s): 393647d

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: bert-large-uncased
4
+ tags:
5
+ - generated_from_trainer
6
+ metrics:
7
+ - accuracy
8
+ model-index:
9
+ - name: bert-large
10
+ results: []
11
+ ---
12
+
13
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
+ should probably proofread and complete it, then remove this comment. -->
15
+
16
+ # bert-large
17
+
18
+ This model is a fine-tuned version of [bert-large-uncased](https://huggingface.co/bert-large-uncased) on an unknown dataset.
19
+ It achieves the following results on the evaluation set:
20
+ - Loss: 0.9621
21
+ - Accuracy: 0.8887
22
+
23
+ ## Model description
24
+
25
+ More information needed
26
+
27
+ ## Intended uses & limitations
28
+
29
+ More information needed
30
+
31
+ ## Training and evaluation data
32
+
33
+ More information needed
34
+
35
+ ## Training procedure
36
+
37
+ ### Training hyperparameters
38
+
39
+ The following hyperparameters were used during training:
40
+ - learning_rate: 5e-05
41
+ - train_batch_size: 32
42
+ - eval_batch_size: 256
43
+ - seed: 42
44
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
45
+ - lr_scheduler_type: linear
46
+ - num_epochs: 20
47
+
48
+ ### Training results
49
+
50
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
51
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|
52
+ | No log | 1.0 | 782 | 0.2848 | 0.8852 |
53
+ | 0.3133 | 2.0 | 1564 | 0.3038 | 0.8888 |
54
+ | 0.1751 | 3.0 | 2346 | 0.5035 | 0.8791 |
55
+ | 0.1057 | 4.0 | 3128 | 0.5942 | 0.885 |
56
+ | 0.1057 | 5.0 | 3910 | 0.5220 | 0.8764 |
57
+ | 0.0733 | 6.0 | 4692 | 0.6981 | 0.8823 |
58
+ | 0.0439 | 7.0 | 5474 | 0.6775 | 0.8833 |
59
+ | 0.0371 | 8.0 | 6256 | 0.6118 | 0.8891 |
60
+ | 0.0277 | 9.0 | 7038 | 0.7128 | 0.8864 |
61
+ | 0.0277 | 10.0 | 7820 | 0.7555 | 0.8868 |
62
+ | 0.0202 | 11.0 | 8602 | 0.7618 | 0.8888 |
63
+ | 0.0141 | 12.0 | 9384 | 0.7654 | 0.8842 |
64
+ | 0.0125 | 13.0 | 10166 | 0.8345 | 0.8867 |
65
+ | 0.0125 | 14.0 | 10948 | 0.8073 | 0.8844 |
66
+ | 0.0077 | 15.0 | 11730 | 0.7047 | 0.8887 |
67
+ | 0.0071 | 16.0 | 12512 | 0.8622 | 0.8891 |
68
+ | 0.004 | 17.0 | 13294 | 0.8655 | 0.8900 |
69
+ | 0.0031 | 18.0 | 14076 | 0.9096 | 0.8898 |
70
+ | 0.0031 | 19.0 | 14858 | 0.9454 | 0.8892 |
71
+ | 0.0016 | 20.0 | 15640 | 0.9621 | 0.8887 |
72
+
73
+
74
+ ### Framework versions
75
+
76
+ - Transformers 4.37.2
77
+ - Pytorch 2.3.0+cu121
78
+ - Datasets 2.19.0
79
+ - Tokenizers 0.15.2
all_results.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dr_accuracy": 0.9998,
3
+ "dr_loss": 0.0014581895666196942,
4
+ "dr_runtime": 108.3951,
5
+ "dr_samples_per_second": 230.638,
6
+ "dr_steps_per_second": 0.904,
7
+ "epoch": 20.0,
8
+ "ood_accuracy": 0.8211404989682987,
9
+ "ood_loss": 1.3243283033370972,
10
+ "ood_runtime": 46.2772,
11
+ "ood_samples_per_second": 230.394,
12
+ "ood_steps_per_second": 0.908,
13
+ "test_accuracy": 0.89004,
14
+ "test_loss": 0.8655158877372742,
15
+ "test_runtime": 107.9984,
16
+ "test_samples_per_second": 231.485,
17
+ "test_steps_per_second": 0.907,
18
+ "train_loss": 0.054148883717444245,
19
+ "train_runtime": 8090.5095,
20
+ "train_samples_per_second": 61.801,
21
+ "train_steps_per_second": 1.933
22
+ }
config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "bert-large-uncased",
3
+ "architectures": [
4
+ "BertForSequenceClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "finetuning_task": "text-classification",
9
+ "gradient_checkpointing": false,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 1024,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 4096,
15
+ "label2id": {
16
+ "0": 0,
17
+ "1": 1
18
+ },
19
+ "layer_norm_eps": 1e-12,
20
+ "max_position_embeddings": 512,
21
+ "model_type": "bert",
22
+ "num_attention_heads": 16,
23
+ "num_hidden_layers": 24,
24
+ "pad_token_id": 0,
25
+ "position_embedding_type": "absolute",
26
+ "problem_type": "single_label_classification",
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.37.2",
29
+ "type_vocab_size": 2,
30
+ "use_cache": true,
31
+ "vocab_size": 30522
32
+ }
dr_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dr_accuracy": 0.9998,
3
+ "dr_loss": 0.0014581895666196942,
4
+ "dr_runtime": 108.3951,
5
+ "dr_samples_per_second": 230.638,
6
+ "dr_steps_per_second": 0.904,
7
+ "epoch": 20.0
8
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb153cf5450d8a6135b5d4f4634a641bb5548791b647b7893299cafa3f83c06f
3
+ size 1340622760
ood_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 20.0,
3
+ "ood_accuracy": 0.8211404989682987,
4
+ "ood_loss": 1.3243283033370972,
5
+ "ood_runtime": 46.2772,
6
+ "ood_samples_per_second": 230.394,
7
+ "ood_steps_per_second": 0.908
8
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
test_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 20.0,
3
+ "test_accuracy": 0.89004,
4
+ "test_loss": 0.8655158877372742,
5
+ "test_runtime": 107.9984,
6
+ "test_samples_per_second": 231.485,
7
+ "test_steps_per_second": 0.907
8
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": true,
47
+ "mask_token": "[MASK]",
48
+ "model_max_length": 512,
49
+ "pad_token": "[PAD]",
50
+ "sep_token": "[SEP]",
51
+ "strip_accents": null,
52
+ "tokenize_chinese_chars": true,
53
+ "tokenizer_class": "BertTokenizer",
54
+ "unk_token": "[UNK]"
55
+ }
train_results.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 20.0,
3
+ "train_loss": 0.054148883717444245,
4
+ "train_runtime": 8090.5095,
5
+ "train_samples_per_second": 61.801,
6
+ "train_steps_per_second": 1.933
7
+ }
trainer_state.json ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.89004,
3
+ "best_model_checkpoint": "../../checkpoint/imdb/bert-large/checkpoint-13294",
4
+ "epoch": 20.0,
5
+ "eval_steps": 500,
6
+ "global_step": 15640,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.0,
13
+ "eval_accuracy": 0.88516,
14
+ "eval_loss": 0.2848346531391144,
15
+ "eval_runtime": 108.2274,
16
+ "eval_samples_per_second": 230.995,
17
+ "eval_steps_per_second": 0.906,
18
+ "step": 782
19
+ },
20
+ {
21
+ "epoch": 1.28,
22
+ "learning_rate": 4.680306905370844e-05,
23
+ "loss": 0.3133,
24
+ "step": 1000
25
+ },
26
+ {
27
+ "epoch": 2.0,
28
+ "eval_accuracy": 0.88884,
29
+ "eval_loss": 0.30378255248069763,
30
+ "eval_runtime": 107.9566,
31
+ "eval_samples_per_second": 231.574,
32
+ "eval_steps_per_second": 0.908,
33
+ "step": 1564
34
+ },
35
+ {
36
+ "epoch": 2.56,
37
+ "learning_rate": 4.360613810741688e-05,
38
+ "loss": 0.1751,
39
+ "step": 2000
40
+ },
41
+ {
42
+ "epoch": 3.0,
43
+ "eval_accuracy": 0.87908,
44
+ "eval_loss": 0.5034909248352051,
45
+ "eval_runtime": 108.1984,
46
+ "eval_samples_per_second": 231.057,
47
+ "eval_steps_per_second": 0.906,
48
+ "step": 2346
49
+ },
50
+ {
51
+ "epoch": 3.84,
52
+ "learning_rate": 4.040920716112532e-05,
53
+ "loss": 0.1057,
54
+ "step": 3000
55
+ },
56
+ {
57
+ "epoch": 4.0,
58
+ "eval_accuracy": 0.885,
59
+ "eval_loss": 0.5942110419273376,
60
+ "eval_runtime": 108.005,
61
+ "eval_samples_per_second": 231.471,
62
+ "eval_steps_per_second": 0.907,
63
+ "step": 3128
64
+ },
65
+ {
66
+ "epoch": 5.0,
67
+ "eval_accuracy": 0.87644,
68
+ "eval_loss": 0.5219671726226807,
69
+ "eval_runtime": 107.7329,
70
+ "eval_samples_per_second": 232.055,
71
+ "eval_steps_per_second": 0.91,
72
+ "step": 3910
73
+ },
74
+ {
75
+ "epoch": 5.12,
76
+ "learning_rate": 3.721227621483376e-05,
77
+ "loss": 0.0733,
78
+ "step": 4000
79
+ },
80
+ {
81
+ "epoch": 6.0,
82
+ "eval_accuracy": 0.88228,
83
+ "eval_loss": 0.6980640888214111,
84
+ "eval_runtime": 107.7427,
85
+ "eval_samples_per_second": 232.034,
86
+ "eval_steps_per_second": 0.91,
87
+ "step": 4692
88
+ },
89
+ {
90
+ "epoch": 6.39,
91
+ "learning_rate": 3.40153452685422e-05,
92
+ "loss": 0.0439,
93
+ "step": 5000
94
+ },
95
+ {
96
+ "epoch": 7.0,
97
+ "eval_accuracy": 0.88328,
98
+ "eval_loss": 0.6775364279747009,
99
+ "eval_runtime": 107.8186,
100
+ "eval_samples_per_second": 231.871,
101
+ "eval_steps_per_second": 0.909,
102
+ "step": 5474
103
+ },
104
+ {
105
+ "epoch": 7.67,
106
+ "learning_rate": 3.081841432225064e-05,
107
+ "loss": 0.0371,
108
+ "step": 6000
109
+ },
110
+ {
111
+ "epoch": 8.0,
112
+ "eval_accuracy": 0.88908,
113
+ "eval_loss": 0.6118285059928894,
114
+ "eval_runtime": 108.1846,
115
+ "eval_samples_per_second": 231.086,
116
+ "eval_steps_per_second": 0.906,
117
+ "step": 6256
118
+ },
119
+ {
120
+ "epoch": 8.95,
121
+ "learning_rate": 2.7621483375959077e-05,
122
+ "loss": 0.0277,
123
+ "step": 7000
124
+ },
125
+ {
126
+ "epoch": 9.0,
127
+ "eval_accuracy": 0.88644,
128
+ "eval_loss": 0.7127733826637268,
129
+ "eval_runtime": 108.2656,
130
+ "eval_samples_per_second": 230.914,
131
+ "eval_steps_per_second": 0.905,
132
+ "step": 7038
133
+ },
134
+ {
135
+ "epoch": 10.0,
136
+ "eval_accuracy": 0.8868,
137
+ "eval_loss": 0.7554897665977478,
138
+ "eval_runtime": 108.2355,
139
+ "eval_samples_per_second": 230.978,
140
+ "eval_steps_per_second": 0.905,
141
+ "step": 7820
142
+ },
143
+ {
144
+ "epoch": 10.23,
145
+ "learning_rate": 2.442455242966752e-05,
146
+ "loss": 0.0202,
147
+ "step": 8000
148
+ },
149
+ {
150
+ "epoch": 11.0,
151
+ "eval_accuracy": 0.88876,
152
+ "eval_loss": 0.7617989182472229,
153
+ "eval_runtime": 107.7019,
154
+ "eval_samples_per_second": 232.122,
155
+ "eval_steps_per_second": 0.91,
156
+ "step": 8602
157
+ },
158
+ {
159
+ "epoch": 11.51,
160
+ "learning_rate": 2.122762148337596e-05,
161
+ "loss": 0.0141,
162
+ "step": 9000
163
+ },
164
+ {
165
+ "epoch": 12.0,
166
+ "eval_accuracy": 0.88424,
167
+ "eval_loss": 0.765361487865448,
168
+ "eval_runtime": 108.098,
169
+ "eval_samples_per_second": 231.272,
170
+ "eval_steps_per_second": 0.907,
171
+ "step": 9384
172
+ },
173
+ {
174
+ "epoch": 12.79,
175
+ "learning_rate": 1.80306905370844e-05,
176
+ "loss": 0.0125,
177
+ "step": 10000
178
+ },
179
+ {
180
+ "epoch": 13.0,
181
+ "eval_accuracy": 0.88668,
182
+ "eval_loss": 0.834495484828949,
183
+ "eval_runtime": 108.2866,
184
+ "eval_samples_per_second": 230.869,
185
+ "eval_steps_per_second": 0.905,
186
+ "step": 10166
187
+ },
188
+ {
189
+ "epoch": 14.0,
190
+ "eval_accuracy": 0.8844,
191
+ "eval_loss": 0.8072969317436218,
192
+ "eval_runtime": 107.9547,
193
+ "eval_samples_per_second": 231.579,
194
+ "eval_steps_per_second": 0.908,
195
+ "step": 10948
196
+ },
197
+ {
198
+ "epoch": 14.07,
199
+ "learning_rate": 1.483375959079284e-05,
200
+ "loss": 0.0077,
201
+ "step": 11000
202
+ },
203
+ {
204
+ "epoch": 15.0,
205
+ "eval_accuracy": 0.88872,
206
+ "eval_loss": 0.7046570181846619,
207
+ "eval_runtime": 108.2002,
208
+ "eval_samples_per_second": 231.053,
209
+ "eval_steps_per_second": 0.906,
210
+ "step": 11730
211
+ },
212
+ {
213
+ "epoch": 15.35,
214
+ "learning_rate": 1.163682864450128e-05,
215
+ "loss": 0.0071,
216
+ "step": 12000
217
+ },
218
+ {
219
+ "epoch": 16.0,
220
+ "eval_accuracy": 0.88908,
221
+ "eval_loss": 0.8622098565101624,
222
+ "eval_runtime": 108.34,
223
+ "eval_samples_per_second": 230.755,
224
+ "eval_steps_per_second": 0.905,
225
+ "step": 12512
226
+ },
227
+ {
228
+ "epoch": 16.62,
229
+ "learning_rate": 8.439897698209718e-06,
230
+ "loss": 0.004,
231
+ "step": 13000
232
+ },
233
+ {
234
+ "epoch": 17.0,
235
+ "eval_accuracy": 0.89004,
236
+ "eval_loss": 0.8655158877372742,
237
+ "eval_runtime": 108.4486,
238
+ "eval_samples_per_second": 230.524,
239
+ "eval_steps_per_second": 0.904,
240
+ "step": 13294
241
+ },
242
+ {
243
+ "epoch": 17.9,
244
+ "learning_rate": 5.242966751918159e-06,
245
+ "loss": 0.0031,
246
+ "step": 14000
247
+ },
248
+ {
249
+ "epoch": 18.0,
250
+ "eval_accuracy": 0.88984,
251
+ "eval_loss": 0.9096148610115051,
252
+ "eval_runtime": 108.8181,
253
+ "eval_samples_per_second": 229.741,
254
+ "eval_steps_per_second": 0.901,
255
+ "step": 14076
256
+ },
257
+ {
258
+ "epoch": 19.0,
259
+ "eval_accuracy": 0.88916,
260
+ "eval_loss": 0.945423424243927,
261
+ "eval_runtime": 108.3869,
262
+ "eval_samples_per_second": 230.655,
263
+ "eval_steps_per_second": 0.904,
264
+ "step": 14858
265
+ },
266
+ {
267
+ "epoch": 19.18,
268
+ "learning_rate": 2.0460358056265987e-06,
269
+ "loss": 0.0016,
270
+ "step": 15000
271
+ },
272
+ {
273
+ "epoch": 20.0,
274
+ "eval_accuracy": 0.88868,
275
+ "eval_loss": 0.9621471762657166,
276
+ "eval_runtime": 108.2519,
277
+ "eval_samples_per_second": 230.943,
278
+ "eval_steps_per_second": 0.905,
279
+ "step": 15640
280
+ },
281
+ {
282
+ "epoch": 20.0,
283
+ "step": 15640,
284
+ "total_flos": 1.16491420416e+17,
285
+ "train_loss": 0.054148883717444245,
286
+ "train_runtime": 8090.5095,
287
+ "train_samples_per_second": 61.801,
288
+ "train_steps_per_second": 1.933
289
+ }
290
+ ],
291
+ "logging_steps": 1000,
292
+ "max_steps": 15640,
293
+ "num_input_tokens_seen": 0,
294
+ "num_train_epochs": 20,
295
+ "save_steps": 500,
296
+ "total_flos": 1.16491420416e+17,
297
+ "train_batch_size": 32,
298
+ "trial_name": null,
299
+ "trial_params": null
300
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1de0b7c4ca4c0094bcc2661050e55092639b324d4b6e9ad4f0dbd5c8dd088a2a
3
+ size 4792
vocab.txt ADDED
The diff for this file is too large to render. See raw diff