dandoune committed
Commit 884e923 · verified · 1 Parent(s): 815789b

End of training

Files changed (4)
  1. README.md +139 -139
  2. model.safetensors +1 -1
  3. tokenizer.json +2 -14
  4. training_args.bin +1 -1
README.md CHANGED
@@ -21,21 +21,21 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [nlpaueb/legal-bert-base-uncased](https://huggingface.co/nlpaueb/legal-bert-base-uncased) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.0887
- - Accuracy: 0.9845
- - Precision: 0.9232
- - Recall: 0.9286
- - F1: 0.9259
+ - Loss: 0.0922
+ - Accuracy: 0.9840
+ - Precision: 0.9221
+ - Recall: 0.9260
+ - F1: 0.9240
  - Classification Report: precision recall f1-score support
 
  LOC 0.94 0.96 0.95 1837
- MISC 0.87 0.87 0.87 922
- ORG 0.89 0.88 0.89 1341
- PER 0.95 0.96 0.96 1842
+ MISC 0.88 0.87 0.87 922
+ ORG 0.88 0.87 0.88 1341
+ PER 0.96 0.96 0.96 1842
 
- micro avg 0.92 0.93 0.93 5942
- macro avg 0.91 0.92 0.92 5942
- weighted avg 0.92 0.93 0.93 5942
+ micro avg 0.92 0.93 0.92 5942
+ macro avg 0.91 0.91 0.91 5942
+ weighted avg 0.92 0.93 0.92 5942
 
 
  ## Model description
@@ -68,209 +68,209 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | Classification Report |
  |:-------------:|:------:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|
- | 0.1403 | 0.2668 | 500 | 0.1145 | 0.9685 | 0.8296 | 0.8334 | 0.8315 | precision recall f1-score support
+ | 0.1354 | 0.2668 | 500 | 0.1185 | 0.9686 | 0.8405 | 0.8304 | 0.8354 | precision recall f1-score support
 
- LOC 0.85 0.93 0.89 1837
- MISC 0.72 0.79 0.75 922
- ORG 0.81 0.57 0.67 1341
- PER 0.87 0.95 0.91 1842
+ LOC 0.83 0.94 0.88 1837
+ MISC 0.78 0.70 0.74 922
+ ORG 0.81 0.62 0.70 1341
+ PER 0.90 0.94 0.92 1842
 
- micro avg 0.83 0.83 0.83 5942
- macro avg 0.81 0.81 0.80 5942
- weighted avg 0.83 0.83 0.82 5942
+ micro avg 0.84 0.83 0.84 5942
+ macro avg 0.83 0.80 0.81 5942
+ weighted avg 0.84 0.83 0.83 5942
  |
- | 0.0978 | 0.5336 | 1000 | 0.1046 | 0.9734 | 0.8630 | 0.8679 | 0.8654 | precision recall f1-score support
+ | 0.0971 | 0.5336 | 1000 | 0.1045 | 0.9744 | 0.8578 | 0.8721 | 0.8649 | precision recall f1-score support
 
- LOC 0.85 0.94 0.89 1837
- MISC 0.89 0.69 0.78 922
- ORG 0.78 0.77 0.77 1341
- PER 0.92 0.96 0.94 1842
+ LOC 0.86 0.96 0.91 1837
+ MISC 0.89 0.71 0.79 922
+ ORG 0.78 0.74 0.76 1341
+ PER 0.89 0.96 0.93 1842
 
- micro avg 0.86 0.87 0.87 5942
+ micro avg 0.86 0.87 0.86 5942
  macro avg 0.86 0.84 0.85 5942
  weighted avg 0.86 0.87 0.86 5942
  |
- | 0.1052 | 0.8004 | 1500 | 0.0852 | 0.9770 | 0.8901 | 0.8782 | 0.8841 | precision recall f1-score support
+ | 0.097 | 0.8004 | 1500 | 0.0849 | 0.9776 | 0.8884 | 0.8812 | 0.8848 | precision recall f1-score support
 
- LOC 0.94 0.91 0.92 1837
- MISC 0.85 0.77 0.81 922
- ORG 0.77 0.86 0.82 1341
- PER 0.96 0.91 0.93 1842
+ LOC 0.93 0.91 0.92 1837
+ MISC 0.77 0.82 0.79 922
+ ORG 0.82 0.83 0.82 1341
+ PER 0.96 0.92 0.94 1842
 
  micro avg 0.89 0.88 0.88 5942
- macro avg 0.88 0.86 0.87 5942
+ macro avg 0.87 0.87 0.87 5942
  weighted avg 0.89 0.88 0.89 5942
  |
- | 0.0532 | 1.0672 | 2000 | 0.0779 | 0.9810 | 0.9041 | 0.9091 | 0.9066 | precision recall f1-score support
+ | 0.0522 | 1.0672 | 2000 | 0.0838 | 0.9791 | 0.9014 | 0.8955 | 0.8984 | precision recall f1-score support
 
  LOC 0.93 0.95 0.94 1837
- MISC 0.83 0.80 0.81 922
- ORG 0.85 0.86 0.86 1341
- PER 0.95 0.96 0.95 1842
+ MISC 0.82 0.81 0.82 922
+ ORG 0.88 0.79 0.83 1341
+ PER 0.93 0.97 0.95 1842
 
- micro avg 0.90 0.91 0.91 5942
- macro avg 0.89 0.89 0.89 5942
- weighted avg 0.90 0.91 0.91 5942
+ micro avg 0.90 0.90 0.90 5942
+ macro avg 0.89 0.88 0.88 5942
+ weighted avg 0.90 0.90 0.90 5942
  |
- | 0.0507 | 1.3340 | 2500 | 0.0739 | 0.9819 | 0.9094 | 0.9073 | 0.9083 | precision recall f1-score support
+ | 0.0491 | 1.3340 | 2500 | 0.0734 | 0.9814 | 0.9021 | 0.9088 | 0.9054 | precision recall f1-score support
 
- LOC 0.95 0.93 0.94 1837
- MISC 0.84 0.86 0.85 922
- ORG 0.87 0.83 0.85 1341
- PER 0.93 0.96 0.95 1842
+ LOC 0.92 0.95 0.93 1837
+ MISC 0.86 0.82 0.84 922
+ ORG 0.84 0.85 0.84 1341
+ PER 0.95 0.96 0.96 1842
 
- micro avg 0.91 0.91 0.91 5942
- macro avg 0.90 0.90 0.90 5942
- weighted avg 0.91 0.91 0.91 5942
+ micro avg 0.90 0.91 0.91 5942
+ macro avg 0.89 0.89 0.89 5942
+ weighted avg 0.90 0.91 0.91 5942
  |
- | 0.0451 | 1.6009 | 3000 | 0.0816 | 0.9791 | 0.8883 | 0.9022 | 0.8952 | precision recall f1-score support
+ | 0.0435 | 1.6009 | 3000 | 0.0891 | 0.9776 | 0.8685 | 0.8972 | 0.8826 | precision recall f1-score support
 
- LOC 0.92 0.94 0.93 1837
- MISC 0.81 0.81 0.81 922
- ORG 0.80 0.88 0.84 1341
- PER 0.96 0.93 0.95 1842
+ LOC 0.93 0.94 0.94 1837
+ MISC 0.78 0.82 0.80 922
+ ORG 0.74 0.90 0.81 1341
+ PER 0.97 0.89 0.93 1842
 
- micro avg 0.89 0.90 0.90 5942
- macro avg 0.88 0.89 0.88 5942
- weighted avg 0.89 0.90 0.90 5942
+ micro avg 0.87 0.90 0.88 5942
+ macro avg 0.86 0.89 0.87 5942
+ weighted avg 0.88 0.90 0.89 5942
  |
- | 0.0397 | 1.8677 | 3500 | 0.0755 | 0.9812 | 0.9033 | 0.9135 | 0.9084 | precision recall f1-score support
+ | 0.0341 | 1.8677 | 3500 | 0.0777 | 0.9813 | 0.9072 | 0.9111 | 0.9092 | precision recall f1-score support
 
- LOC 0.90 0.96 0.93 1837
- MISC 0.83 0.86 0.85 922
- ORG 0.88 0.83 0.86 1341
- PER 0.95 0.95 0.95 1842
+ LOC 0.91 0.96 0.94 1837
+ MISC 0.87 0.84 0.85 922
+ ORG 0.86 0.83 0.85 1341
+ PER 0.95 0.96 0.95 1842
 
- micro avg 0.90 0.91 0.91 5942
- macro avg 0.89 0.90 0.90 5942
- weighted avg 0.90 0.91 0.91 5942
+ micro avg 0.91 0.91 0.91 5942
+ macro avg 0.90 0.90 0.90 5942
+ weighted avg 0.91 0.91 0.91 5942
  |
- | 0.0211 | 2.1345 | 4000 | 0.0895 | 0.9814 | 0.9173 | 0.9130 | 0.9151 | precision recall f1-score support
+ | 0.0246 | 2.1345 | 4000 | 0.0838 | 0.9813 | 0.8991 | 0.9174 | 0.9081 | precision recall f1-score support
 
  LOC 0.92 0.96 0.94 1837
- MISC 0.88 0.83 0.86 922
- ORG 0.88 0.86 0.87 1341
- PER 0.96 0.95 0.95 1842
+ MISC 0.86 0.82 0.84 922
+ ORG 0.87 0.85 0.86 1341
+ PER 0.92 0.97 0.95 1842
 
- micro avg 0.92 0.91 0.92 5942
- macro avg 0.91 0.90 0.90 5942
- weighted avg 0.92 0.91 0.91 5942
+ micro avg 0.90 0.92 0.91 5942
+ macro avg 0.89 0.90 0.90 5942
+ weighted avg 0.90 0.92 0.91 5942
  |
- | 0.0224 | 2.4013 | 4500 | 0.0840 | 0.9815 | 0.9005 | 0.9110 | 0.9057 | precision recall f1-score support
+ | 0.0205 | 2.4013 | 4500 | 0.0764 | 0.9830 | 0.9104 | 0.9204 | 0.9154 | precision recall f1-score support
 
- LOC 0.92 0.95 0.93 1837
- MISC 0.88 0.84 0.86 922
+ LOC 0.96 0.94 0.95 1837
+ MISC 0.84 0.86 0.85 922
  ORG 0.82 0.88 0.85 1341
- PER 0.96 0.93 0.94 1842
+ PER 0.96 0.96 0.96 1842
 
- micro avg 0.90 0.91 0.91 5942
- macro avg 0.89 0.90 0.90 5942
- weighted avg 0.90 0.91 0.91 5942
+ micro avg 0.91 0.92 0.92 5942
+ macro avg 0.90 0.91 0.90 5942
+ weighted avg 0.91 0.92 0.92 5942
  |
- | 0.0285 | 2.6681 | 5000 | 0.0770 | 0.9823 | 0.9143 | 0.9157 | 0.9150 | precision recall f1-score support
+ | 0.022 | 2.6681 | 5000 | 0.0856 | 0.9819 | 0.9051 | 0.9192 | 0.9121 | precision recall f1-score support
 
- LOC 0.93 0.94 0.94 1837
- MISC 0.89 0.83 0.86 922
- ORG 0.86 0.87 0.87 1341
- PER 0.94 0.97 0.95 1842
+ LOC 0.92 0.96 0.94 1837
+ MISC 0.87 0.84 0.85 922
+ ORG 0.85 0.85 0.85 1341
+ PER 0.95 0.97 0.96 1842
 
  micro avg 0.91 0.92 0.91 5942
- macro avg 0.91 0.90 0.90 5942
- weighted avg 0.91 0.92 0.91 5942
+ macro avg 0.90 0.90 0.90 5942
+ weighted avg 0.90 0.92 0.91 5942
  |
- | 0.0256 | 2.9349 | 5500 | 0.0779 | 0.9840 | 0.9260 | 0.9244 | 0.9252 | precision recall f1-score support
+ | 0.0244 | 2.9349 | 5500 | 0.0850 | 0.9829 | 0.9142 | 0.9194 | 0.9168 | precision recall f1-score support
 
- LOC 0.94 0.95 0.94 1837
- MISC 0.89 0.86 0.87 922
- ORG 0.90 0.87 0.89 1341
- PER 0.95 0.97 0.96 1842
+ LOC 0.94 0.96 0.95 1837
+ MISC 0.88 0.84 0.86 922
+ ORG 0.86 0.85 0.86 1341
+ PER 0.95 0.96 0.95 1842
 
- micro avg 0.93 0.92 0.93 5942
- macro avg 0.92 0.91 0.92 5942
- weighted avg 0.93 0.92 0.92 5942
+ micro avg 0.91 0.92 0.92 5942
+ macro avg 0.91 0.91 0.91 5942
+ weighted avg 0.91 0.92 0.92 5942
  |
- | 0.0135 | 3.2017 | 6000 | 0.0878 | 0.9831 | 0.9111 | 0.9229 | 0.9170 | precision recall f1-score support
+ | 0.0166 | 3.2017 | 6000 | 0.0861 | 0.9834 | 0.9187 | 0.9191 | 0.9189 | precision recall f1-score support
 
- LOC 0.93 0.95 0.94 1837
- MISC 0.82 0.87 0.84 922
- ORG 0.89 0.86 0.87 1341
- PER 0.95 0.97 0.96 1842
+ LOC 0.94 0.96 0.95 1837
+ MISC 0.90 0.84 0.87 922
+ ORG 0.86 0.87 0.87 1341
+ PER 0.94 0.96 0.95 1842
 
- micro avg 0.91 0.92 0.92 5942
- macro avg 0.90 0.91 0.90 5942
- weighted avg 0.91 0.92 0.92 5942
+ micro avg 0.92 0.92 0.92 5942
+ macro avg 0.91 0.91 0.91 5942
+ weighted avg 0.92 0.92 0.92 5942
  |
- | 0.0112 | 3.4685 | 6500 | 0.0845 | 0.9835 | 0.9131 | 0.9265 | 0.9197 | precision recall f1-score support
+ | 0.0094 | 3.4685 | 6500 | 0.0905 | 0.9840 | 0.9202 | 0.9236 | 0.9219 | precision recall f1-score support
 
- LOC 0.93 0.96 0.94 1837
- MISC 0.85 0.88 0.86 922
- ORG 0.87 0.88 0.87 1341
- PER 0.96 0.96 0.96 1842
+ LOC 0.95 0.96 0.95 1837
+ MISC 0.89 0.86 0.88 922
+ ORG 0.85 0.88 0.86 1341
+ PER 0.96 0.95 0.96 1842
 
- micro avg 0.91 0.93 0.92 5942
- macro avg 0.90 0.92 0.91 5942
- weighted avg 0.91 0.93 0.92 5942
+ micro avg 0.92 0.92 0.92 5942
+ macro avg 0.91 0.91 0.91 5942
+ weighted avg 0.92 0.92 0.92 5942
  |
- | 0.0119 | 3.7353 | 7000 | 0.0901 | 0.9837 | 0.9230 | 0.9219 | 0.9225 | precision recall f1-score support
+ | 0.0123 | 3.7353 | 7000 | 0.0927 | 0.9837 | 0.9239 | 0.9219 | 0.9229 | precision recall f1-score support
 
- LOC 0.94 0.96 0.95 1837
- MISC 0.90 0.83 0.86 922
- ORG 0.88 0.88 0.88 1341
- PER 0.95 0.96 0.96 1842
+ LOC 0.95 0.95 0.95 1837
+ MISC 0.86 0.85 0.86 922
+ ORG 0.90 0.87 0.88 1341
+ PER 0.95 0.97 0.96 1842
 
  micro avg 0.92 0.92 0.92 5942
- macro avg 0.92 0.91 0.91 5942
+ macro avg 0.91 0.91 0.91 5942
  weighted avg 0.92 0.92 0.92 5942
  |
- | 0.0132 | 4.0021 | 7500 | 0.0902 | 0.9843 | 0.9262 | 0.9248 | 0.9255 | precision recall f1-score support
+ | 0.0097 | 4.0021 | 7500 | 0.0947 | 0.9839 | 0.9279 | 0.9221 | 0.9250 | precision recall f1-score support
 
- LOC 0.93 0.96 0.95 1837
- MISC 0.89 0.86 0.87 922
- ORG 0.91 0.87 0.89 1341
+ LOC 0.95 0.96 0.95 1837
+ MISC 0.88 0.85 0.87 922
+ ORG 0.90 0.86 0.88 1341
  PER 0.95 0.96 0.96 1842
 
- micro avg 0.93 0.92 0.93 5942
- macro avg 0.92 0.91 0.92 5942
- weighted avg 0.93 0.92 0.93 5942
+ micro avg 0.93 0.92 0.92 5942
+ macro avg 0.92 0.91 0.91 5942
+ weighted avg 0.93 0.92 0.92 5942
  |
- | 0.006 | 4.2689 | 8000 | 0.0914 | 0.9844 | 0.9233 | 0.9273 | 0.9253 | precision recall f1-score support
+ | 0.0049 | 4.2689 | 8000 | 0.0903 | 0.9840 | 0.9248 | 0.9251 | 0.9250 | precision recall f1-score support
 
  LOC 0.94 0.96 0.95 1837
- MISC 0.88 0.86 0.87 922
- ORG 0.88 0.89 0.88 1341
- PER 0.96 0.96 0.96 1842
+ MISC 0.90 0.85 0.87 922
+ ORG 0.87 0.88 0.88 1341
+ PER 0.95 0.96 0.96 1842
 
- micro avg 0.92 0.93 0.93 5942
- macro avg 0.92 0.92 0.92 5942
- weighted avg 0.92 0.93 0.93 5942
+ micro avg 0.92 0.93 0.92 5942
+ macro avg 0.92 0.91 0.91 5942
+ weighted avg 0.92 0.93 0.92 5942
  |
- | 0.005 | 4.5358 | 8500 | 0.0919 | 0.9846 | 0.9284 | 0.9268 | 0.9276 | precision recall f1-score support
+ | 0.0037 | 4.5358 | 8500 | 0.0903 | 0.9843 | 0.9235 | 0.9283 | 0.9259 | precision recall f1-score support
 
- LOC 0.95 0.96 0.95 1837
- MISC 0.90 0.85 0.87 922
- ORG 0.90 0.88 0.89 1341
- PER 0.95 0.97 0.96 1842
+ LOC 0.94 0.96 0.95 1837
+ MISC 0.89 0.86 0.88 922
+ ORG 0.88 0.88 0.88 1341
+ PER 0.95 0.96 0.96 1842
 
- micro avg 0.93 0.93 0.93 5942
- macro avg 0.92 0.91 0.92 5942
- weighted avg 0.93 0.93 0.93 5942
+ micro avg 0.92 0.93 0.93 5942
+ macro avg 0.92 0.92 0.92 5942
+ weighted avg 0.92 0.93 0.93 5942
  |
- | 0.0062 | 4.8026 | 9000 | 0.0887 | 0.9845 | 0.9232 | 0.9286 | 0.9259 | precision recall f1-score support
+ | 0.0038 | 4.8026 | 9000 | 0.0922 | 0.9840 | 0.9221 | 0.9260 | 0.9240 | precision recall f1-score support
 
  LOC 0.94 0.96 0.95 1837
- MISC 0.87 0.87 0.87 922
- ORG 0.89 0.88 0.89 1341
- PER 0.95 0.96 0.96 1842
+ MISC 0.88 0.87 0.87 922
+ ORG 0.88 0.87 0.88 1341
+ PER 0.96 0.96 0.96 1842
 
- micro avg 0.92 0.93 0.93 5942
- macro avg 0.91 0.92 0.92 5942
- weighted avg 0.92 0.93 0.93 5942
+ micro avg 0.92 0.93 0.92 5942
+ macro avg 0.91 0.91 0.91 5942
+ weighted avg 0.92 0.93 0.92 5942
  |
 
 
  ### Framework versions
 
  - Transformers 4.47.1
- - Pytorch 2.5.1+cpu
+ - Pytorch 2.3.1+cpu
  - Datasets 3.2.0
  - Tokenizers 0.21.0
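The updated card reports token-level NER metrics over the LOC, MISC, ORG and PER classes. As a minimal sketch (not part of the card), this is how a token-classification checkpoint like this one is typically loaded for inference with `transformers`; the repo id below is a hypothetical placeholder, since the actual repository name is not shown on this page:

```python
# Minimal inference sketch for a fine-tuned token-classification model.
# Assumption: "dandoune/legal-bert-ner" is a placeholder repo id.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="dandoune/legal-bert-ner",  # hypothetical repo id
    aggregation_strategy="simple",    # merge word pieces into entity spans
)

for entity in ner("The European Court of Justice ruled in favour of Acme Corp in Luxembourg."):
    # entity_group should be one of LOC / MISC / ORG / PER per the card's reports
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```

`aggregation_strategy="simple"` merges BERT word pieces back into whole-entity spans, matching the entity-level granularity of the classification reports above.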
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:165e72e887066034c0e8b42aa5da40b8eecf0035af54db60ea150c7221090277
+ oid sha256:89373e3b40383ce7f986f11010c12cdcb2533e018535bf5322bd87e01e6fc723
  size 435617620
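The weights are stored through Git LFS, so the diff above touches only the pointer file: `oid` is the SHA-256 of the actual file content, and an unchanged `size` with a new `oid` means same-sized but different weights. A small standard-library sketch for checking a downloaded artifact against its pointer:

```python
# Verify a downloaded file against its git-lfs pointer: the pointer's
# "oid sha256:<hex>" must equal the SHA-256 of the file's content.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected value taken from the new pointer above.
expected = "89373e3b40383ce7f986f11010c12cdcb2533e018535bf5322bd87e01e6fc723"
assert sha256_of("model.safetensors") == expected
```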
tokenizer.json CHANGED
@@ -1,19 +1,7 @@
  {
    "version": "1.0",
-   "truncation": {
-     "direction": "Right",
-     "max_length": 512,
-     "strategy": "LongestFirst",
-     "stride": 0
-   },
-   "padding": {
-     "strategy": "BatchLongest",
-     "direction": "Right",
-     "pad_to_multiple_of": null,
-     "pad_id": 0,
-     "pad_type_id": 0,
-     "pad_token": "[PAD]"
-   },
+   "truncation": null,
+   "padding": null,
    "added_tokens": [
      {
        "id": 0,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8749740048b56887c0dc8f41f3c658d8e03e816b876a097ab9a0497ed75f4cf2
+ oid sha256:de62d8b0544990b5fcec003768f11a8e0d76bce15990c4e2726cf84bd5d6df80
  size 5304
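`training_args.bin` is the torch-serialized `TrainingArguments` object that `transformers.Trainer` writes next to its checkpoints, which is why it changes whenever a run is re-launched with a different configuration. A sketch of inspecting it (unpickling requires `transformers` to be importable; attribute names are standard `TrainingArguments` fields):

```python
# Inspect the serialized training configuration saved by the Trainer.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # expected: TrainingArguments
print(args.learning_rate, args.num_train_epochs, args.seed)
```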