End of training
Browse files
- README.md +25 -25
- logs/events.out.tfevents.1711206742.ethanmbp.lan.34423.0 +2 -2
- model.safetensors +1 -1
- tokenizer.json +16 -2
- tokenizer_config.json +0 -7
README.md
CHANGED
@@ -15,14 +15,14 @@ should probably proofread and complete it, then remove this comment. -->

This model is a fine-tuned version of [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
-- Loss: 0.
-- : {'precision': 0.
-- C: {'precision': 0.
-- H: {'precision': 0.
-- Overall Precision: 0.
-- Overall Recall: 0.
-- Overall F1: 0.
-- Overall Accuracy: 0.
+- Loss: 0.5985
+- : {'precision': 0.17391304347826086, 'recall': 0.18181818181818182, 'f1': 0.17777777777777776, 'number': 22}
+- C: {'precision': 0.20408163265306123, 'recall': 0.2857142857142857, 'f1': 0.23809523809523808, 'number': 35}
+- H: {'precision': 0.41935483870967744, 'recall': 0.5, 'f1': 0.45614035087719296, 'number': 26}
+- Overall Precision: 0.2621
+- Overall Recall: 0.3253
+- Overall F1: 0.2903
+- Overall Accuracy: 0.8694

## Model description

@@ -51,23 +51,23 @@ The following hyperparameters were used during training:

### Training results

-| Training Loss | Epoch | Step | Validation Loss | | C | H
-|
-| 1.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
+| Training Loss | Epoch | Step | Validation Loss |  | C | H | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy |
+|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
+| 1.3414 | 1.0 | 2 | 0.9941 | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 22} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0 | 0.0 | 0.0 | 0.8182 |
+| 0.6808 | 2.0 | 4 | 0.8831 | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 22} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0 | 0.0 | 0.0 | 0.8182 |
+| 0.5134 | 3.0 | 6 | 0.7517 | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 22} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0 | 0.0 | 0.0 | 0.8182 |
+| 0.4175 | 4.0 | 8 | 0.6992 | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 22} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0 | 0.0 | 0.0 | 0.8182 |
+| 0.3048 | 5.0 | 10 | 0.6476 | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 22} | {'precision': 0.13333333333333333, 'recall': 0.05714285714285714, 'f1': 0.08, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0667 | 0.0241 | 0.0354 | 0.8310 |
+| 0.2767 | 6.0 | 12 | 0.6375 | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 22} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0 | 0.0 | 0.0 | 0.8399 |
+| 0.3514 | 7.0 | 14 | 0.6033 | {'precision': 0.047619047619047616, 'recall': 0.045454545454545456, 'f1': 0.046511627906976744, 'number': 22} | {'precision': 0.047619047619047616, 'recall': 0.02857142857142857, 'f1': 0.03571428571428571, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0476 | 0.0241 | 0.032 | 0.8656 |
+| 0.3766 | 8.0 | 16 | 0.6462 | {'precision': 0.13333333333333333, 'recall': 0.09090909090909091, 'f1': 0.10810810810810811, 'number': 22} | {'precision': 0.06666666666666667, 'recall': 0.02857142857142857, 'f1': 0.04, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.1 | 0.0361 | 0.0531 | 0.8271 |
+| 0.4447 | 9.0 | 18 | 0.6570 | {'precision': 0.06666666666666667, 'recall': 0.045454545454545456, 'f1': 0.05405405405405406, 'number': 22} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 35} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 26} | 0.0333 | 0.0120 | 0.0177 | 0.8182 |
+| 0.2359 | 10.0 | 20 | 0.6297 | {'precision': 0.15, 'recall': 0.13636363636363635, 'f1': 0.14285714285714282, 'number': 22} | {'precision': 0.08333333333333333, 'recall': 0.05714285714285714, 'f1': 0.06779661016949153, 'number': 35} | {'precision': 0.5, 'recall': 0.07692307692307693, 'f1': 0.13333333333333336, 'number': 26} | 0.1458 | 0.0843 | 0.1069 | 0.8438 |
+| 0.2136 | 11.0 | 22 | 0.6072 | {'precision': 0.20833333333333334, 'recall': 0.22727272727272727, 'f1': 0.21739130434782608, 'number': 22} | {'precision': 0.16666666666666666, 'recall': 0.17142857142857143, 'f1': 0.16901408450704225, 'number': 35} | {'precision': 0.42857142857142855, 'recall': 0.23076923076923078, 'f1': 0.3, 'number': 26} | 0.2297 | 0.2048 | 0.2166 | 0.8617 |
+| 0.2114 | 12.0 | 24 | 0.5978 | {'precision': 0.17391304347826086, 'recall': 0.18181818181818182, 'f1': 0.17777777777777776, 'number': 22} | {'precision': 0.1951219512195122, 'recall': 0.22857142857142856, 'f1': 0.21052631578947367, 'number': 35} | {'precision': 0.4090909090909091, 'recall': 0.34615384615384615, 'f1': 0.37500000000000006, 'number': 26} | 0.2442 | 0.2530 | 0.2485 | 0.8656 |
+| 0.1826 | 13.0 | 26 | 0.5982 | {'precision': 0.17391304347826086, 'recall': 0.18181818181818182, 'f1': 0.17777777777777776, 'number': 22} | {'precision': 0.18181818181818182, 'recall': 0.22857142857142856, 'f1': 0.20253164556962025, 'number': 35} | {'precision': 0.4230769230769231, 'recall': 0.4230769230769231, 'f1': 0.4230769230769231, 'number': 26} | 0.2473 | 0.2771 | 0.2614 | 0.8668 |
+| 0.1861 | 14.0 | 28 | 0.5983 | {'precision': 0.17391304347826086, 'recall': 0.18181818181818182, 'f1': 0.17777777777777776, 'number': 22} | {'precision': 0.21739130434782608, 'recall': 0.2857142857142857, 'f1': 0.24691358024691357, 'number': 35} | {'precision': 0.4642857142857143, 'recall': 0.5, 'f1': 0.4814814814814815, 'number': 26} | 0.2784 | 0.3253 | 0.3000 | 0.8707 |
+| 0.2442 | 15.0 | 30 | 0.5985 | {'precision': 0.17391304347826086, 'recall': 0.18181818181818182, 'f1': 0.17777777777777776, 'number': 22} | {'precision': 0.20408163265306123, 'recall': 0.2857142857142857, 'f1': 0.23809523809523808, 'number': 35} | {'precision': 0.41935483870967744, 'recall': 0.5, 'f1': 0.45614035087719296, 'number': 26} | 0.2621 | 0.3253 | 0.2903 | 0.8694 |


### Framework versions
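The per-class dicts and the overall precision/recall/F1 above look like the usual seqeval-style, entity-level metrics that the Trainer's token-classification examples report. A minimal sketch of how such numbers are typically computed; the label names below are placeholders, not taken from this commit:

```python
# Hypothetical sketch: entity-level metrics over BIO-tagged sequences with seqeval.
from seqeval.metrics import precision_score, recall_score, f1_score, accuracy_score

# Toy references/predictions; the real label set behind "", "C" and "H" is not shown in the card.
references  = [["O", "B-C", "I-C", "O", "B-H"]]
predictions = [["O", "B-C", "O",   "O", "B-H"]]

print(precision_score(references, predictions))  # entity-level precision
print(recall_score(references, predictions))     # entity-level recall
print(f1_score(references, predictions))         # entity-level F1
print(accuracy_score(references, predictions))   # token-level accuracy (cf. "Overall Accuracy")
```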
logs/events.out.tfevents.1711206742.ethanmbp.lan.34423.0
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4dd1690302c8cf0e618e67ea0050ba7cdade6a13b84f09d323765666fa11464f
+size 15590
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:099bb93cfe5e201ed12dac7973bee497c7c6d011e6e4135fa217f500c0fcfcb9
size 450552060
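The log and model.safetensors entries above are Git LFS pointer files (a version line, a sha256 oid, and a byte size); the actual weights live in LFS storage. A small sketch, assuming the repository has been cloned and `git lfs pull` has materialized the real file:

```python
# Sketch only: load the resolved safetensors file into a plain state dict.
from safetensors.torch import load_file

state_dict = load_file("model.safetensors")  # fails on a raw LFS pointer; needs the real ~450 MB file
print(len(state_dict), "tensors")
```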
tokenizer.json
CHANGED
@@ -1,7 +1,21 @@
{
  "version": "1.0",
-  "truncation": null,
-  "padding": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 512,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
+  "padding": {
+    "strategy": {
+      "Fixed": 512
+    },
+    "direction": "Right",
+    "pad_to_multiple_of": null,
+    "pad_id": 0,
+    "pad_type_id": 0,
+    "pad_token": "[PAD]"
+  },
  "added_tokens": [
    {
      "id": 0,
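The new "truncation" and "padding" blocks in tokenizer.json are what the `tokenizers` library serializes once fixed-length truncation and padding are enabled on the fast tokenizer backend. A rough equivalent as a sketch, not the script that produced this commit:

```python
# Sketch: reproduce the serialized settings (truncate to 512, pad to a fixed 512, [PAD] id 0).
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
tok.enable_truncation(max_length=512, stride=0, strategy="longest_first")
tok.enable_padding(length=512, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]")
tok.save("tokenizer.json")  # writes out the "truncation"/"padding" sections shown in the diff above
```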
tokenizer_config.json
CHANGED
@@ -54,11 +54,9 @@
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "mask_token": "[MASK]",
-  "max_length": 512,
  "model_max_length": 512,
  "never_split": null,
  "only_label_first_subword": true,
-  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_box": [
    0,
@@ -67,8 +65,6 @@
    0
  ],
  "pad_token_label": -100,
-  "pad_token_type_id": 0,
-  "padding_side": "right",
  "processor_class": "LayoutLMv2Processor",
  "sep_token": "[SEP]",
  "sep_token_box": [
@@ -77,11 +73,8 @@
    1000,
    1000
  ],
-  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "LayoutLMv2Tokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}
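After this commit, sequence-length handling lives in tokenizer.json rather than in the removed tokenizer_config.json keys (max_length, stride, truncation_strategy, and friends), while "model_max_length": 512 stays in tokenizer_config.json. A minimal loading sketch; the repo id is a placeholder, not part of this commit:

```python
# Sketch: load the uploaded tokenizer and fine-tuned token-classification model.
from transformers import AutoTokenizer, AutoModelForTokenClassification

repo_id = "your-username/layoutlm-base-uncased-finetuned"  # hypothetical
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForTokenClassification.from_pretrained(repo_id)

print(tokenizer.model_max_length)  # 512, from "model_max_length" in tokenizer_config.json
```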