5300-biased-word-detection
- README.md +13 -13
- config.json +1 -1
- model.safetensors +1 -1
- tokenizer_config.json +1 -0
- training_args.bin +2 -2
README.md
CHANGED
@@ -21,11 +21,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.
-- Precision: 0.
-- Recall: 0.
-- F1: 0.
-- Accuracy: 0.
+- Loss: 0.5440
+- Precision: 0.3143
+- Recall: 0.2170
+- F1: 0.2568
+- Accuracy: 0.8900
 
 ## Model description
 
@@ -56,17 +56,17 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
 |:-------------:|:------:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
-| 0.
+| 0.2733 | 0.4292 | 100 | 0.4491 | 0.3382 | 0.1454 | 0.2033 | 0.9003 |
+| 0.2635 | 0.8584 | 200 | 0.4566 | 0.3327 | 0.1848 | 0.2377 | 0.8962 |
+| 0.202  | 1.2876 | 300 | 0.5266 | 0.3377 | 0.1599 | 0.2171 | 0.8990 |
+| 0.1981 | 1.7167 | 400 | 0.5384 | 0.3529 | 0.1495 | 0.2101 | 0.9016 |
+| 0.1904 | 2.1459 | 500 | 0.5169 | 0.3004 | 0.2399 | 0.2667 | 0.8846 |
+| 0.1682 | 2.5751 | 600 | 0.5660 | 0.3339 | 0.1963 | 0.2472 | 0.8954 |
 
 
 ### Framework versions
 
-- Transformers 4.
+- Transformers 4.47.0
 - Pytorch 2.5.1+cu121
 - Datasets 3.2.0
-- Tokenizers 0.
+- Tokenizers 0.21.0
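With the metrics filled in, the card describes a usable token-classification checkpoint: token accuracy around 0.89, with lower precision and recall on the labeled spans. A minimal usage sketch follows; the model id is an assumption taken from this repo's name, so substitute the real hub path or a local clone:

```python
# Hedged sketch: run the fine-tuned checkpoint as a token-classification
# pipeline. "5300-biased-word-detection" is a hypothetical id based on
# this repo's name; point it at the actual hub path or local directory.
from transformers import pipeline

detector = pipeline(
    "token-classification",
    model="5300-biased-word-detection",
    aggregation_strategy="simple",  # merge word pieces back into whole words
)

for hit in detector("Everyone knows politicians always lie."):
    print(hit["word"], hit["entity_group"], round(hit["score"], 3))
```

`aggregation_strategy="simple"` groups subword predictions into word-level spans, which is usually what you want when flagging individual biased words.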
config.json
CHANGED
@@ -25,7 +25,7 @@
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.47.0",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 50265
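The only edit here is the recorded `transformers_version`, which tracks the library that wrote the checkpoint. A quick sketch, assuming the repo files sit in the working directory, that reads the saved version back and compares it with the installed one:

```python
# Hedged sketch: compare the version recorded in config.json with the
# locally installed transformers. "." is assumed to hold the repo files.
import transformers
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")
print("checkpoint saved with transformers", config.transformers_version)  # 4.47.0
print("running transformers", transformers.__version__)
```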
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:949cf9f33ed21b2ec68af94c78f67e7f6f85df5070ff2dd584e530d72a9bd459
 size 496250232
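The weights themselves never enter the git history: the repo stores this git-lfs pointer, and the roughly 496 MB blob lives in LFS storage, addressed by the `oid sha256` above. A small sketch, assuming the blob has been downloaded next to the script, that recomputes the digest and checks it against the new pointer:

```python
# Hedged sketch: verify a downloaded model.safetensors against the sha256
# recorded in the git-lfs pointer. The file path is an assumption.
import hashlib

EXPECTED = "949cf9f33ed21b2ec68af94c78f67e7f6f85df5070ff2dd584e530d72a9bd459"

digest = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED, "checksum mismatch: re-download the blob"
print("model.safetensors matches the pointer")
```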
tokenizer_config.json
CHANGED
@@ -47,6 +47,7 @@
   "cls_token": "<s>",
   "eos_token": "</s>",
   "errors": "replace",
+  "extra_special_tokens": {},
   "mask_token": "<mask>",
   "model_max_length": 512,
   "pad_token": "<pad>",
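The added `extra_special_tokens` key is written out by newer tokenizer serialization; the surrounding settings are unchanged. A short sketch, again assuming the files live in the working directory, that loads the tokenizer and confirms the values visible in this diff:

```python
# Hedged sketch: load the tokenizer and check the settings shown above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # directory with tokenizer_config.json
print(tok.model_max_length)                          # 512
print(tok.cls_token, tok.mask_token, tok.pad_token)  # <s> <mask> <pad>

# Long inputs must be truncated to model_max_length before inference.
enc = tok("some very long document " * 200, truncation=True)
print(len(enc["input_ids"]))  # capped at 512
```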
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e39f762a2751fee49b68c8831d061741e73e55051e13c6e089ee385be29a9729
+size 5304
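training_args.bin is also an LFS pointer; the 5304-byte blob behind it is the pickled `TrainingArguments` object saved by the `Trainer`. A sketch for inspecting it, with the standard caveat that unpickling executes arbitrary code, so only load files from sources you trust:

```python
# Hedged sketch: inspect the pickled TrainingArguments. weights_only=False
# is required because this is a full Python object, not a tensor file;
# only do this with trusted checkpoints.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```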