mustafoyev202 committed (verified)
Commit 495e5ee
Parent(s): 5a0b04b

mustafoyev202/uzbek_ner

Files changed (4)
  1. README.md +13 -10
  2. config.json +48 -16
  3. model.safetensors +2 -2
  4. training_args.bin +2 -2
README.md CHANGED
@@ -25,11 +25,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the Uzbek Ner dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.1458
-- Precision: 0.5927
-- Recall: 0.6370
-- F1: 0.6140
-- Accuracy: 0.9479
+- Loss: 0.1761
+- Precision: 0.5870
+- Recall: 0.6354
+- F1: 0.6102
+- Accuracy: 0.9386
 
 ## Model description
 
@@ -55,8 +55,8 @@ The following hyperparameters were used during training:
 - gradient_accumulation_steps: 8
 - total_train_batch_size: 64
 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
-- lr_scheduler_type: cosine
-- lr_scheduler_warmup_ratio: 0.1
+- lr_scheduler_type: cosine_with_restarts
+- lr_scheduler_warmup_ratio: 0.08
 - num_epochs: 3
 - mixed_precision_training: Native AMP
 
@@ -64,9 +64,12 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
 |:-------------:|:------:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
-| 0.1673 | 0.9324 | 200 | 0.1592 | 0.5325 | 0.6170 | 0.5717 | 0.9438 |
-| 0.1503 | 1.8625 | 400 | 0.1476 | 0.5929 | 0.6240 | 0.6081 | 0.9477 |
-| 0.1394 | 2.7925 | 600 | 0.1458 | 0.5927 | 0.6370 | 0.6140 | 0.9479 |
+| 0.2571 | 0.4662 | 100 | 0.2272 | 0.4924 | 0.5096 | 0.5008 | 0.9291 |
+| 0.2035 | 0.9324 | 200 | 0.1931 | 0.5411 | 0.5962 | 0.5673 | 0.9339 |
+| 0.1787 | 1.3963 | 300 | 0.1846 | 0.5693 | 0.6327 | 0.5993 | 0.9358 |
+| 0.1788 | 1.8625 | 400 | 0.1776 | 0.5741 | 0.6259 | 0.5989 | 0.9383 |
+| 0.176 | 2.3263 | 500 | 0.1759 | 0.5902 | 0.6231 | 0.6062 | 0.9390 |
+| 0.1676 | 2.7925 | 600 | 0.1761 | 0.5868 | 0.6351 | 0.6100 | 0.9386 |
 
 
 ### Framework versions
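The README hunks above change the learning-rate schedule and warmup. For orientation, here is a minimal sketch of how the updated values map onto `transformers.TrainingArguments`; the `output_dir` and the per-device batch size are assumptions (the diff only states `total_train_batch_size: 64` with 8 accumulation steps, which implies 8 per device on a single GPU), and the learning rate is not part of this hunk, so it is left out.

```python
from transformers import TrainingArguments

# Sketch only: reproduces the hyperparameters visible in this diff.
training_args = TrainingArguments(
    output_dir="uzbek_ner",                    # assumed
    per_device_train_batch_size=8,             # assumed: 8 x 8 accumulation steps = 64 total
    gradient_accumulation_steps=8,
    num_train_epochs=3,
    lr_scheduler_type="cosine_with_restarts",  # was "cosine" before this commit
    warmup_ratio=0.08,                         # was 0.1 before this commit
    optim="adamw_torch",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    fp16=True,                                 # "Native AMP" mixed precision
)
```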
config.json CHANGED
@@ -1,10 +1,33 @@
 {
-  "_name_or_path": "FacebookAI/xlm-roberta-large",
+  "_name_or_path": "xlm-roberta-large",
   "architectures": [
     "XLMRobertaForTokenClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "bos_token_id": 0,
+  "class_weights": [
+    0.053862977772951126,
+    251.441162109375,
+    878.10986328125,
+    8.121392250061035,
+    6.7787580490112305,
+    35.58425521850586,
+    21.885408401489258,
+    4.541465759277344,
+    31.51692008972168,
+    2.9324467182159424,
+    6.667890548706055,
+    16.192096710205078,
+    8.184276580810547,
+    4.891348361968994,
+    2.876582145690918,
+    113.24829864501953,
+    125.99810791015625,
+    3.378345251083374,
+    3.566652774810791,
+    120.9261474609375,
+    223.83193969726562
+  ],
   "classifier_dropout": null,
   "eos_token_id": 2,
   "hidden_act": "gelu",
@@ -21,7 +44,18 @@
     "7": "LABEL_7",
     "8": "LABEL_8",
     "9": "LABEL_9",
-    "10": "LABEL_10"
+    "10": "LABEL_10",
+    "11": "LABEL_11",
+    "12": "LABEL_12",
+    "13": "LABEL_13",
+    "14": "LABEL_14",
+    "15": "LABEL_15",
+    "16": "LABEL_16",
+    "17": "LABEL_17",
+    "18": "LABEL_18",
+    "19": "LABEL_19",
+    "20": "LABEL_20",
+    "21": "LABEL_21"
   },
   "initializer_range": 0.02,
   "intermediate_size": 4096,
@@ -29,7 +63,18 @@
     "LABEL_0": 0,
     "LABEL_1": 1,
     "LABEL_10": 10,
+    "LABEL_11": 11,
+    "LABEL_12": 12,
+    "LABEL_13": 13,
+    "LABEL_14": 14,
+    "LABEL_15": 15,
+    "LABEL_16": 16,
+    "LABEL_17": 17,
+    "LABEL_18": 18,
+    "LABEL_19": 19,
     "LABEL_2": 2,
+    "LABEL_20": 20,
+    "LABEL_21": 21,
     "LABEL_3": 3,
     "LABEL_4": 4,
     "LABEL_5": 5,
@@ -39,19 +84,6 @@
     "LABEL_9": 9
   },
   "layer_norm_eps": 1e-05,
-  "loss_weight": [
-    1.0,
-    1.0,
-    1.0,
-    1.0,
-    1.0,
-    1.0,
-    1.0,
-    1.0,
-    1.0,
-    1.0,
-    1.0
-  ],
   "max_position_embeddings": 514,
   "model_type": "xlm-roberta",
   "num_attention_heads": 16,
@@ -59,7 +91,7 @@
   "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
-  "torch_dtype": "float32",
+  "torch_dtype": "float16",
   "transformers_version": "4.49.0",
   "type_vocab_size": 1,
   "use_cache": true,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c83f02de25ff8e8d8d2309647cc79245adeae65840b328b0cc93f9fead24d7aa
-size 2235456956
+oid sha256:131c49bb9375a2fb85424cda466c04b32b120961b80b1f90c8bde5e29395a7ea
+size 1117774660
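The checkpoint roughly halves in size (2,235,456,956 to 1,117,774,660 bytes), consistent with `torch_dtype` changing from float32 to float16 in config.json. A minimal loading sketch; passing the matching dtype avoids silently upcasting the weights back to float32:

```python
import torch
from transformers import AutoModelForTokenClassification

# Load the half-precision weights as stored in model.safetensors.
model = AutoModelForTokenClassification.from_pretrained(
    "mustafoyev202/uzbek_ner",
    torch_dtype=torch.float16,  # or torch_dtype="auto" to follow config.json
)
```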
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4978fa21f4e079dc360ba47132ee01a1228a34765b3e2d6231d3c955e59953ce
-size 5240
+oid sha256:9f2137e1f9d20a81d6b09a5a9121c3731c957f75f5a4028372eecaa3676d98ea
+size 5304
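Finally, a quick illustrative smoke test of the updated checkpoint. Because `id2label` still maps to generic `LABEL_0` through `LABEL_21` names, the pipeline reports those placeholders rather than human-readable entity types; the input sentence is an arbitrary example.

```python
from transformers import pipeline

ner = pipeline("token-classification", model="mustafoyev202/uzbek_ner")
# Predictions come back with generic LABEL_* names until id2label is
# updated with the dataset's real NER tag names.
print(ner("Toshkent O'zbekistonning poytaxti."))
```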