ekaterinatao committed 9ada9cb (verified) · 1 parent: e986930

Model save
README.md ADDED
@@ -0,0 +1,75 @@
---
base_model: alexyalunin/RuBioRoBERTa
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: nerel-bio-RuBioRoBERTa-base
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# nerel-bio-RuBioRoBERTa-base

This model is a fine-tuned version of [alexyalunin/RuBioRoBERTa](https://huggingface.co/alexyalunin/RuBioRoBERTa) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5262
- Precision: 0.8251
- Recall: 0.8335
- F1: 0.8293
- Accuracy: 0.8827

## Model description

More information needed

## Intended uses & limitations

More information needed

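As a minimal usage sketch (not part of the auto-generated card), the model can be loaded for token-classification inference through the standard `transformers` pipeline. The repository id `ekaterinatao/nerel-bio-RuBioRoBERTa-base` is inferred from the uploader and model name, and the example sentence is invented:

```python
# Minimal inference sketch; the repository id is assumed from the uploader/model name.
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

model_id = "ekaterinatao/nerel-bio-RuBioRoBERTa-base"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForTokenClassification.from_pretrained(model_id)

# aggregation_strategy="simple" groups subword pieces into whole-word entity spans
ner = pipeline(
    "token-classification",
    model=model,
    tokenizer=tokenizer,
    aggregation_strategy="simple",
)

# Invented Russian biomedical sentence for illustration
print(ner("Пациенту назначен аспирин при боли в животе."))
```
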
## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 6
- eval_batch_size: 6
- seed: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10

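For reference, a `TrainingArguments` configuration matching the values above would look roughly like the sketch below; the output directory and evaluation cadence are illustrative assumptions, not values recorded in this commit (the Adam betas and epsilon listed above are the `Trainer` defaults, so they need no explicit flag):

```python
# Approximate TrainingArguments mirroring the hyperparameters listed above.
# output_dir and evaluation_strategy are assumptions, not taken from this commit.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="nerel-bio-RuBioRoBERTa-base",  # assumed
    learning_rate=5e-5,
    per_device_train_batch_size=6,
    per_device_eval_batch_size=6,
    seed=64,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    num_train_epochs=10,
    evaluation_strategy="epoch",  # assumed; metrics below are reported once per epoch
)
```
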
### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log        | 1.0   | 102  | 1.7932          | 0.4125    | 0.4094 | 0.4110 | 0.5484   |
| No log        | 2.0   | 204  | 0.5751          | 0.7711    | 0.7635 | 0.7673 | 0.8392   |
| No log        | 3.0   | 306  | 0.4426          | 0.8053    | 0.8163 | 0.8107 | 0.8727   |
| No log        | 4.0   | 408  | 0.4545          | 0.8070    | 0.8049 | 0.8060 | 0.8707   |
| 0.8666        | 5.0   | 510  | 0.4854          | 0.8100    | 0.8024 | 0.8062 | 0.8693   |
| 0.8666        | 6.0   | 612  | 0.4791          | 0.8194    | 0.8210 | 0.8202 | 0.8805   |
| 0.8666        | 7.0   | 714  | 0.4975          | 0.8202    | 0.8306 | 0.8254 | 0.8816   |
| 0.8666        | 8.0   | 816  | 0.4997          | 0.8217    | 0.8304 | 0.8260 | 0.8817   |
| 0.8666        | 9.0   | 918  | 0.5237          | 0.8237    | 0.8318 | 0.8277 | 0.8821   |
| 0.0548        | 10.0  | 1020 | 0.5262          | 0.8251    | 0.8335 | 0.8293 | 0.8827   |

### Framework versions

- Transformers 4.38.2
- Pytorch 2.2.1+cu121
- Datasets 2.19.0
- Tokenizers 0.15.2
config.json ADDED
@@ -0,0 +1,122 @@
{
  "_name_or_path": "alexyalunin/RuBioRoBERTa",
  "architectures": [
    "RobertaForTokenClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 1,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "id2label": {
    "0": "ACTIVITY",
    "1": "ADMINISTRATION_ROUTE",
    "2": "ANATOMY",
    "3": "CHEM",
    "4": "DEVICE",
    "5": "DISO",
    "6": "FINDING",
    "7": "FOOD",
    "8": "GENE",
    "9": "INJURY_POISONING",
    "10": "HEALTH_CARE_ACTIVITY",
    "11": "LABPROC",
    "12": "LIVB",
    "13": "MEDPROC",
    "14": "MENTALPROC",
    "15": "PHYS",
    "16": "SCIPROC",
    "17": "AGE",
    "18": "CITY",
    "19": "COUNTRY",
    "20": "DATE",
    "21": "DISTRICT",
    "22": "EVENT",
    "23": "FAMILY",
    "24": "FACILITY",
    "25": "LOCATION",
    "26": "MONEY",
    "27": "NATIONALITY",
    "28": "NUMBER",
    "29": "ORDINAL",
    "30": "ORGANIZATION",
    "31": "PERCENT",
    "32": "PERSON",
    "33": "PRODUCT",
    "34": "PROFESSION",
    "35": "STATE_OR_PROVINCE",
    "36": "TIME",
    "37": "AWARD",
    "38": "CRIME",
    "39": "IDEOLOGY",
    "40": "LANGUAGE",
    "41": "LAW",
    "42": "PENALTY",
    "43": "RELIGION",
    "44": "WORK_OF_ART"
  },
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "label2id": {
    "ACTIVITY": 0,
    "ADMINISTRATION_ROUTE": 1,
    "AGE": 17,
    "ANATOMY": 2,
    "AWARD": 37,
    "CHEM": 3,
    "CITY": 18,
    "COUNTRY": 19,
    "CRIME": 38,
    "DATE": 20,
    "DEVICE": 4,
    "DISO": 5,
    "DISTRICT": 21,
    "EVENT": 22,
    "FACILITY": 24,
    "FAMILY": 23,
    "FINDING": 6,
    "FOOD": 7,
    "GENE": 8,
    "HEALTH_CARE_ACTIVITY": 10,
    "IDEOLOGY": 39,
    "INJURY_POISONING": 9,
    "LABPROC": 11,
    "LANGUAGE": 40,
    "LAW": 41,
    "LIVB": 12,
    "LOCATION": 25,
    "MEDPROC": 13,
    "MENTALPROC": 14,
    "MONEY": 26,
    "NATIONALITY": 27,
    "NUMBER": 28,
    "ORDINAL": 29,
    "ORGANIZATION": 30,
    "PENALTY": 42,
    "PERCENT": 31,
    "PERSON": 32,
    "PHYS": 15,
    "PRODUCT": 33,
    "PROFESSION": 34,
    "RELIGION": 43,
    "SCIPROC": 16,
    "STATE_OR_PROVINCE": 35,
    "TIME": 36,
    "WORK_OF_ART": 44
  },
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "num_attention_heads": 16,
  "num_hidden_layers": 24,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.38.2",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 50265
}
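Note that the label inventory above is flat (45 entity types, with no B-/I- prefixes). A small sketch for inspecting it programmatically, again assuming the repository id used in the README example:

```python
# Inspect the label set stored in the model config; the repo id is an assumption.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("ekaterinatao/nerel-bio-RuBioRoBERTa-base")

print(len(config.id2label))        # 45 entity types
print(config.id2label[5])          # "DISO"
print(config.label2id["ANATOMY"])  # 2
```
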
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1440fa09f5f49c5441c3863ecaf1209c36a164f42c91ef9fee6a11cf3293783c
size 1417473100
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
{
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "c": ".cache",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_lower_case": true,
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
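Because `add_prefix_space` is set to `true`, the tokenizer can be fed pre-split words (`is_split_into_words=True`), which is the usual setup for token classification. A brief sketch, with the repository id assumed as in the earlier examples and an invented input:

```python
# Tokenize pre-split words; add_prefix_space=True in the saved config makes this valid
# for a RoBERTa tokenizer. The repository id is an assumption.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ekaterinatao/nerel-bio-RuBioRoBERTa-base")

words = ["Боль", "в", "животе"]  # invented pre-tokenized input
encoding = tokenizer(words, is_split_into_words=True)

# word_ids() maps each subword back to its source word, which is how NER labels
# are usually aligned to subword tokens during preprocessing.
print(encoding.word_ids())
```
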
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6c19964f690434ae8cf1576af359f052742366b64bd30131ddc8982f3e5e902
size 4984
vocab.json ADDED
The diff for this file is too large to render. See raw diff