Bajiyo committed
Commit: e550132
Parent: 409863e

Bajiyo/my-finetuned-transliteration-model

README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ base_model: ai4bharat/IndicBART
+ tags:
+ - generated_from_trainer
+ metrics:
+ - wer
+ model-index:
+ - name: results
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # results
+
+ This model is a fine-tuned version of [ai4bharat/IndicBART](https://huggingface.co/ai4bharat/IndicBART) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0863
+ - WER: 0.4892
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9, 0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 4
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step  | Validation Loss | WER    |
+ |:-------------:|:------:|:-----:|:---------------:|:------:|
+ | 1.4597        | 0.4504 | 4000  | 0.3822          | 0.6293 |
+ | 0.3574        | 0.9008 | 8000  | 0.2432          | 0.5683 |
+ | 0.252         | 1.3512 | 12000 | 0.1656          | 0.5286 |
+ | 0.1921        | 1.8016 | 16000 | 0.1277          | 0.5102 |
+ | 0.1583        | 2.2520 | 20000 | 0.1074          | 0.5010 |
+ | 0.1383        | 2.7024 | 24000 | 0.0967          | 0.4937 |
+ | 0.1266        | 3.1528 | 28000 | 0.0896          | 0.4904 |
+ | 0.1199        | 3.6032 | 32000 | 0.0863          | 0.4892 |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.2
+ - Pytorch 2.1.1+cu121
+ - Datasets 3.0.1
+ - Tokenizers 0.20.1
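
The card itself does not show how to run the checkpoint. Below is a minimal inference sketch, not the author's documented usage: it assumes the files in this commit are served under the repo id `Bajiyo/my-finetuned-transliteration-model` and that inputs follow the usual IndicBART convention of `text </s> <2src>` with the target-language tag supplied as the decoder start token. The `<2en>`/`<2ml>` tags and the example string are placeholders; match whatever formatting was used during fine-tuning.

```python
# Hedged inference sketch, not the author's documented usage.
# Assumptions: repo id, input formatting ("text </s> <2src>"), and the <2en>/<2ml> tags.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

repo_id = "Bajiyo/my-finetuned-transliteration-model"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id, use_fast=False)  # AlbertTokenizer, per tokenizer_config.json
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)

# IndicBART-style input: source text, explicit </s>, then the source-language tag.
text = "namaskaram </s> <2en>"  # placeholder example; adjust to the training format
input_ids = tokenizer(text, return_tensors="pt", add_special_tokens=False).input_ids

output_ids = model.generate(
    input_ids,
    num_beams=4,
    max_length=32,
    decoder_start_token_id=tokenizer.convert_tokens_to_ids("<2ml>"),  # placeholder target tag
    eos_token_id=tokenizer.convert_tokens_to_ids("</s>"),             # 64001, the model's eos
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

Passing only `input_ids` (rather than the full tokenizer output) avoids handing `token_type_ids` to the mBART model, which does not accept them.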
added_tokens.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "</s>": 64001,
+   "<2as>": 64002,
+   "<2bn>": 64003,
+   "<2en>": 64004,
+   "<2gu>": 64005,
+   "<2hi>": 64006,
+   "<2kn>": 64007,
+   "<2ml>": 64008,
+   "<2mr>": 64009,
+   "<2or>": 64010,
+   "<2pa>": 64011,
+   "<2ta>": 64012,
+   "<2te>": 64013,
+   "<s>": 64000
+ }
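
These are the IndicBART language tags plus `<s>`/`</s>`, registered after the 64,000-piece SentencePiece vocabulary (hence the 640xx ids). A quick sketch to confirm the tokenizer resolves them to the ids listed above; the repo id is assumed as before.

```python
# Sketch: check that the language tags resolve to the ids in added_tokens.json.
# The repo id is an assumption; substitute a local path if needed.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Bajiyo/my-finetuned-transliteration-model", use_fast=False)
for tag in ["<s>", "</s>", "<2en>", "<2ml>", "<2te>"]:
    print(tag, tokenizer.convert_tokens_to_ids(tag))
# Expected from the file above: <s> 64000, </s> 64001, <2en> 64004, <2ml> 64008, <2te> 64013
```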
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "ai4bharat/IndicBART",
+   "activation_dropout": 0.1,
+   "activation_function": "gelu",
+   "architectures": [
+     "MBartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 64000,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "dropout": 0.1,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 64001,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "max_position_embeddings": 1024,
+   "model_type": "mbart",
+   "num_hidden_layers": 6,
+   "pad_token_id": 0,
+   "scale_embedding": false,
+   "tokenizer_class": "AlbertTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.2",
+   "use_cache": true,
+   "vocab_size": 64014
+ }
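
The config matches stock IndicBART: an mBART encoder-decoder with 6 + 6 layers, `d_model` 1024, and a 64,014-entry vocabulary covering the SentencePiece pieces plus the 14 added tokens. A small consistency check, again assuming the repo id:

```python
# Sketch: verify the model's vocab size lines up with the tokenizer files above.
# Repo id is assumed, not stated in the card.
from transformers import AutoConfig, AutoTokenizer

repo_id = "Bajiyo/my-finetuned-transliteration-model"
config = AutoConfig.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id, use_fast=False)

print(config.model_type, config.encoder_layers, config.decoder_layers, config.d_model)
print(config.vocab_size)  # 64014 per config.json
print(len(tokenizer))     # tokenizer size; should not exceed config.vocab_size
```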
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 64000,
+   "eos_token_id": 64001,
+   "forced_eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.45.2"
+ }
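
Note that `forced_eos_token_id` (2) does not match `eos_token_id` (64001, `</s>`); it appears to be carried over from the mBART defaults rather than chosen deliberately. A sketch for inspecting and, if desired, overriding it (repo id assumed):

```python
# Sketch: inspect the generation defaults; forced_eos_token_id (2) does not match
# eos_token_id (64001), so callers may prefer to override it explicitly.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("Bajiyo/my-finetuned-transliteration-model")  # assumed repo id
print(gen_config.eos_token_id, gen_config.forced_eos_token_id)  # 64001, 2
gen_config.forced_eos_token_id = gen_config.eos_token_id        # optional override
```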
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b41e325222bc6de83f0c0cf3ba53f584582800148fc4905ebd3fa649b60539ae
+ size 976355336
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "<s>",
+     "</s>",
+     "<2as>",
+     "<2bn>",
+     "<2en>",
+     "<2gu>",
+     "<2hi>",
+     "<2kn>",
+     "<2ml>",
+     "<2mr>",
+     "<2or>",
+     "<2pa>",
+     "<2ta>",
+     "<2te>"
+   ],
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "[SEP]",
+   "unk_token": "<unk>"
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d924c4e4e0d7a073e8289c20e29cd27657629064f2571fb18ab82008cf79a72
+ size 1898704
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,187 @@
+ {
+   "added_tokens_decoder": {
+     "0":     { "content": "<pad>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "1":     { "content": "<unk>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "2":     { "content": "[CLS]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "3":     { "content": "[SEP]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false },
+     "4":     { "content": "[MASK]", "lstrip": true,  "normalized": true,  "rstrip": false, "single_word": false, "special": true },
+     "64000": { "content": "<s>",    "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64001": { "content": "</s>",   "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64002": { "content": "<2as>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64003": { "content": "<2bn>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64004": { "content": "<2en>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64005": { "content": "<2gu>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64006": { "content": "<2hi>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64007": { "content": "<2kn>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64008": { "content": "<2ml>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64009": { "content": "<2mr>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64010": { "content": "<2or>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64011": { "content": "<2pa>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64012": { "content": "<2ta>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "64013": { "content": "<2te>",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }
+   },
+   "additional_special_tokens": [
+     "<s>", "</s>", "<2as>", "<2bn>", "<2en>", "<2gu>", "<2hi>",
+     "<2kn>", "<2ml>", "<2mr>", "<2or>", "<2pa>", "<2ta>", "<2te>"
+   ],
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "keep_accents": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "remove_space": true,
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "AlbertTokenizer",
+   "unk_token": "<unk>",
+   "use_fast": false
+ }
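
The tokenizer maps `bos_token`/`eos_token` to `[CLS]`/`[SEP]` (ids 2 and 3 in `added_tokens_decoder`), while the model config uses `<s>`/`</s>` (64000/64001). A short check, with the repo id assumed as before:

```python
# Sketch: the tokenizer's bos/eos ([CLS]/[SEP]) differ from the ids the model config
# declares (<s>=64000, </s>=64001), so don't rely on tokenizer defaults for EOS.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Bajiyo/my-finetuned-transliteration-model", use_fast=False)
print(tok.bos_token, tok.bos_token_id)    # [CLS] 2
print(tok.eos_token, tok.eos_token_id)    # [SEP] 3
print(tok.convert_tokens_to_ids("</s>"))  # 64001, matching config.json's eos_token_id
```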
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20ef9919bc8d6ff2cf9467910d4e290b6dadad183709d3c8dcb2451e242f65f2
+ size 5304
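
`training_args.bin` is a binary pickle and cannot be read here. The sketch below is a hedged reconstruction from the README's hyperparameters and the 4000-step eval cadence in the results table, together with a typical WER hook built on the `evaluate` library; the dataset, preprocessing, and anything else not stated in the card are assumptions.

```python
# Hedged reconstruction of the training setup from the README, not the actual
# contents of training_args.bin. Values not listed in the card are assumptions.
import evaluate
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="results",
    learning_rate=2e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    num_train_epochs=4,
    lr_scheduler_type="linear",
    seed=42,
    eval_strategy="steps",
    eval_steps=4000,             # matches the 4000-step spacing in the results table
    predict_with_generate=True,  # assumption: WER is scored on generated text
)

wer_metric = evaluate.load("wer")

def compute_metrics(eval_pred, tokenizer):
    """Decode generated ids and references, then score word error rate.
    Bind `tokenizer` with functools.partial before passing this to Seq2SeqTrainer."""
    preds, labels = eval_pred
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    # Drop the -100 padding used for labels before decoding.
    labels = [[tok_id for tok_id in seq if tok_id != -100] for seq in labels]
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    return {"wer": wer_metric.compute(predictions=decoded_preds, references=decoded_labels)}
```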