Xenova (HF staff) committed
Commit b80a289
1 Parent(s): 7520dbb

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,62 @@
+{
+  "_name_or_path": "facebook/mbart-large-50-many-to-one-mmt",
+  "_num_labels": 3,
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "add_bias_logits": false,
+  "add_final_layer_norm": true,
+  "architectures": [
+    "MBartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classif_dropout": 0.0,
+  "classifier_dropout": 0.0,
+  "d_model": 1024,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 4096,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 12,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 4096,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 12,
+  "eos_token_id": 2,
+  "forced_bos_token_id": 250004,
+  "forced_eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "max_length": 200,
+  "max_position_embeddings": 1024,
+  "model_type": "mbart",
+  "normalize_before": true,
+  "normalize_embedding": true,
+  "num_beams": 5,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "static_position_embeddings": false,
+  "task_specific_params": {
+    "translation_en_to_ro": {
+      "decoder_start_token_id": 250020
+    }
+  },
+  "tokenizer_class": "MBart50Tokenizer",
+  "transformers_version": "4.34.0.dev0",
+  "use_cache": true,
+  "vocab_size": 250054
+}
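
The config is carried over unchanged from facebook/mbart-large-50-many-to-one-mmt. A minimal sanity-check sketch, assuming the `transformers` Python package is installed; the repo id below is a placeholder for whichever repository this commit lives in:

```python
from transformers import AutoConfig

# Placeholder repo id -- substitute the actual repository containing this config.json.
config = AutoConfig.from_pretrained("Xenova/mbart-large-50-many-to-one-mmt")

print(config.model_type)            # "mbart"
print(config.encoder_layers)        # 12 encoder / 12 decoder layers, d_model=1024
print(config.forced_bos_token_id)   # 250004 -> the en_XX language code (output forced to English)
```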
generation_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "decoder_start_token_id": 2,
+  "eos_token_id": 2,
+  "forced_bos_token_id": 250004,
+  "forced_eos_token_id": 2,
+  "max_length": 200,
+  "num_beams": 5,
+  "pad_token_id": 1,
+  "transformers_version": "4.34.0.dev0"
+}
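
generation_config.json duplicates the generation defaults already present in config.json; `model.generate()` picks them up automatically, but they can also be loaded standalone. A sketch, same placeholder repo id as above:

```python
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("Xenova/mbart-large-50-many-to-one-mmt")
print(gen.num_beams, gen.max_length)   # 5-beam search, up to 200 tokens
# forced_bos_token_id=250004 pins the first decoded token to en_XX, which is
# what makes this a many-to-ONE model: it always translates into English.
```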
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2e4698829a1fecc1d3eb2d0a2a178d53cf4e9e261864c6d02041d755b844547
+size 1836266030
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93ed725e9f49ad6ac34d5c14fe51c464fbdea9babcc4a0a8675c4670ed9de5f8
+size 1836768889
onnx/decoder_model_merged_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c95eb0c2192095f89b22c3fede945ec2454cb6bbcb5db4a95b47da72a3ef7db
+size 462864720
onnx/decoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7edf0432794b7d9a3d49bd98f1c924bbeba98fbfc865f992bdc0e4716528186a
+size 462092077
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f88c25d8703bea38732c7c4ce83c0ef5da2ebaf68e81abd1d28fe6cfc9f2cc6
+size 1735418013
onnx/decoder_with_past_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79bb859a4913a96b4e25879bb62555f09fe85ec70fd44c7783b2d9b651117867
+size 436566459
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6f552b348949e250a56b8a805de5e55cbdedd3ce54f9f56da69e867337f1bd1
+size 1633279750
onnx/encoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c662aa5bb05ad768be05c773bce44cabc451913a18b513bebfeccf0e8c12140d
+size 409679126
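
The eight onnx/ entries above are Git LFS pointers to the exported graphs: fp32 encoder/decoder variants at roughly 1.6-1.8 GB each, plus QInt8 quantized counterparts at roughly 410-460 MB. A minimal inspection sketch with onnxruntime, assuming the file has been downloaded; the repo id is again a placeholder:

```python
import onnxruntime as ort
from huggingface_hub import hf_hub_download

# Placeholder repo id; the file itself is ~1.6 GB, so expect a long first download.
path = hf_hub_download("Xenova/mbart-large-50-many-to-one-mmt", "onnx/encoder_model.onnx")
session = ort.InferenceSession(path, providers=["CPUExecutionProvider"])

for inp in session.get_inputs():
    print(inp.name, inp.shape)   # expected: input_ids and attention_mask with dynamic axes
```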
quantize_config.json ADDED
@@ -0,0 +1,125 @@
+{
+  "per_channel": true,
+  "reduce_range": true,
+  "per_model_config": {
+    "encoder_model": {
+      "op_types": [
+        "Constant",
+        "Range",
+        "Sub",
+        "Where",
+        "ReduceMean",
+        "Mul",
+        "Unsqueeze",
+        "Add",
+        "Expand",
+        "Shape",
+        "Reshape",
+        "Equal",
+        "Pow",
+        "Relu",
+        "MatMul",
+        "Cast",
+        "Transpose",
+        "ConstantOfShape",
+        "Div",
+        "Sqrt",
+        "Gather",
+        "Concat",
+        "Softmax"
+      ],
+      "weight_type": "QInt8"
+    },
+    "decoder_with_past_model": {
+      "op_types": [
+        "Constant",
+        "Sub",
+        "Range",
+        "Where",
+        "ReduceMean",
+        "Mul",
+        "Unsqueeze",
+        "Add",
+        "Expand",
+        "Shape",
+        "Reshape",
+        "Equal",
+        "Pow",
+        "Relu",
+        "MatMul",
+        "Cast",
+        "Transpose",
+        "ConstantOfShape",
+        "Div",
+        "Sqrt",
+        "Gather",
+        "Concat",
+        "Softmax"
+      ],
+      "weight_type": "QInt8"
+    },
+    "decoder_model_merged": {
+      "op_types": [
+        "Constant",
+        "Range",
+        "Sub",
+        "Where",
+        "ReduceMean",
+        "Mul",
+        "Unsqueeze",
+        "Add",
+        "Expand",
+        "Shape",
+        "Reshape",
+        "Equal",
+        "If",
+        "Pow",
+        "Less",
+        "Slice",
+        "Squeeze",
+        "Relu",
+        "MatMul",
+        "Cast",
+        "Transpose",
+        "ConstantOfShape",
+        "Div",
+        "Sqrt",
+        "Gather",
+        "Concat",
+        "Softmax"
+      ],
+      "weight_type": "QInt8"
+    },
+    "decoder_model": {
+      "op_types": [
+        "Constant",
+        "Range",
+        "Sub",
+        "Where",
+        "ReduceMean",
+        "Mul",
+        "Unsqueeze",
+        "Add",
+        "Expand",
+        "Shape",
+        "Reshape",
+        "Equal",
+        "Pow",
+        "Less",
+        "Slice",
+        "Squeeze",
+        "Relu",
+        "MatMul",
+        "Cast",
+        "Transpose",
+        "ConstantOfShape",
+        "Div",
+        "Sqrt",
+        "Gather",
+        "Concat",
+        "Softmax"
+      ],
+      "weight_type": "QInt8"
+    }
+  }
+}
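
quantize_config.json records how the *_quantized.onnx files were produced: dynamic quantization with per-channel, reduced-range QInt8 weights over the listed op types (the decoder graphs additionally list Less/Slice/Squeeze, presumably from causal-mask construction, and the merged decoder an If node for selecting the with/without-past branch). A sketch of the equivalent ONNX Runtime call, illustrative rather than the exact conversion script used:

```python
from onnxruntime.quantization import QuantType, quantize_dynamic

# Mirrors quantize_config.json: per_channel=true, reduce_range=true, QInt8 weights.
quantize_dynamic(
    model_input="onnx/encoder_model.onnx",
    model_output="onnx/encoder_model_quantized.onnx",
    per_channel=True,
    reduce_range=True,
    weight_type=QuantType.QInt8,
)
```

Per the LFS sizes above, this step takes the encoder from 1,633,279,750 bytes to 409,679,126, roughly a 4x reduction.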
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,63 @@
+{
+  "additional_special_tokens": [
+    "ar_AR",
+    "cs_CZ",
+    "de_DE",
+    "en_XX",
+    "es_XX",
+    "et_EE",
+    "fi_FI",
+    "fr_XX",
+    "gu_IN",
+    "hi_IN",
+    "it_IT",
+    "ja_XX",
+    "kk_KZ",
+    "ko_KR",
+    "lt_LT",
+    "lv_LV",
+    "my_MM",
+    "ne_NP",
+    "nl_XX",
+    "ro_RO",
+    "ru_RU",
+    "si_LK",
+    "tr_TR",
+    "vi_VN",
+    "zh_CN",
+    "af_ZA",
+    "az_AZ",
+    "bn_IN",
+    "fa_IR",
+    "he_IL",
+    "hr_HR",
+    "id_ID",
+    "ka_GE",
+    "km_KH",
+    "mk_MK",
+    "ml_IN",
+    "mn_MN",
+    "mr_IN",
+    "pl_PL",
+    "ps_AF",
+    "pt_XX",
+    "sv_SE",
+    "sw_KE",
+    "ta_IN",
+    "te_IN",
+    "th_TH",
+    "tl_XX",
+    "uk_UA",
+    "ur_PK",
+    "xh_ZA",
+    "gl_ES",
+    "sl_SI"
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
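
The 52 additional special tokens are the MBART-50 source-language codes; the map also pins the usual <s>/</s>/<pad>/<unk>/<mask> roles. Their ids line up with config.json, which a quick sketch can confirm (tokenizer loaded from the upstream checkpoint):

```python
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
print(tokenizer.convert_tokens_to_ids("en_XX"))        # 250004 -- matches forced_bos_token_id
print(tokenizer.eos_token_id, tokenizer.pad_token_id)  # 2, 1 -- match eos/pad ids in config.json
```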
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d58a68c276b56fcc48c165c63f70e5e4d452b4182032a5f7a2d018f4aa1a889
+size 17109752
tokenizer_config.json ADDED
@@ -0,0 +1,77 @@
+{
+  "additional_special_tokens": [
+    "ar_AR",
+    "cs_CZ",
+    "de_DE",
+    "en_XX",
+    "es_XX",
+    "et_EE",
+    "fi_FI",
+    "fr_XX",
+    "gu_IN",
+    "hi_IN",
+    "it_IT",
+    "ja_XX",
+    "kk_KZ",
+    "ko_KR",
+    "lt_LT",
+    "lv_LV",
+    "my_MM",
+    "ne_NP",
+    "nl_XX",
+    "ro_RO",
+    "ru_RU",
+    "si_LK",
+    "tr_TR",
+    "vi_VN",
+    "zh_CN",
+    "af_ZA",
+    "az_AZ",
+    "bn_IN",
+    "fa_IR",
+    "he_IL",
+    "hr_HR",
+    "id_ID",
+    "ka_GE",
+    "km_KH",
+    "mk_MK",
+    "ml_IN",
+    "mn_MN",
+    "mr_IN",
+    "pl_PL",
+    "ps_AF",
+    "pt_XX",
+    "sv_SE",
+    "sw_KE",
+    "ta_IN",
+    "te_IN",
+    "th_TH",
+    "tl_XX",
+    "uk_UA",
+    "ur_PK",
+    "xh_ZA",
+    "gl_ES",
+    "sl_SI"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "language_codes": "ML50",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "src_lang": null,
+  "tgt_lang": null,
+  "tokenizer_class": "MBart50Tokenizer",
+  "unk_token": "<unk>"
+}
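
tokenizer_config.json leaves src_lang/tgt_lang unset, so callers pick the source language at runtime. A usage sketch following the upstream facebook/mbart-large-50-many-to-one-mmt model card pattern (PyTorch weights shown; the ONNX files above are what a runtime such as Transformers.js would consume instead):

```python
from transformers import MBart50Tokenizer, MBartForConditionalGeneration

model_id = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_id)
model = MBartForConditionalGeneration.from_pretrained(model_id)

tokenizer.src_lang = "hi_IN"            # any of the 52 codes listed above
inputs = tokenizer("संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है", return_tensors="pt")
generated = model.generate(**inputs)    # en_XX is forced as BOS, so output is English
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```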