hans00 committed on
Commit
83262a0
1 Parent(s): f866902

Upload folder using huggingface_hub

bert_zh/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
bert_zh/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
bert_zh/tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
bert_zh/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,181 @@
+ {
+   "_name_or_path": "./hf/ellie",
+   "activation_dropout": 0.1,
+   "architectures": [
+     "BertVits2Model"
+   ],
+   "attention_dropout": 0.1,
+   "bert_configs": [
+     {
+       "_name_or_path": "",
+       "add_cross_attention": false,
+       "architectures": [
+         "BertForMaskedLM"
+       ],
+       "attention_probs_dropout_prob": 0.1,
+       "bad_words_ids": null,
+       "begin_suppress_tokens": null,
+       "bos_token_id": 0,
+       "chunk_size_feed_forward": 0,
+       "classifier_dropout": null,
+       "cross_attention_hidden_size": null,
+       "decoder_start_token_id": null,
+       "directionality": "bidi",
+       "diversity_penalty": 0.0,
+       "do_sample": false,
+       "early_stopping": false,
+       "encoder_no_repeat_ngram_size": 0,
+       "eos_token_id": 2,
+       "exponential_decay_length_penalty": null,
+       "finetuning_task": null,
+       "forced_bos_token_id": null,
+       "forced_eos_token_id": null,
+       "hidden_act": "gelu",
+       "hidden_dropout_prob": 0.1,
+       "hidden_size": 1024,
+       "id2label": {
+         "0": "LABEL_0",
+         "1": "LABEL_1"
+       },
+       "initializer_range": 0.02,
+       "intermediate_size": 4096,
+       "is_decoder": false,
+       "is_encoder_decoder": false,
+       "label2id": {
+         "LABEL_0": 0,
+         "LABEL_1": 1
+       },
+       "layer_norm_eps": 1e-12,
+       "length_penalty": 1.0,
+       "max_length": 20,
+       "max_position_embeddings": 512,
+       "min_length": 0,
+       "model_type": "bert",
+       "no_repeat_ngram_size": 0,
+       "num_attention_heads": 16,
+       "num_beam_groups": 1,
+       "num_beams": 1,
+       "num_hidden_layers": 24,
+       "num_return_sequences": 1,
+       "output_attentions": false,
+       "output_hidden_states": false,
+       "output_past": true,
+       "output_scores": false,
+       "pad_token_id": 0,
+       "pooler_fc_size": 768,
+       "pooler_num_attention_heads": 12,
+       "pooler_num_fc_layers": 3,
+       "pooler_size_per_head": 128,
+       "pooler_type": "first_token_transform",
+       "position_embedding_type": "absolute",
+       "prefix": null,
+       "problem_type": null,
+       "pruned_heads": {},
+       "remove_invalid_values": false,
+       "repetition_penalty": 1.0,
+       "return_dict": true,
+       "return_dict_in_generate": false,
+       "sep_token_id": null,
+       "suppress_tokens": null,
+       "task_specific_params": null,
+       "temperature": 1.0,
+       "tf_legacy_loss": false,
+       "tie_encoder_decoder": false,
+       "tie_word_embeddings": true,
+       "tokenizer_class": null,
+       "top_k": 50,
+       "top_p": 1.0,
+       "torch_dtype": null,
+       "torchscript": false,
+       "transformers_version": "4.38.2",
+       "type_vocab_size": 2,
+       "typical_p": 1.0,
+       "use_bfloat16": false,
+       "use_cache": true,
+       "vocab_size": 21128
+     }
+   ],
+   "cond_layer_index": 2,
+   "conditioning_layer_index": 2,
+   "depth_separable_channels": 2,
+   "depth_separable_num_layers": 3,
+   "duration_predictor_dropout": 0.1,
+   "duration_predictor_filter_channels": 256,
+   "duration_predictor_flow_bins": 10,
+   "duration_predictor_kernel_size": 3,
+   "duration_predictor_num_flows": 4,
+   "duration_predictor_tail_bound": 5.0,
+   "ffn_dim": 768,
+   "ffn_kernel_size": 3,
+   "flow_size": 192,
+   "hidden_act": "relu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 192,
+   "initializer_range": 0.02,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "leaky_relu_slope": 0.1,
+   "model_type": "bert_vits2",
+   "noise_scale": 0.667,
+   "noise_scale_duration": 0.8,
+   "num_attention_heads": 2,
+   "num_hidden_layers": 6,
+   "num_languages": 3,
+   "num_speakers": 256,
+   "num_tones": 11,
+   "posterior_encoder_num_wavenet_layers": 16,
+   "prior_encoder_num_flows": 4,
+   "prior_encoder_num_flows_layers": 6,
+   "prior_encoder_num_wavenet_layers": 4,
+   "resblock_dilation_sizes": [
+     [
+       1,
+       3,
+       5
+     ],
+     [
+       1,
+       3,
+       5
+     ],
+     [
+       1,
+       3,
+       5
+     ]
+   ],
+   "resblock_kernel_sizes": [
+     3,
+     7,
+     11
+   ],
+   "sampling_rate": 44100,
+   "speaker_embedding_size": 256,
+   "speaking_rate": 1.0,
+   "spectrogram_bins": 1025,
+   "stochastic_duration_prediction_ratio": 0.0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.2",
+   "upsample_initial_channel": 512,
+   "upsample_kernel_sizes": [
+     16,
+     16,
+     8,
+     2,
+     2
+   ],
+   "upsample_rates": [
+     8,
+     8,
+     2,
+     2,
+     2
+   ],
+   "use_bias": true,
+   "use_transformer_flow": true,
+   "vocab_size": 112,
+   "wavenet_dilation_rate": 1,
+   "wavenet_dropout": 0.0,
+   "wavenet_kernel_size": 5,
+   "window_size": 4
+ }
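
For orientation, the waveform geometry implied by the vocoder fields above can be checked with a few lines of arithmetic. This is a sketch that assumes the usual VITS/HiFi-GAN convention in which the product of upsample_rates equals the spectrogram hop length; it is not code from the repository.

# Frame geometry implied by config.json (assumption: product of
# upsample_rates == hop length, as in VITS/HiFi-GAN style vocoders).
upsample_rates = [8, 8, 2, 2, 2]
sampling_rate = 44100

hop_length = 1
for rate in upsample_rates:
    hop_length *= rate

print(hop_length)                  # 512 audio samples per frame
print(sampling_rate / hop_length)  # ~86.1 frames per second at 44.1 kHz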
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d07836d2e17c8d7ddd6cd33144dae5c3a87489a5b349d88efe00a15c7749d87
+ size 1587908260
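
model.safetensors is stored as a Git LFS pointer, so the repository only records the object's sha256 oid and byte size. A downloaded copy can be verified against that pointer with the Python standard library; a minimal sketch, assuming the resolved binary (not the 3-line pointer file) is in the working directory:

# Verify a downloaded LFS object against the oid/size in the pointer above.
import hashlib
import os

path = "model.safetensors"  # the resolved binary, not the pointer file
expected_oid = "5d07836d2e17c8d7ddd6cd33144dae5c3a87489a5b349d88efe00a15c7749d87"
expected_size = 1587908260

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size
assert digest.hexdigest() == expected_oid
print("model.safetensors matches its LFS pointer")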
onnx/model-quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ab7624ea46cbf11971571d45572c785e1d307bf16cf5b9f63d1ad396a6bc925
+ size 443019696
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3aea0a903af45382edeb2e0d8a4f5b9ca10330c8aa3ff9c77f372c34d93314a2
+ size 1446995871
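
The two ONNX exports can be inspected without assuming anything about their input signature. This hedged sketch only lists the declared graph inputs and outputs with onnxruntime, assuming the file has been pulled from LFS locally:

# List the graph inputs/outputs of the exported model (pip install onnxruntime).
import onnxruntime as ort

session = ort.InferenceSession("onnx/model.onnx", providers=["CPUExecutionProvider"])

for tensor in session.get_inputs():
    print("input :", tensor.name, tensor.shape, tensor.type)
for tensor in session.get_outputs():
    print("output:", tensor.name, tensor.shape, tensor.type)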
processor_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bert_tokenizers": {
+     "zh": "bert_zh"
+   },
+   "processor_class": "BertVits2Processor"
+ }
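
processor_config.json maps each language code to a per-language BERT tokenizer folder, so "zh" resolves to the bert_zh/ files added above. The BertVits2Processor class itself is custom, but bert_zh/ declares a plain BertTokenizer and can be loaded on its own; a minimal sketch, assuming a local clone of this repository:

# Resolve the "zh" entry to its tokenizer folder and load the plain BERT tokenizer.
import json
from transformers import AutoTokenizer

with open("processor_config.json", encoding="utf-8") as f:
    processor_cfg = json.load(f)

folder = processor_cfg["bert_tokenizers"]["zh"]   # -> "bert_zh"
bert_tokenizer = AutoTokenizer.from_pretrained(folder)

print(bert_tokenizer.tokenize("你好"))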
special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "pad_token": {
+     "content": "_",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "UNK",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,170 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "_",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 111,
+       "content": "UNK",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Replace",
+         "pattern": {
+           "Regex": "(?:\\[SEP\\]|\\b)"
+         },
+         "content": "_"
+       },
+       {
+         "type": "Replace",
+         "pattern": {
+           "Regex": "(?:_)+"
+         },
+         "content": "_"
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Split",
+     "pattern": {
+       "Regex": "_"
+     },
+     "behavior": "Isolated",
+     "invert": false
+   },
+   "post_processor": null,
+   "decoder": null,
+   "model": {
+     "vocab": {
+       "!": 103,
+       "'": 108,
+       ",": 106,
+       "-": 109,
+       ".": 107,
+       "?": 104,
+       "AA": 1,
+       "E": 2,
+       "EE": 3,
+       "En": 4,
+       "N": 5,
+       "OO": 6,
+       "SP": 110,
+       "UNK": 111,
+       "V": 7,
+       "_": 0,
+       "a": 8,
+       "a:": 9,
+       "aa": 10,
+       "ae": 11,
+       "ah": 12,
+       "ai": 13,
+       "an": 14,
+       "ang": 15,
+       "ao": 16,
+       "aw": 17,
+       "ay": 18,
+       "b": 19,
+       "by": 20,
+       "c": 21,
+       "ch": 22,
+       "d": 23,
+       "dh": 24,
+       "dy": 25,
+       "e": 26,
+       "e:": 27,
+       "eh": 28,
+       "ei": 29,
+       "en": 30,
+       "eng": 31,
+       "er": 32,
+       "ey": 33,
+       "f": 34,
+       "g": 35,
+       "gy": 36,
+       "h": 37,
+       "hh": 38,
+       "hy": 39,
+       "i": 40,
+       "i0": 41,
+       "i:": 42,
+       "ia": 43,
+       "ian": 44,
+       "iang": 45,
+       "iao": 46,
+       "ie": 47,
+       "ih": 48,
+       "in": 49,
+       "ing": 50,
+       "iong": 51,
+       "ir": 52,
+       "iu": 53,
+       "iy": 54,
+       "j": 55,
+       "jh": 56,
+       "k": 57,
+       "ky": 58,
+       "l": 59,
+       "m": 60,
+       "my": 61,
+       "n": 62,
+       "ng": 63,
+       "ny": 64,
+       "o": 65,
+       "o:": 66,
+       "ong": 67,
+       "ou": 68,
+       "ow": 69,
+       "oy": 70,
+       "p": 71,
+       "py": 72,
+       "q": 73,
+       "r": 74,
+       "ry": 75,
+       "s": 76,
+       "sh": 77,
+       "t": 78,
+       "th": 79,
+       "ts": 80,
+       "ty": 81,
+       "u": 82,
+       "u:": 83,
+       "ua": 84,
+       "uai": 85,
+       "uan": 86,
+       "uang": 87,
+       "uh": 88,
+       "ui": 89,
+       "un": 90,
+       "uo": 91,
+       "uw": 92,
+       "v": 93,
+       "van": 94,
+       "ve": 95,
+       "vn": 96,
+       "w": 97,
+       "x": 98,
+       "y": 99,
+       "z": 100,
+       "zh": 101,
+       "zy": 102,
+       "…": 105
+     }
+   }
+ }
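
The root tokenizer.json describes a small phoneme-level vocabulary (112 symbols) with a normalizer that collapses separators into "_" and a Split pre-tokenizer on "_". Those pieces can be inspected directly as JSON; a minimal standard-library sketch:

# Peek at the phoneme vocabulary and pre-tokenizer rule in tokenizer.json.
import json

with open("tokenizer.json", encoding="utf-8") as f:
    tok = json.load(f)

vocab = tok["model"]["vocab"]
print(len(vocab))                              # 112 symbols
print(vocab["_"], vocab["SP"], vocab["UNK"])   # 0 110 111
print(tok["pre_tokenizer"])                    # Split on "_" (behavior: Isolated)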
tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "add_blank": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "_",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "111": {
+       "content": "UNK",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "languages": [
+     "zh"
+   ],
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "_",
+   "processor_class": "BertVits2Processor",
+   "tokenizer_class": "BertVits2Tokenizer",
+   "unk_token": "UNK",
+   "space_token": "SP"
+ }
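
tokenizer_config.json sets "add_blank": true with "_" as the pad token. In VITS-style front ends this flag usually means a blank id is interspersed between phoneme ids before inference; the sketch below illustrates that convention and is an assumption about the usual behavior, not code taken from this repository:

# Illustrative only: what "add_blank": true typically does in VITS-style models.
def intersperse(ids, blank_id=0):
    # Place blank_id before, between, and after every phoneme id.
    out = [blank_id] * (2 * len(ids) + 1)
    out[1::2] = ids
    return out

phoneme_ids = [40, 62, 37, 8, 65]     # arbitrary ids drawn from vocab.json
print(intersperse(phoneme_ids))       # [0, 40, 0, 62, 0, 37, 0, 8, 0, 65, 0]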
vocab.json ADDED
@@ -0,0 +1,114 @@
+ {
+   "!": 103,
+   "'": 108,
+   ",": 106,
+   "-": 109,
+   ".": 107,
+   "?": 104,
+   "AA": 1,
+   "E": 2,
+   "EE": 3,
+   "En": 4,
+   "N": 5,
+   "OO": 6,
+   "SP": 110,
+   "UNK": 111,
+   "V": 7,
+   "_": 0,
+   "a": 8,
+   "a:": 9,
+   "aa": 10,
+   "ae": 11,
+   "ah": 12,
+   "ai": 13,
+   "an": 14,
+   "ang": 15,
+   "ao": 16,
+   "aw": 17,
+   "ay": 18,
+   "b": 19,
+   "by": 20,
+   "c": 21,
+   "ch": 22,
+   "d": 23,
+   "dh": 24,
+   "dy": 25,
+   "e": 26,
+   "e:": 27,
+   "eh": 28,
+   "ei": 29,
+   "en": 30,
+   "eng": 31,
+   "er": 32,
+   "ey": 33,
+   "f": 34,
+   "g": 35,
+   "gy": 36,
+   "h": 37,
+   "hh": 38,
+   "hy": 39,
+   "i": 40,
+   "i0": 41,
+   "i:": 42,
+   "ia": 43,
+   "ian": 44,
+   "iang": 45,
+   "iao": 46,
+   "ie": 47,
+   "ih": 48,
+   "in": 49,
+   "ing": 50,
+   "iong": 51,
+   "ir": 52,
+   "iu": 53,
+   "iy": 54,
+   "j": 55,
+   "jh": 56,
+   "k": 57,
+   "ky": 58,
+   "l": 59,
+   "m": 60,
+   "my": 61,
+   "n": 62,
+   "ng": 63,
+   "ny": 64,
+   "o": 65,
+   "o:": 66,
+   "ong": 67,
+   "ou": 68,
+   "ow": 69,
+   "oy": 70,
+   "p": 71,
+   "py": 72,
+   "q": 73,
+   "r": 74,
+   "ry": 75,
+   "s": 76,
+   "sh": 77,
+   "t": 78,
+   "th": 79,
+   "ts": 80,
+   "ty": 81,
+   "u": 82,
+   "u:": 83,
+   "ua": 84,
+   "uai": 85,
+   "uan": 86,
+   "uang": 87,
+   "uh": 88,
+   "ui": 89,
+   "un": 90,
+   "uo": 91,
+   "uw": 92,
+   "v": 93,
+   "van": 94,
+   "ve": 95,
+   "vn": 96,
+   "w": 97,
+   "x": 98,
+   "y": 99,
+   "z": 100,
+   "zh": 101,
+   "zy": 102,
+   "…": 105
+ }