rm wandb and tokenizer path
- config.json +2 -2
- run.sh +1 -2
- run_speech_recognition_ctc.py +1 -2
- special_tokens_map.json +1 -1
- tokenizer_config.json +1 -1
- vocab.json +1 -1
config.json
CHANGED
@@ -64,7 +64,7 @@
 "mask_feature_prob": 0.33,
 "mask_time_length": 10,
 "mask_time_min_masks": 2,
-"mask_time_prob": 0.
+"mask_time_prob": 0.75,
 "model_type": "wav2vec2",
 "num_adapter_layers": 3,
 "num_attention_heads": 16,
@@ -102,6 +102,6 @@
 "torch_dtype": "float32",
 "transformers_version": "4.17.0.dev0",
 "use_weighted_layer_sum": false,
-"vocab_size":
+"vocab_size": 216,
 "xvector_output_dim": 512
 }
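In this config, "mask_time_prob" is the SpecAugment probability that a given time step starts a masked span of "mask_time_length" frames, and "vocab_size" has to match the size of the CTC tokenizer's vocabulary. A minimal sketch of reading the updated fields back with transformers (the "./" path is illustrative and assumes this config.json sits in the working directory):

from transformers import Wav2Vec2Config

# Load the configuration committed in this repo (path is an assumption).
config = Wav2Vec2Config.from_pretrained("./")

print(config.mask_time_prob)  # 0.75
print(config.vocab_size)      # 216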
run.sh
CHANGED
@@ -4,7 +4,6 @@ python run_speech_recognition_ctc.py \
 --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
 --dataset_config_name="fr" \
 --output_dir="./" \
---tokenizer_name_or_path="./" \
 --overwrite_output_dir \
 --num_train_epochs="5" \
 --per_device_train_batch_size="64" \
@@ -23,7 +22,7 @@ python run_speech_recognition_ctc.py \
 --save_total_limit="3" \
 --freeze_feature_encoder \
 --feat_proj_dropout="0.0" \
---mask_time_prob="0.
+--mask_time_prob="0.75" \
 --mask_time_length="10" \
 --mask_feature_prob="0.33" \
 --mask_feature_length="10" \
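The masking flags above are applied as overrides on the model config before fine-tuning. A hedged sketch of that mapping with the values from this command (the override pattern follows the public run_speech_recognition_ctc.py example and is an assumption about this copy of the script):

from transformers import Wav2Vec2Config

# Start from the base checkpoint's config and apply the SpecAugment-related flags.
config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-xls-r-300m")
config.update({
    "feat_proj_dropout": 0.0,
    "mask_time_prob": 0.75,
    "mask_time_length": 10,
    "mask_feature_prob": 0.33,
    "mask_feature_length": 10,
})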
run_speech_recognition_ctc.py
CHANGED
@@ -511,8 +511,7 @@ def main():
     tokenizer_kwargs = {
         "config": config if config.tokenizer_class is not None else None,
         "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
-        "bos_token":
-        "eos_token": None,
+        "bos_token": "<s>",
         "unk_token": unk_token,
         "pad_token": pad_token,
         "word_delimiter_token": word_delimiter_token,
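The resulting tokenizer_kwargs are forwarded when the script instantiates the tokenizer. A simplified sketch of that call (the exact call site and the "./" path are assumptions; the token values match the tokenizer files in this commit):

from transformers import AutoTokenizer

# Hypothetical, pared-down version of how the kwargs are consumed.
tokenizer_kwargs = {
    "bos_token": "<s>",
    "unk_token": "[UNK]",
    "pad_token": "[PAD]",
    "word_delimiter_token": "|",
}
tokenizer = AutoTokenizer.from_pretrained("./", **tokenizer_kwargs)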
special_tokens_map.json
CHANGED
@@ -1 +1 @@
-{"
+{"unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json
CHANGED
@@ -1 +1 @@
-{"unk_token": "[UNK]", "bos_token":
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": null, "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json
CHANGED
@@ -1 +1 @@
-
{"'": 1, "(": 2, ")": 3, "*": 4, ".": 5, "/": 6, "1": 7, "2": 8, "=": 9, "C": 10, "E": 11, "N": 12, "Q": 13, "R": 14, "Z": 15, "`": 16, "a": 17, "b": 18, "c": 19, "d": 20, "e": 21, "f": 22, "g": 23, "h": 24, "i": 25, "j": 26, "k": 27, "l": 28, "m": 29, "n": 30, "o": 31, "p": 32, "q": 33, "r": 34, "s": 35, "t": 36, "u": 37, "v": 38, "w": 39, "x": 40, "y": 41, "z": 42, "{": 43, "|": 0, "}": 45, "~": 46, "
+
{"'": 1, "(": 2, ")": 3, "*": 4, ".": 5, "/": 6, "1": 7, "2": 8, "=": 9, "C": 10, "E": 11, "N": 12, "Q": 13, "R": 14, "Z": 15, "`": 16, "a": 17, "b": 18, "c": 19, "d": 20, "e": 21, "f": 22, "g": 23, "h": 24, "i": 25, "j": 26, "k": 27, "l": 28, "m": 29, "n": 30, "o": 31, "p": 32, "q": 33, "r": 34, "s": 35, "t": 36, "u": 37, "v": 38, "w": 39, "x": 40, "y": 41, "z": 42, "{": 43, "|": 0, "}": 45, "~": 46, "§": 47, "«": 48, "®": 49, "°": 50, "±": 51, "·": 52, "»": 53, "×": 54, "ß": 55, "æ": 56, "ç": 57, "ð": 58, "ø": 59, "þ": 60, "đ": 61, "ħ": 62, "ı": 63, "ł": 64, "œ": 65, "ǀ": 66, "ǃ": 67, "ɑ": 68, "ə": 69, "ɨ": 70, "ʉ": 71, "ʔ": 72, "ʻ": 73, "ʼ": 74, "ʽ": 75, "ʾ": 76, "ʿ": 77, "ː": 78, "α": 79, "β": 80, "γ": 81, "δ": 82, "ε": 83, "ζ": 84, "η": 85, "θ": 86, "ι": 87, "κ": 88, "λ": 89, "μ": 90, "ν": 91, "ο": 92, "π": 93, "ρ": 94, "ς": 95, "σ": 96, "τ": 97, "υ": 98, "φ": 99, "χ": 100, "ψ": 101, "ω": 102, "а": 103, "г": 104, "е": 105, "з": 106, "и": 107, "к": 108, "м": 109, "н": 110, "о": 111, "п": 112, "р": 113, "ц": 114, "ч": 115, "э": 116, "я": 117, "є": 118, "і": 119, "ј": 120, "џ": 121, "ҫ": 122, "ӌ": 123, "գ": 124, "զ": 125, "ا": 126, "ب": 127, "ة": 128, "د": 129, "ر": 130, "ل": 131, "م": 132, "ن": 133, "و": 134, "ي": 135, "ᄀ": 136, "ᄆ": 137, "ᄉ": 138, "ᄌ": 139, "ᅡ": 140, "ᅢ": 141, "ᅥ": 142, "ᅩ": 143, "ᅵ": 144, "ᆨ": 145, "ᆷ": 146, "ᆸ": 147, "ᆼ": 148, "ቀ": 149, "ከ": 150, "ወ": 151, "ደ": 152, "ጀ": 153, "‐": 154, "–": 155, "—": 156, "―": 157, "’": 158, "„": 159, "†": 160, "′": 161, "‹": 162, "›": 163, "⁄": 164, "₽": 165, "→": 166, "↔": 167, "∅": 168, "∆": 169, "∈": 170, "−": 171, "∞": 172, "∨": 173, "∼": 174, "≥": 175, "⊨": 176, "⋅": 177, "─": 178, "☉": 179, "ⱅ": 180, "ⱎ": 181, "い": 182, "う": 183, "た": 184, "つ": 185, "の": 186, "ひ": 187, "へ": 188, "ま": 189, "む": 190, "め": 191, "も": 192, "や": 193, "三": 194, "丹": 195, "乃": 196, "京": 197, "保": 198, "北": 199, "厳": 200, "宇": 201, "扬": 202, "文": 203, "星": 204, "术": 205, "杜": 206, "津": 207, "牡": 208, "甌": 209, "美": 210, "西": 211, "貴": 212, "青": 213, "馆": 214, "ꝑ": 215, "[UNK]": 215, "[PAD]": 216}
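Together, vocab.json, tokenizer_config.json and special_tokens_map.json define the character-level CTC tokenizer for this model. A small sketch of loading it and checking the special tokens (the "./" path is illustrative):

from transformers import Wav2Vec2CTCTokenizer

# Load the tokenizer from the three files committed here (path is an assumption).
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

print(tokenizer.vocab_size)                      # should line up with config.vocab_size
print(tokenizer.unk_token, tokenizer.pad_token)  # "[UNK]" "[PAD]"
print(tokenizer.word_delimiter_token)            # "|"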