yangwang825 committed on
Commit
9b12b7b
1 Parent(s): 8067293

Upload model

Browse files
Files changed (1) hide show
  1. config.json +3 -2
config.json CHANGED
@@ -1,4 +1,5 @@
1
  {
 
2
  "activation_dropout": 0.0,
3
  "adapter_attn_dim": null,
4
  "adapter_kernel_size": 3,
@@ -6,7 +7,7 @@
6
  "add_adapter": false,
7
  "apply_spec_augment": true,
8
  "architectures": [
9
- "Wav2Vec2ForPreTraining"
10
  ],
11
  "attention_dropout": 0.1,
12
  "bos_token_id": 1,
@@ -53,7 +54,6 @@
53
  "feat_quantizer_dropout": 0.0,
54
  "final_dropout": 0.0,
55
  "freeze_feat_extract_train": true,
56
- "gradient_checkpointing": true,
57
  "hidden_act": "gelu",
58
  "hidden_dropout": 0.1,
59
  "hidden_size": 768,
@@ -117,6 +117,7 @@
117
  1,
118
  1
119
  ],
 
120
  "transformers_version": "4.46.2",
121
  "use_weighted_layer_sum": false,
122
  "vocab_size": 32,
 
1
  {
2
+ "_name_or_path": "facebook/wav2vec2-base",
3
  "activation_dropout": 0.0,
4
  "adapter_attn_dim": null,
5
  "adapter_kernel_size": 3,
 
7
  "add_adapter": false,
8
  "apply_spec_augment": true,
9
  "architectures": [
10
+ "Wav2Vec2SpkRegModel"
11
  ],
12
  "attention_dropout": 0.1,
13
  "bos_token_id": 1,
 
54
  "feat_quantizer_dropout": 0.0,
55
  "final_dropout": 0.0,
56
  "freeze_feat_extract_train": true,
 
57
  "hidden_act": "gelu",
58
  "hidden_dropout": 0.1,
59
  "hidden_size": 768,
 
117
  1,
118
  1
119
  ],
120
+ "torch_dtype": "float32",
121
  "transformers_version": "4.46.2",
122
  "use_weighted_layer_sum": false,
123
  "vocab_size": 32,