yangwang825 committed on
Commit
f9ad84b
·
verified ·
1 Parent(s): c1c8bcd

Upload model

Browse files
Files changed (2) hide show
  1. config.json +5 -3
  2. modeling_wav2vec2_spkreg.py +3 -3
config.json CHANGED
@@ -1,4 +1,5 @@
1
  {
 
2
  "activation_dropout": 0.0,
3
  "adapter_attn_dim": null,
4
  "adapter_kernel_size": 3,
@@ -6,11 +7,12 @@
6
  "add_adapter": false,
7
  "apply_spec_augment": true,
8
  "architectures": [
9
- "Wav2Vec2ForPreTraining"
10
  ],
11
  "attention_dropout": 0.1,
12
  "auto_map": {
13
- "AutoConfig": "configuration_wav2vec2_spkreg.Wav2Vec2SpkRegConfig"
 
14
  },
15
  "bos_token_id": 1,
16
  "classifier_proj_size": 256,
@@ -56,7 +58,6 @@
56
  "feat_quantizer_dropout": 0.0,
57
  "final_dropout": 0.0,
58
  "freeze_feat_extract_train": true,
59
- "gradient_checkpointing": true,
60
  "hidden_act": "gelu",
61
  "hidden_dropout": 0.1,
62
  "hidden_size": 768,
@@ -119,6 +120,7 @@
119
  1,
120
  1
121
  ],
 
122
  "transformers_version": "4.46.2",
123
  "use_weighted_layer_sum": false,
124
  "vocab_size": 32,
 
1
  {
2
+ "_name_or_path": "facebook/wav2vec2-base",
3
  "activation_dropout": 0.0,
4
  "adapter_attn_dim": null,
5
  "adapter_kernel_size": 3,
 
7
  "add_adapter": false,
8
  "apply_spec_augment": true,
9
  "architectures": [
10
+ "Wav2Vec2SpkRegModel"
11
  ],
12
  "attention_dropout": 0.1,
13
  "auto_map": {
14
+ "AutoConfig": "configuration_wav2vec2_spkreg.Wav2Vec2SpkRegConfig",
15
+ "AutoModel": "modeling_wav2vec2_spkreg.Wav2Vec2SpkRegModel"
16
  },
17
  "bos_token_id": 1,
18
  "classifier_proj_size": 256,
 
58
  "feat_quantizer_dropout": 0.0,
59
  "final_dropout": 0.0,
60
  "freeze_feat_extract_train": true,
 
61
  "hidden_act": "gelu",
62
  "hidden_dropout": 0.1,
63
  "hidden_size": 768,
 
120
  1,
121
  1
122
  ],
123
+ "torch_dtype": "float32",
124
  "transformers_version": "4.46.2",
125
  "use_weighted_layer_sum": false,
126
  "vocab_size": 32,
modeling_wav2vec2_spkreg.py CHANGED
@@ -732,16 +732,16 @@ class Wav2Vec2SpkRegForSequenceClassification(Wav2Vec2SpkRegPreTrainedModel):
732
 
733
  loss = None
734
  if labels is not None:
735
- if self.loss_fct == 'cross_entropy':
736
  loss_fct = nn.CrossEntropyLoss(
737
  label_smoothing=self.config.label_smoothing,
738
  reduction=self.config.reduction
739
  )
740
- elif self.loss_fct == 'additive_margin':
741
  loss_fct = AMSoftmaxLoss(
742
  self.config.num_labels, self.config.scale, self.config.margin
743
  )
744
- elif self.loss_fct == 'additive_angular_margin':
745
  loss_fct = AAMSoftmaxLoss(
746
  self.config.num_labels, self.config.scale, self.config.margin, self.config.easy_margin
747
  )
 
732
 
733
  loss = None
734
  if labels is not None:
735
+ if self.config.loss_fct == 'cross_entropy':
736
  loss_fct = nn.CrossEntropyLoss(
737
  label_smoothing=self.config.label_smoothing,
738
  reduction=self.config.reduction
739
  )
740
+ elif self.config.loss_fct == 'additive_margin':
741
  loss_fct = AMSoftmaxLoss(
742
  self.config.num_labels, self.config.scale, self.config.margin
743
  )
744
+ elif self.config.loss_fct == 'additive_angular_margin':
745
  loss_fct = AAMSoftmaxLoss(
746
  self.config.num_labels, self.config.scale, self.config.margin, self.config.easy_margin
747
  )