wasmdashai committed on
Commit
a273093
·
verified ·
1 Parent(s): b75e1c0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -127,7 +127,7 @@ def get_state_grad_loss(k1=True,
127
  discriminator=True):
128
  return {'k1':k1,'mel':mel,'duration':duration,'generator':generator,'discriminator':discriminator}
129
 
130
-
131
  def clip_grad_value_(parameters, clip_value, norm_type=2):
132
  if isinstance(parameters, torch.Tensor):
133
  parameters = [parameters]
@@ -145,7 +145,7 @@ def clip_grad_value_(parameters, clip_value, norm_type=2):
145
  total_norm = total_norm ** (1. / norm_type)
146
  return total_norm
147
 
148
-
149
  def get_embed_speaker(self,speaker_id):
150
  if self.config.num_speakers > 1 and speaker_id is not None:
151
  if isinstance(speaker_id, int):
@@ -160,6 +160,7 @@ def get_embed_speaker(self,speaker_id):
160
  return self.embed_speaker(speaker_id).unsqueeze(-1)
161
  else:
162
  return None
 
163
  def get_data_loader(train_dataset_dirs,eval_dataset_dir,full_generation_dir,device):
164
  ctrain_datasets=[]
165
  for dataset_dir ,id_sp in train_dataset_dirs:
 
127
  discriminator=True):
128
  return {'k1':k1,'mel':mel,'duration':duration,'generator':generator,'discriminator':discriminator}
129
 
130
+ @spaces.GPU
131
  def clip_grad_value_(parameters, clip_value, norm_type=2):
132
  if isinstance(parameters, torch.Tensor):
133
  parameters = [parameters]
 
145
  total_norm = total_norm ** (1. / norm_type)
146
  return total_norm
147
 
148
+ @spaces.GPU
149
  def get_embed_speaker(self,speaker_id):
150
  if self.config.num_speakers > 1 and speaker_id is not None:
151
  if isinstance(speaker_id, int):
 
160
  return self.embed_speaker(speaker_id).unsqueeze(-1)
161
  else:
162
  return None
163
+ @spaces.GPU
164
  def get_data_loader(train_dataset_dirs,eval_dataset_dir,full_generation_dir,device):
165
  ctrain_datasets=[]
166
  for dataset_dir ,id_sp in train_dataset_dirs: