Update ljspeechimportable.py
ljspeechimportable.py  CHANGED  (+3 -2)
@@ -658,7 +658,7 @@ _ = [model[key].eval() for key in model]
 _ = [model[key].to(device) for key in model]
 
 # params_whole = torch.load("Models/LJSpeech/epoch_2nd_00100.pth", map_location='cpu')
-params_whole = torch.load("Models/Kaede.pth")
+params_whole = torch.load("Models/Kaede.pth", map_location='cpu')
 params = params_whole['net']
 
 for key in model:
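The only functional change in this hunk is the added map_location='cpu': without it, torch.load tries to restore the checkpoint tensors onto the CUDA device they were saved from and fails on a CPU-only machine (such as a sleeping Space). A minimal, self-contained sketch of what the load-and-restore step amounts to; the toy model dict and the restore loop are assumptions, only the checkpoint path and the params_whole['net'] access come from the file:

import torch
import torch.nn as nn

# Toy stand-in for the script's `model` dict of sub-modules (hypothetical shapes).
model = {"decoder": nn.Linear(4, 4), "text_encoder": nn.Linear(4, 4)}

# map_location='cpu' remaps any CUDA tensors stored in the checkpoint to host
# memory, so the file loads even where no GPU is available.
params_whole = torch.load("Models/Kaede.pth", map_location="cpu")
params = params_whole["net"]  # per-module state dicts, keyed like `model`

for key in model:
    if key in params:
        # Assumption: each entry is a plain state_dict for the matching module;
        # the actual restore loop in ljspeechimportable.py may differ.
        model[key].load_state_dict(params[key])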
@@ -696,6 +696,7 @@ def inference(text, noise, diffusion_steps=5, embedding_scale=1):
     # ps = ' '.join(ps)
 
     text = japanese_cleaners4(text)
+    print(text)
 
     tokens = textclenaer(text)
     tokens.insert(0, 0)
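The print(text) added here only echoes the cleaned string that japanese_cleaners4 produced, right before it is tokenized, which makes it easy to spot cleaning problems in the Space logs. A rough, self-contained sketch of the surrounding preprocessing; the cleaner and symbol table below are hypothetical stand-ins for the script's japanese_cleaners4 and textclenaer:

import torch

# Hypothetical symbol table; index 0 is reserved, mirroring tokens.insert(0, 0).
SYMBOLS = {ch: i + 1 for i, ch in enumerate("abcdefghijklmnopqrstuvwxyz .,")}

def clean(text: str) -> str:
    # The real cleaner phonemizes Japanese text; lowercasing stands in here.
    return text.lower()

def to_ids(text: str) -> list:
    return [SYMBOLS[ch] for ch in text if ch in SYMBOLS]

text = clean("Konnichiwa.")
print(text)                        # the debug print added in this commit

tokens = to_ids(text)
tokens.insert(0, 0)                # prepend index 0, as the script does
tokens = torch.LongTensor(tokens).unsqueeze(0)   # shape (1, seq_len)
print(tokens.shape)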
@@ -746,7 +747,7 @@ def LFinference(text, s_prev, noise, alpha=0.7, diffusion_steps=5, embedding_sca
     # ps = word_tokenize(ps[0])
     # ps = ' '.join(ps)
     text = japanese_cleaners4(text)
-
+    print(text)
     tokens = textclenaer(text)
     tokens.insert(0, 0)
     tokens = torch.LongTensor(tokens).to(device).unsqueeze(0)
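Both inference and LFinference now print the cleaned text before synthesis, so every call leaves a trace of what is actually being spoken. A hedged usage sketch, assuming the script and its Models/Kaede.pth checkpoint are importable locally and that the noise latent has the (1, 1, 256) shape used in typical StyleTTS demo code (check the rest of the file for the real shape):

import torch
from ljspeechimportable import inference  # loads the model at import time

noise = torch.randn(1, 1, 256)  # assumed latent shape, not read from this diff

# The cleaned text is printed (the change in this commit) and synthesis runs.
wav = inference("こんにちは。", noise, diffusion_steps=5, embedding_scale=1)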