Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -75,6 +75,18 @@ def show_memory_info(hint):
|
|
75 |
memory = info.rss / 1024.0 / 1024
|
76 |
print("{} 内存占用: {} MB".format(hint, memory))
|
77 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
78 |
|
79 |
def get_text(text, hps, is_symbol):
|
80 |
text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
|
@@ -101,8 +113,6 @@ def to_symbol_fn(is_symbol_input, input_text, temp_text):
|
|
101 |
|
102 |
def infer(text_raw, character, language, duration, noise_scale, noise_scale_w, is_symbol):
|
103 |
# check character & duraction parameter
|
104 |
-
# remove \n
|
105 |
-
text_raw = text_raw.replace("\n", "")
|
106 |
if language not in languages:
|
107 |
print("Error: No such language\n")
|
108 |
return "Error: No such language", None, None, None
|
@@ -136,10 +146,7 @@ def infer(text_raw, character, language, duration, noise_scale, noise_scale_w, i
|
|
136 |
x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
|
137 |
sid = torch.LongTensor([char_id])
|
138 |
try:
|
139 |
-
|
140 |
-
jp2phoneme = japanese_cleaners(text)
|
141 |
-
else:
|
142 |
-
jp2phoneme = text
|
143 |
durations = net_g.predict_duration(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale,
|
144 |
noise_scale_w=noise_scale_w, length_scale=duration)
|
145 |
char_dur_list = []
|
|
|
75 |
memory = info.rss / 1024.0 / 1024
|
76 |
print("{} 内存占用: {} MB".format(hint, memory))
|
77 |
|
78 |
+
def text_to_phoneme(text, symbols, is_symbol):
    """Convert input text to a phoneme string containing only known symbols.

    Args:
        text: Raw text (Japanese) or an already-symbolized phoneme string.
        symbols: Iterable of valid phoneme symbols (order defines their ids).
        is_symbol: True if ``text`` is already a phoneme string and should
            not be run through the cleaner.

    Returns:
        A string made of the characters of the (cleaned) input, with any
        character not present in ``symbols`` dropped.
    """
    # Set of known symbols for O(1) membership tests. The original built a
    # symbol -> id dict, but the id was never used — only membership matters.
    known_symbols = {s for s in symbols}
    # When the input is raw text, normalize it to phonemes first.
    # NOTE(review): japanese_cleaners is defined elsewhere in this file.
    # BUGFIX: the previous version returned "" when is_symbol was True,
    # silently dropping the input; the pre-refactor code returned the text
    # itself in that case, so symbol input is now filtered like cleaned text.
    clean_text = text if is_symbol else japanese_cleaners(text)
    # Keep only symbols the model knows; join is linear, += was quadratic.
    return "".join(ch for ch in clean_text if ch in known_symbols)
|
90 |
|
91 |
def get_text(text, hps, is_symbol):
|
92 |
text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
|
|
|
113 |
|
114 |
def infer(text_raw, character, language, duration, noise_scale, noise_scale_w, is_symbol):
|
115 |
# check character & duraction parameter
|
|
|
|
|
116 |
if language not in languages:
|
117 |
print("Error: No such language\n")
|
118 |
return "Error: No such language", None, None, None
|
|
|
146 |
x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
|
147 |
sid = torch.LongTensor([char_id])
|
148 |
try:
|
149 |
+
jp2phoneme = text_to_phoneme(text, hps.symbols, is_symbol)
|
|
|
|
|
|
|
150 |
durations = net_g.predict_duration(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale,
|
151 |
noise_scale_w=noise_scale_w, length_scale=duration)
|
152 |
char_dur_list = []
|