# Utility helpers: monotonic alignment, data-list loading, masking, config
# parsing, and sentence splitting for TTS training/inference.
import copy
import json
import re

import librosa
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torchaudio
from munch import Munch
from torch import nn

from monotonic_align import maximum_path
from monotonic_align import mask_from_lens
from monotonic_align.core import maximum_path_c
def maximum_path(neg_cent, mask):
    """Cython-optimized monotonic alignment search.

    NOTE(review): this definition shadows the ``maximum_path`` imported
    from ``monotonic_align`` at the top of the file — presumably on
    purpose; confirm the import is only kept for compatibility.

    neg_cent: [b, t_t, t_s] negative-centroid scores
    mask: [b, t_t, t_s] validity mask
    Returns the int path tensor cast back to the input's device/dtype.
    """
    out_device, out_dtype = neg_cent.device, neg_cent.dtype
    # The Cython kernel wants contiguous float32 / int32 numpy buffers.
    scores = np.ascontiguousarray(neg_cent.data.cpu().numpy().astype(np.float32))
    path = np.ascontiguousarray(np.zeros(scores.shape, dtype=np.int32))
    # Per-item valid lengths along each axis, recovered from the mask.
    t_t_lens = np.ascontiguousarray(
        mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
    )
    t_s_lens = np.ascontiguousarray(
        mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
    )
    maximum_path_c(path, scores, t_t_lens, t_s_lens)
    return torch.from_numpy(path).to(device=out_device, dtype=out_dtype)
def get_data_path_list(train_path=None, val_path=None):
    """Read the training and validation file lists.

    Either path may be None, in which case the conventional Data/
    location is used. Returns (train_lines, val_lines), each the raw
    ``readlines()`` output (newlines preserved). Undecodable bytes are
    ignored rather than raising.
    """
    def _read_lines(path):
        with open(path, "r", encoding="utf-8", errors="ignore") as f:
            return f.readlines()

    if train_path is None:
        train_path = "Data/train_list.txt"
    if val_path is None:
        val_path = "Data/val_list.txt"
    return _read_lines(train_path), _read_lines(val_path)
def length_to_mask(lengths):
    """Build a boolean padding mask from a 1-D tensor of lengths.

    Entry (i, j) is True where j >= lengths[i], i.e. True marks padded
    positions. Output shape: (len(lengths), lengths.max()).
    """
    positions = (
        torch.arange(lengths.max())
        .unsqueeze(0)
        .expand(lengths.shape[0], -1)
        .type_as(lengths)  # match dtype/device of lengths
    )
    return torch.gt(positions + 1, lengths.unsqueeze(1))
# for norm consistency loss
def log_norm(x, mean=-4, std=4, dim=2):
    """Norm-consistency helper.

    Undo the log-mel normalization (normalized log mel -> mel), take the
    vector norm along ``dim``, and return its log.
    """
    mel = torch.exp(x * std + mean)
    return torch.log(mel.norm(dim=dim))
def get_image(arrs):
    """Render an array with imshow on a headless matplotlib figure."""
    plt.switch_backend("agg")  # non-interactive backend: safe without a display
    figure = plt.figure()
    plt.gca().imshow(arrs)
    return figure
def recursive_munch(d):
    """Recursively convert dicts to Munch objects.

    Lists are mapped element-wise; any other value is returned as-is.
    """
    if isinstance(d, dict):
        return Munch((key, recursive_munch(value)) for key, value in d.items())
    if isinstance(d, list):
        return [recursive_munch(item) for item in d]
    return d
def log_print(message, logger):
    """Emit *message* both to the logger (INFO level) and to stdout."""
    logger.info(message)
    print(message)
def get_hparams_from_file(config_path):
    """Load a JSON config file and wrap it in an HParams instance."""
    with open(config_path, "r", encoding="utf-8") as f:
        config = json.load(f)
    return HParams(**config)
class HParams:
    """Dict-like hyper-parameter container with attribute access.

    Nested dicts are converted recursively, so values can be reached
    either as ``h.section.key`` or ``h["section"]["key"]``. Supports
    ``len``, ``in``, item get/set, and the dict view methods.
    """

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            # isinstance instead of `type(v) == dict` so dict subclasses
            # (OrderedDict, Munch, ...) are converted too.
            if isinstance(v, dict):
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
def string_to_bits(string, pad_len=8):
    """Encode *string* as a (pad_len, 8) array of ASCII bits.

    Each row is the 8-bit big-endian binary representation of one
    character. Unused rows keep the bit pattern of a space (0b00100000),
    and input longer than pad_len is truncated.

    Bug fix: the original crashed on an empty string (shape-(0,) array
    assigned into a (0, 8) slice); an empty input now yields all-padding.

    NOTE(review): characters with ord() > 255 would produce more than
    8 bits per row and are not supported — confirm inputs are ASCII.
    """
    out = np.zeros((pad_len, 8), dtype=int)
    out[:, 2] = 1  # padding rows decode back to ' ' (ASCII 32)
    if not string:
        return out
    # One row of 8 bits per character.
    bit_rows = [[int(b) for b in bin(ord(ch))[2:].zfill(8)] for ch in string]
    bits = np.array(bit_rows)
    n = min(pad_len, len(bits))
    out[:n] = bits[:n]
    return out
def bits_to_string(bits_array):
    """Decode rows of 8 bits back into the ASCII string they encode.

    Inverse of ``string_to_bits`` (padding rows decode to spaces).
    """
    chars = []
    for row in bits_array:
        # Row of bits -> binary string -> character code.
        code = int(''.join(str(bit) for bit in row), 2)
        chars.append(chr(code))
    return ''.join(chars)
def split_sentence(text, min_len=10, language_str='[EN]'):
    """Split *text* into chunks of roughly >= min_len units.

    Dispatches to the Latin-script splitter for English and to the
    Chinese splitter for everything else.

    Bug fix: the default tag '[EN]' never matched the bare 'EN' check,
    so English input was always routed to the Chinese splitter; both
    spellings of the tag are now accepted.
    """
    if language_str in ('EN', '[EN]'):
        sentences = split_sentences_latin(text, min_len=min_len)
    else:
        sentences = split_sentences_zh(text, min_len=min_len)
    return sentences
def split_sentences_latin(text, min_len=10):
    """Split long Latin-script text into short sentence chunks.

    Args:
        text: input text.
        min_len: minimum chunk length, counted in whitespace-split words.
    Returns:
        List[str]: list of output sentence chunks.
    """
    # Normalize dirty punctuation, quotes and whitespace.
    text = re.sub('[。!?;]', '.', text)
    text = re.sub('[,]', ',', text)
    text = re.sub('[“”]', '"', text)
    text = re.sub('[‘’]', "'", text)
    text = re.sub(r"[\<\>\(\)\[\]\"\«\»]+", "", text)
    text = re.sub('[\n\t ]+', ' ', text)
    # Insert an explicit split marker after sentence punctuation.
    text = re.sub('([,.!?;])', r'\1 $#!', text)
    # Split on the marker and drop a trailing empty piece.
    sentences = [piece.strip() for piece in text.split('$#!')]
    if sentences and len(sentences[-1]) == 0:
        sentences.pop()
    # Greedily accumulate pieces until the word budget is exceeded.
    chunks = []
    buffer = []
    word_count = 0
    last = len(sentences) - 1
    for idx, sentence in enumerate(sentences):
        buffer.append(sentence)
        word_count += len(sentence.split(" "))
        if word_count > min_len or idx == last:
            chunks.append(' '.join(buffer))
            buffer = []
            word_count = 0
    return merge_short_sentences_latin(chunks)
def merge_short_sentences_latin(sens):
    """Avoid chunks of <= 2 words by merging them with a neighbour.

    While scanning, the current sentence is appended to the previous
    chunk whenever that chunk is short; afterwards a short trailing
    chunk is folded into its predecessor.

    Fix: the bare ``except: pass`` (which silently swallowed the
    IndexError for lists shorter than two) is replaced by an explicit
    length guard.

    Args:
        sens: list of input sentences.
    Returns:
        List[str]: list of merged sentences.
    """
    sens_out = []
    for s in sens:
        # If the previous chunk is too short, merge the current
        # sentence into it instead of starting a new chunk.
        if sens_out and len(sens_out[-1].split(" ")) <= 2:
            sens_out[-1] = sens_out[-1] + " " + s
        else:
            sens_out.append(s)
    # Fold a short trailing chunk into its predecessor, if one exists.
    if len(sens_out) > 1 and len(sens_out[-1].split(" ")) <= 2:
        sens_out[-2] = sens_out[-2] + " " + sens_out[-1]
        sens_out.pop(-1)
    return sens_out
def split_sentences_zh(text, min_len=10):
    """Split Chinese text into chunks of roughly >= min_len characters."""
    text = re.sub('[。!?;]', '.', text)
    text = re.sub('[,]', ',', text)
    # Collapse newlines, tabs and runs of spaces into a single space.
    text = re.sub('[\n\t ]+', ' ', text)
    # Add a split marker after punctuation.
    text = re.sub('([,.!?;])', r'\1 $#!', text)
    # Split on the marker, strip whitespace, drop a trailing empty piece.
    sentences = [piece.strip() for piece in text.split('$#!')]
    if sentences and len(sentences[-1]) == 0:
        sentences.pop()
    # Greedily accumulate pieces until the character budget is exceeded.
    chunks = []
    buffer = []
    char_count = 0
    last = len(sentences) - 1
    for idx, sentence in enumerate(sentences):
        buffer.append(sentence)
        char_count += len(sentence)
        if char_count > min_len or idx == last:
            chunks.append(' '.join(buffer))
            buffer = []
            char_count = 0
    return merge_short_sentences_zh(chunks)
def merge_short_sentences_zh(sens):
    """Avoid short sentences by merging them with a neighbour.

    Same scheme as the Latin variant but length is counted in
    characters rather than words: a sentence is merged into the
    previous chunk when that chunk is <= 2 characters, and a short
    trailing chunk is folded into its predecessor.

    Fix: the bare ``except: pass`` (which silently swallowed the
    IndexError for lists shorter than two) is replaced by an explicit
    length guard.

    Args:
        sens: list of input sentences.
    Returns:
        List[str]: list of merged sentences.
    """
    sens_out = []
    for s in sens:
        # If the previous chunk is too short, merge the current
        # sentence into it instead of starting a new chunk.
        if sens_out and len(sens_out[-1]) <= 2:
            sens_out[-1] = sens_out[-1] + " " + s
        else:
            sens_out.append(s)
    # Fold a short trailing chunk into its predecessor, if one exists.
    if len(sens_out) > 1 and len(sens_out[-1]) <= 2:
        sens_out[-2] = sens_out[-2] + " " + sens_out[-1]
        sens_out.pop(-1)
    return sens_out