# Model-card metadata (not Python code) — kept as comments so the file parses:
# Datasets:
# Tasks:
#     Automatic Speech Recognition
# Languages:
#     English
# Standard library
import os

# Third-party
import librosa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from jiwer import wer
from scipy.io import wavfile

# Keras
import keras.layers
from keras.src.applications.densenet import layers  # accidental IDE auto-import; kept, but shadowed below
from keras import layers  # public API path for the layers module
# ----- Paths to the LJSpeech corpus -----
data_path = r"D:\MyCode\Python\dataset\LJSpeech-1.1"
wave_path = f"{data_path}/wavs/"
metadata_path = f"{data_path}/metadata.csv"

# metadata.csv is pipe-separated with no header row; quoting=3 (QUOTE_NONE)
# keeps apostrophes inside the transcripts from being parsed as quotes.
metadata_df = pd.read_csv(metadata_path, sep="|", header=None, quoting=3)
metadata_df.columns = ["file_name", "transcription", "normalized_transcription"]
metadata_df = metadata_df[["file_name", "transcription", "normalized_transcription"]]

# Shuffle all rows and rebuild a clean 0..n-1 index.
metadata_df = metadata_df.sample(frac=1).reset_index(drop=True)
print(metadata_df.head(10))

# 90/10 train/test split on the shuffled frame.
split = int(len(metadata_df) * 0.90)
df_train = metadata_df[:split]
df_test = metadata_df[split:]

# ----- STFT and training hyper-parameters -----
frame_length = 256
frame_step = 160
fft_length = 384
batch_size = 32
epochs = 10
# preprocessing: character-level vocabulary.
# Latin base alphabet plus Vietnamese letters and a little punctuation.
characters = list("abcdefghijklmnopqrstuvwxyzăâêôơưđ'?! ")
# char -> integer id; unknown characters map to the empty OOV token.
char_to_num = keras.layers.StringLookup(vocabulary=characters, oov_token="")
# Inverse mapping (id -> char), built from the exact same vocabulary.
num_to_char = keras.layers.StringLookup(
    vocabulary=char_to_num.get_vocabulary(), oov_token="", invert=True
)
def encode_single_sample(wav_file, label):
    """Map one (file stem, transcript) pair to a (spectrogram, label) pair.

    Args:
        wav_file: string tensor, file name without the ".wav" extension.
        label: string tensor, the normalized transcription.

    Returns:
        spectrogram: float32 tensor of shape (time, fft_length // 2 + 1),
            per-frame normalized. Batching is done later by padded_batch.
        label: int tensor of character ids produced by ``char_to_num``.
    """
    # Build the full path and decode the PCM wav file.
    file_path = tf.strings.join([wave_path, wav_file, ".wav"], separator="")
    file = tf.io.read_file(file_path)
    audio, _ = tf.audio.decode_wav(file)
    audio = tf.squeeze(audio, axis=-1)  # (samples, 1) -> (samples,)
    audio = tf.cast(audio, tf.float32)
    # Magnitude spectrogram; the square root compresses the dynamic range.
    spectrogram = tf.signal.stft(
        audio, frame_length=frame_length, frame_step=frame_step, fft_length=fft_length
    )
    spectrogram = tf.abs(spectrogram)
    spectrogram = tf.math.pow(spectrogram, 0.5)
    # Per-frame mean/std normalization (epsilon avoids division by zero).
    mean = tf.math.reduce_mean(spectrogram, axis=1, keepdims=True)
    stddevs = tf.math.reduce_std(spectrogram, axis=1, keepdims=True)
    spectrogram = (spectrogram - mean) / (stddevs + 1e-10)
    # Bug fix: do NOT add batch/channel axes here. padded_batch supplies the
    # batch dimension and the model's Reshape layer adds the channel axis;
    # the previous expand_dims calls produced 5-D batches of shape
    # (batch, 1, time, freq, 1) that the (None, input_dim) model input rejects.
    # Lowercase the transcript and encode it as a sequence of character ids.
    label = tf.strings.lower(label)
    label = tf.strings.unicode_split(label, input_encoding='UTF-8')
    label = char_to_num(label)
    return spectrogram, label
def _make_asr_dataset(frame):
    """Build a padded, prefetched (spectrogram, label) pipeline from a split."""
    ds = tf.data.Dataset.from_tensor_slices(
        (list(frame["file_name"]), list(frame["normalized_transcription"]))
    )
    ds = ds.map(encode_single_sample, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.padded_batch(batch_size).prefetch(buffer_size=tf.data.AUTOTUNE)


# Training pipeline.
train_dataset = _make_asr_dataset(df_train)

# Validation pipeline (identical transform on the held-out split).
validation_dataset = _make_asr_dataset(df_test)
# Inspect one training batch: recover a single 2-D spectrogram and trim the
# zero padding that padded_batch introduced.
for batch in train_dataset.take(1):
    spectrogram = batch[0][0].numpy()  # first spectrogram of the batch
    if spectrogram.ndim == 4:
        # Drop a leading singleton batch axis, if one is present.
        spectrogram = tf.squeeze(spectrogram, axis=0)
    if spectrogram.ndim == 3:
        # Drop a trailing singleton channel axis to get a 2-D array.
        spectrogram = np.squeeze(spectrogram, axis=-1)
    # Trim zeros from every frequency row (rows of the transpose), then
    # right-pad the rows back into a rectangular 2-D array.
    # NOTE(review): the result is not used later in this chunk — presumably
    # intended for plotting; confirm before removing.
    trimmed_spectrogram = [np.trim_zeros(row) for row in spectrogram.T]
    max_length = max(len(row) for row in trimmed_spectrogram)
    trimmed_spectrogram = np.array(
        [np.pad(row, (0, max_length - len(row)), mode='constant') for row in trimmed_spectrogram]
    )
def CTCLoss(y_true, y_pred):
    """Batch CTC loss.

    Args:
        y_true: (batch, max_label_len) padded character ids.
        y_pred: (batch, time, vocab+1) per-frame softmax outputs.
    Returns:
        (batch, 1) tensor of per-sample CTC losses.
    """
    batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
    # Every sample is assumed to span the full padded time / label length.
    input_length = tf.ones(shape=(batch_len, 1), dtype="int64") * tf.cast(
        tf.shape(y_pred)[1], dtype="int64"
    )
    label_length = tf.ones(shape=(batch_len, 1), dtype="int64") * tf.cast(
        tf.shape(y_true)[1], dtype="int64"
    )
    return keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
def build_model(input_dim, output_dim, rnn_layer=5, rnn_units=128):
    """Build a DeepSpeech2-style ASR model: 2x Conv2D -> stacked BiGRU -> softmax.

    Args:
        input_dim: number of spectrogram frequency bins per time frame.
        output_dim: vocabulary size (one extra unit is added for the CTC blank).
        rnn_layer: number of bidirectional GRU layers.
        rnn_units: hidden units per GRU direction.
    Returns:
        A compiled keras.Model (Adam optimizer, CTC loss).
    """
    input_spectrogram = layers.Input(shape=(None, input_dim), name="input")
    # Add a channel axis so the Conv2D stack sees (time, freq, 1).
    net = layers.Reshape((-1, input_dim, 1), name="expand_dim")(input_spectrogram)
    # Convolutional front-end. conv_1 downsamples time and frequency by 2,
    # conv_2 downsamples frequency by 2 again.
    net = layers.Conv2D(filters=32, kernel_size=[11, 41], strides=[2, 2],
                        padding="same", use_bias=False, name="conv_1")(net)
    net = layers.BatchNormalization(name="bn_conv_1")(net)
    net = layers.ReLU(name="relu_1")(net)
    net = layers.Conv2D(filters=32, kernel_size=[11, 21], strides=[1, 2],
                        padding="same", use_bias=False, name="conv_2")(net)
    net = layers.BatchNormalization(name="bn_conv_2")(net)
    net = layers.ReLU(name="relu_2")(net)
    # Flatten (freq, channels) into one feature vector per time step.
    net = layers.Reshape((-1, net.shape[-2] * net.shape[-1]))(net)
    # Recurrent stack: rnn_layer bidirectional GRUs, dropout between them.
    for layer_idx in range(1, rnn_layer + 1):
        gru = layers.GRU(
            units=rnn_units,
            activation="tanh",
            recurrent_activation="sigmoid",
            use_bias=True,
            return_sequences=True,
            reset_after=True,
            name=f"gru_{layer_idx}",
        )
        net = layers.Bidirectional(
            gru, name=f"bidirectional_{layer_idx}", merge_mode="concat",
        )(net)
        if layer_idx < rnn_layer:
            net = layers.Dropout(rate=0.5)(net)
    # Classification head; +1 output unit for the CTC blank token.
    net = layers.Dense(units=rnn_units * 2, name="dense_1")(net)
    net = layers.ReLU(name="relu_3")(net)
    net = layers.Dropout(rate=0.5)(net)
    output = layers.Dense(units=output_dim + 1, activation="softmax")(net)
    model = keras.Model(input_spectrogram, output, name="DeepSpeech_2")
    optimizer = keras.optimizers.Adam(learning_rate=1e-4)
    model.compile(optimizer=optimizer, loss=CTCLoss)
    return model
# Build the model; input features are the STFT bins (fft_length // 2 + 1).
model = build_model(
    input_dim=fft_length // 2 + 1,
    # Bug fix: vocabulary_size() is the supported StringLookup API; the old
    # vocab_size() alias was deprecated and no longer exists in Keras 3.
    output_dim=char_to_num.vocabulary_size(),
    rnn_units=512,
)
model.summary()
def decode_batch_predictions(pred):
    """Greedy CTC-decode a batch of softmax outputs into text strings.

    Args:
        pred: array of shape (batch, time, vocab+1) of per-frame probabilities.
    Returns:
        list[str]: one decoded transcription per batch element.
    """
    # Every sequence is treated as spanning the full padded time dimension.
    input_len = np.ones(pred.shape[0]) * pred.shape[1]
    # Bug fix: the keyword is ``input_length`` — ``input_len=`` raised a
    # TypeError because ctc_decode has no parameter of that name.
    results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0]
    output_texts = []
    for result in results:
        # Map ids back to characters and join into one UTF-8 string.
        result = tf.strings.reduce_join(num_to_char(result)).numpy().decode('utf-8')
        output_texts.append(result)
    return output_texts
class CallbackEval(keras.callbacks.Callback):
    """After each epoch, decode a dataset and print the word error rate."""

    def __init__(self, dataset):
        super().__init__()
        # Dataset of (spectrogram_batch, label_batch) pairs to evaluate on.
        self.dataset = dataset

    def on_epoch_end(self, epoch, logs=None):
        predictions = []
        targets = []
        for batch in self.dataset:
            X, y = batch
            # Bug fix: use the model Keras attaches to the callback instead
            # of a module-level global, so the callback works with whichever
            # model it is fitted to.
            batch_predictions = self.model.predict(X)
            predictions.extend(decode_batch_predictions(batch_predictions))
            for label in y:
                # Convert padded id sequences back into text references.
                label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8")
                targets.append(label)
        # jiwer.wer(reference, hypothesis).
        wer_score = wer(targets, predictions)
        print(f"WER: {wer_score:.4f}")
        # Show up to two random (target, prediction) examples; the guard
        # keeps randint from being called with high == 0 on an empty set.
        if predictions:
            sample_count = min(2, len(predictions))
            for i in np.random.randint(0, len(predictions), sample_count):
                print(f"Target: {targets[i]}")
                print(f"Prediction: {predictions[i]}")
# Evaluate WER on the validation split at the end of every epoch.
validation_callback = CallbackEval(validation_dataset)

# Train; ``history`` keeps per-epoch loss curves.
fit_kwargs = dict(
    validation_data=validation_dataset,
    epochs=epochs,
    callbacks=[validation_callback],
)
history = model.fit(train_dataset, **fit_kwargs)

# Persist the trained model in HDF5 format.
save_path = r'D:\MyCode\Python\pythonProject\SavedModed\model_stt.h5'
model.save(save_path)