|
import torch
|
|
import csv
|
|
from transformers import pipeline
|
|
import os
|
|
import librosa
|
|
from itertools import islice
|
|
|
|
# Raise csv's per-field size cap so very long transcript fields parse cleanly.
OVER_SIZE_LIMIT = 200_000_000
csv.field_size_limit(OVER_SIZE_LIMIT)

# Restrict this process to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
|
|
|
|
# Decoding options forwarded to the model's generate() call.
generate_kwargs = dict(
    language="Japanese",     # force Japanese transcription
    no_repeat_ngram_size=0,  # disable n-gram repetition blocking
    repetition_penalty=1.0,  # neutral value: no repetition penalty applied
)
|
|
|
|
# Build the ASR pipeline once: anime-whisper in fp16 on the GPU,
# long-form audio handled as 30 s chunks, 64 chunks per forward pass.
_pipeline_options = {
    "model": "litagin/anime-whisper",
    "device": "cuda",
    "torch_dtype": torch.float16,
    "chunk_length_s": 30.0,
    "batch_size": 64,
}
pipe = pipeline("automatic-speech-recognition", **_pipeline_options)
|
|
|
|
|
|
# Destination CSV for (filename, reference, hypothesis) rows.
csv_file = r"C:\Users\user\Pictures\speech_recognition_results.csv"

# Reference transcripts keyed by audio filename.
with open(r"C:\Users\user\Pictures\transcript.csv", mode="r", encoding="utf-8") as file:
    transcripts = {row["filename"]: row["transcript"] for row in csv.DictReader(file)}

# All candidate audio files in the converted dataset directory.
audio_dir = r"C:\Users\user\Pictures\dataset_converted"
audio_files = os.listdir(audio_dir)
|
|
|
|
|
|
# Filenames already present in the results CSV — lets an interrupted
# run resume without re-transcribing finished files.
processed_files = set()
if os.path.exists(csv_file):
    with open(csv_file, mode="r", encoding="utf-8") as file:
        processed_files.update(row["Filename"] for row in csv.DictReader(file))
|
|
|
|
|
|
# Transcribe every not-yet-processed file in batches and append
# (Filename, True, ASR) rows to the results CSV.
batch_size = 256
with open(csv_file, mode="a", newline="", encoding="utf-8") as file:
    writer = csv.writer(file)

    # Header only on the very first run (no rows recovered from disk).
    if not processed_files:
        writer.writerow(["Filename", "True", "ASR"])

    # Only files that have a reference transcript and are not already done.
    unprocessed_files = [
        f for f in audio_files if f in transcripts and f not in processed_files
    ]

    for i in range(0, len(unprocessed_files), batch_size):
        batch_files = unprocessed_files[i : i + batch_size]

        # Decode each file at 16 kHz (Whisper's expected sample rate).
        # FIX: a single corrupt/unreadable file used to raise out of
        # librosa.load and abort the whole run — skip it instead, and
        # track loaded_files so zip() below stays aligned with results.
        audios = []
        loaded_files = []
        for audio_file in batch_files:
            audio_path = os.path.join(audio_dir, audio_file)
            try:
                y, sr = librosa.load(audio_path, sr=16000)
            except Exception as e:  # corrupt or unsupported audio
                print(f"Skipping {audio_file}: {e}")
                continue
            audios.append(y)
            loaded_files.append(audio_file)

        if not audios:
            continue  # entire batch failed to load

        results = pipe(audios, generate_kwargs=generate_kwargs)

        for audio_file, result in zip(loaded_files, results):
            asr_result = result["text"]
            true_text = transcripts[audio_file]
            writer.writerow([audio_file, true_text, asr_result])

            print("Filename:", audio_file)
            print("True:", true_text)
            print("ASR :", asr_result)
            print()

        # FIX: flush after every batch so a crash/restart does not lose
        # buffered rows — processed_files is rebuilt from what is
        # actually on disk, so unflushed work would be redone (or lost).
        file.flush()

print(f"結果は {csv_file} に保存されました。")
|
|
|