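"""Batch speech recognition with litagin/anime-whisper.

Transcribes every audio file in a directory, pairs the ASR output with the
reference transcript read from transcript.csv (expected columns: "filename",
"transcript"), and appends the results to a CSV. Files already listed in the
output CSV are skipped, so an interrupted run can simply be restarted.
"""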
import torch
import csv
from transformers import pipeline
import os
import librosa

# Raise the CSV field size limit so rows with very long fields can be read.
OVER_SIZE_LIMIT = 200_000_000

csv.field_size_limit(OVER_SIZE_LIMIT)
# Expose only the first GPU to PyTorch.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Generation options: force Japanese transcription and keep n-gram blocking
# and the repetition penalty disabled (their default values).
generate_kwargs = {
    "language": "Japanese",
    "no_repeat_ngram_size": 0,
    "repetition_penalty": 1.0,
}

# ASR pipeline: anime-whisper in float16 on the GPU, splitting long audio into
# 30-second chunks and decoding up to 64 chunks per forward pass.
pipe = pipeline(
    "automatic-speech-recognition",
    model="litagin/anime-whisper",
    device="cuda",
    torch_dtype=torch.float16,
    chunk_length_s=30.0,
    batch_size=64,
)

# Output CSV for the recognition results
csv_file = r"C:\Users\user\Pictures\speech_recognition_results.csv"

# Load reference transcripts from transcript.csv
transcripts = {}
with open(r"C:\Users\user\Pictures\transcript.csv", mode="r", encoding="utf-8") as file:
    reader = csv.DictReader(file)
    for row in reader:
        transcripts[row["filename"]] = row["transcript"]

audio_dir = r"C:\Users\user\Pictures\dataset_converted"
audio_files = os.listdir(audio_dir)

# Collect filenames that are already in the results CSV so they can be skipped
processed_files = set()
if os.path.exists(csv_file):
    with open(csv_file, mode="r", encoding="utf-8") as file:
        reader = csv.DictReader(file)
        for row in reader:
            processed_files.add(row["Filename"])

# Start processing
batch_size = 256  # number of audio files loaded and transcribed per iteration
with open(csv_file, mode="a", newline="", encoding="utf-8") as file:
    writer = csv.writer(file)

    # Write the header row only when the output file is still empty
    if file.tell() == 0:
        writer.writerow(["Filename", "True", "ASR"])

    # Keep only files that have a reference transcript and have not been processed yet
    unprocessed_files = [
        f for f in audio_files if f in transcripts and f not in processed_files
    ]

    # Process the remaining files in batches
    for i in range(0, len(unprocessed_files), batch_size):
        batch_files = unprocessed_files[i : i + batch_size]
        audio_paths = [os.path.join(audio_dir, f) for f in batch_files]

        # Load each audio file, resampled to 16 kHz as Whisper expects
        audios = []
        for audio_path in audio_paths:
            y, sr = librosa.load(audio_path, sr=16000)
            audios.append(y)

        # Run the whole batch through the ASR pipeline
        results = pipe(audios, generate_kwargs=generate_kwargs)

        # Append the results to the CSV and echo them to the console
        for audio_file, result in zip(batch_files, results):
            asr_result = result["text"]
            true_text = transcripts[audio_file]
            writer.writerow([audio_file, true_text, asr_result])

            print("Filename:", audio_file)
            print("True:", true_text)
            print("ASR :", asr_result)
            print()

print(f"結果は {csv_file} に保存されました。")