# CoSTA / ST / inference / inference1.py
# (Hugging Face upload metadata: bhavanishankarpullela — "Upload 360 files",
#  commit b817ab5 verified. Kept as a comment so the file remains valid Python.)
# Transcribe every .wav file in a folder with Whisper large-v3 and write the
# results (file name, transcript, detected language) to a CSV file.
import os

# BUG FIX: must be set BEFORE torch initializes CUDA so that only the chosen
# GPU is visible to this process (the original set it after `import torch`).
os.environ["CUDA_VISIBLE_DEVICES"] = "6"  # SET the GPUs you want to use

import csv

import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from datasets import load_dataset  # kept from original; unused in this script

device = "cuda:0" if torch.cuda.is_available() else "cpu"
# fp16 halves GPU memory; CPU inference needs fp32.
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model_id = "openai/whisper-large-v3"
# BUG FIX: the original computed torch_dtype but never used it.
model = WhisperForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch_dtype)
processor = WhisperProcessor.from_pretrained(model_id)

# BUG FIX: the original called `pipe(...)` in the loop without ever building
# the pipeline, which raised NameError on the first file.
pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    device=device,
)

mp3_folder = "./extra_epi_wav/"
# Get a list of all the wav files in the folder
mp3_files = [file for file in os.listdir(mp3_folder) if file.endswith(".wav")]

# Create a CSV file to store the transcripts
csv_filename = "transcripts_with_lang.csv"
with open(csv_filename, mode='w', newline='', encoding='utf-8') as csv_file:
    fieldnames = ['File Name', 'Transcript', 'lang']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    # Write the header to the CSV file
    writer.writeheader()

    # Process each wav file and write the results to the CSV file
    processed_files_counter = 0
    for mp3_file in mp3_files:
        mp3_path = os.path.join(mp3_folder, mp3_file)
        # return_language=True makes the ASR pipeline report the language
        # Whisper detected for each decoded chunk.
        result = pipe(mp3_path, return_language=True)
        print(result)
        transcript = result["text"]
        # BUG FIX: the CSV declared a 'lang' column that was never populated.
        # Take the detected language of the first chunk (empty if unavailable).
        # NOTE(review): assumes chunk dicts carry a 'language' key when
        # return_language=True — confirm against the installed transformers version.
        chunks = result.get("chunks") or []
        lang = (chunks[0].get("language") or "") if chunks else ""

        processed_files_counter += 1
        # Report progress after every 10 files
        if processed_files_counter % 10 == 0:
            print(f"{processed_files_counter} files processed.")

        # Write the file name, transcript, and detected language to the CSV file
        writer.writerow({'File Name': mp3_file, 'Transcript': transcript, 'lang': lang})

print(f"Transcripts saved to {csv_filename}")