PedroDKE committed on
Commit 3f72b81 · verified · 1 Parent(s): 9c0a37f

Upload 4 files


Uploaded some new files:
- clean_up_csv.py: cleans up the original alignment CSV for easier use
- libris2s_dataset.py: a PyTorch Dataset class as a starting point for easy loading
- data_example.ipynb: a notebook to test the dataset class

Files changed (4)
  1. clean_up_csv.py +42 -0
  2. data_example.ipynb +0 -0
  3. libris2s_dataset.py +95 -0
  4. requirements.txt +8 -5
clean_up_csv.py ADDED
@@ -0,0 +1,42 @@
+ import pandas as pd
+ import os
+
+ # Read the original CSV
+ alignment = pd.read_csv("alignments/all_de_en_alligned.csv", index_col=0)
+
+ # Create mapping of book numbers to English folder names
+ en_folder_map = {}
+ for folder in os.listdir("EN"):
+     book_id = folder.split('.')[0]
+     en_folder_map[book_id] = folder
+
+ # Function to construct the full German audio path
+ def get_de_path(row):
+     if "67" in row['book']:
+         return os.path.join("DE", "67.frankenstein_de_1211_librivox_newly_alligned", "sentence_level_audio", row['DE_audio'])
+     return os.path.join("DE", row['book'], "sentence_level_audio", row['DE_audio'])
+
+ # Function to construct the full English audio path
+ def get_en_path(row):
+     book_id = str(row['book_id'])
+     if book_id in en_folder_map:
+         return os.path.join("EN", en_folder_map[book_id], "sentence_level_audio", row['EN_audio'] + ".wav")
+     return None
+
+ # Update paths in the DataFrame
+ alignment['DE_audio'] = alignment.apply(get_de_path, axis=1)
+ alignment['EN_audio'] = alignment.apply(get_en_path, axis=1)
+
+ # Drop the 'book' column since paths are now complete
+ alignment = alignment.drop('book', axis=1)
+
+ # Drop rows where the EN_audio path couldn't be constructed (book_id not found)
+ alignment = alignment.dropna(subset=['EN_audio'])
+
+ # Save the cleaned-up CSV
+ alignment.to_csv("alignments/all_de_en_alligned_cleaned.csv", index=False)
+
+ print(f"Saved cleaned CSV with {len(alignment)} rows")
+ print("\nFirst few rows of cleaned CSV:")
+ print(alignment.head())
+
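As a quick check after running clean_up_csv.py, here is a minimal sketch (not part of this commit) that reloads the cleaned CSV and verifies that the rewritten paths resolve on disk. It assumes it is run from the dataset root, next to the alignments/, DE/ and EN/ folders, and relies only on the DE_audio/EN_audio columns written by the script above:

# Hypothetical sanity check, not part of this commit; run from the dataset root.
import os
import pandas as pd

cleaned = pd.read_csv("alignments/all_de_en_alligned_cleaned.csv")

# DE_audio and EN_audio now hold relative paths built by clean_up_csv.py.
missing = [p for p in cleaned['DE_audio'].tolist() + cleaned['EN_audio'].tolist()
           if not os.path.exists(p)]
print(f"{len(cleaned)} aligned pairs, {len(missing)} missing audio files")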
data_example.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
libris2s_dataset.py ADDED
@@ -0,0 +1,95 @@
+ import os
+ import torch
+ import pandas as pd
+ import torchaudio
+ from torch.utils.data import Dataset
+ from typing import List, Optional
+
+ class Libris2sDataset(Dataset):
+     def __init__(self, data_dir: str, split: str, transform=None, book_ids: Optional[List[str]] = None):
+         """
+         Initialize the LibriS2S dataset.
+
+         Args:
+             data_dir (str): Root directory containing the dataset
+             split (str): Path to the CSV file containing alignments
+             transform (callable, optional): Optional transform to be applied on the audio
+             book_ids (List[str], optional): List of book IDs to include. If None, includes all books.
+                 Example: ['9', '10', '11'] will only load these books.
+         """
+         self.data_dir = data_dir
+         self.transform = transform
+         self.book_ids = set(book_ids) if book_ids is not None else None
+
+         # Load alignment CSV file
+         self.alignments = pd.read_csv(split)
+
+         # Create lists to store paths and metadata
+         self.de_audio_paths = []
+         self.en_audio_paths = []
+         self.de_transcripts = []
+         self.en_transcripts = []
+         self.alignment_scores = []
+
+         # Process each entry in the alignments
+         for _, row in self.alignments.iterrows():
+             # Get book ID from the path
+             book_id = str(row['book_id'])
+
+             # Skip if book_id is not in the filtered set
+             if self.book_ids is not None and book_id not in self.book_ids:
+                 continue
+
+             # Get full paths from CSV
+             de_audio = os.path.join(data_dir, row['DE_audio'])
+             en_audio = os.path.join(data_dir, row['EN_audio'])
+
+             # Only add if both audio files exist
+             if os.path.exists(de_audio) and os.path.exists(en_audio):
+                 self.de_audio_paths.append(de_audio)
+                 self.en_audio_paths.append(en_audio)
+                 self.de_transcripts.append(row['DE_transcript'])
+                 self.en_transcripts.append(row['EN_transcript'])
+                 self.alignment_scores.append(float(row['score']))
+             else:
+                 print(f"Skipping {de_audio} or {en_audio} because they don't exist")
+
+     def __len__(self):
+         """Return the number of items in the dataset."""
+         return len(self.de_audio_paths)
+
+     def __getitem__(self, idx):
+         """
+         Get a single item from the dataset.
+
+         Args:
+             idx (int): Index of the item to get
+
+         Returns:
+             dict: A dictionary containing:
+                 - de_audio: German audio waveform
+                 - de_sample_rate: German audio sample rate
+                 - en_audio: English audio waveform
+                 - en_sample_rate: English audio sample rate
+                 - de_transcript: German transcript
+                 - en_transcript: English transcript
+                 - alignment_score: Alignment score between the pair
+         """
+         # Load audio files
+         de_audio, de_sr = torchaudio.load(self.de_audio_paths[idx])
+         en_audio, en_sr = torchaudio.load(self.en_audio_paths[idx])
+
+         # Apply transforms if specified
+         if self.transform:
+             de_audio = self.transform(de_audio)
+             en_audio = self.transform(en_audio)
+
+         return {
+             'de_audio': de_audio,
+             'de_sample_rate': de_sr,
+             'en_audio': en_audio,
+             'en_sample_rate': en_sr,
+             'de_transcript': self.de_transcripts[idx],
+             'en_transcript': self.en_transcripts[idx],
+             'alignment_score': self.alignment_scores[idx]
+         }
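A minimal usage sketch for the class above (not part of this commit). The constructor arguments follow the signature in libris2s_dataset.py; the cleaned CSV path, the book_ids subset, the batch size and the list-based collate_fn are illustrative assumptions, the collate_fn because the waveforms have different lengths and cannot be stacked directly:

# Hypothetical usage of Libris2sDataset; assumes clean_up_csv.py has been run
# and that this script is started from the dataset root.
from torch.utils.data import DataLoader
from libris2s_dataset import Libris2sDataset

dataset = Libris2sDataset(
    data_dir=".",                                       # root containing the DE/ and EN/ folders
    split="alignments/all_de_en_alligned_cleaned.csv",  # CSV produced by clean_up_csv.py
    book_ids=['9', '10', '11'],                         # optional subset, as in the docstring example
)

def collate(batch):
    # Waveforms vary in length, so keep each field as a plain list per batch.
    return {key: [item[key] for item in batch] for key in batch[0]}

loader = DataLoader(dataset, batch_size=4, shuffle=True, collate_fn=collate)
batch = next(iter(loader))
print(len(batch['de_audio']), batch['de_sample_rate'][0], batch['en_transcript'][0])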
requirements.txt CHANGED
@@ -1,5 +1,8 @@
- aeneas=1.7.3.0
- pandas>=1.1.4
- pydub=0.24.1
- beautifulsoup4=4.9.3
- requests=2.25.1
+ aeneas=1.7.3.0
+ pandas>=1.1.4
+ pydub=0.24.1
+ beautifulsoup4=4.9.3
+ requests=2.25.1
+ torch>=2.0.0
+ torchaudio>=2.0.0
+ soundfile