# File size: 2,651 Bytes
# 47a5221
import h5py
import numpy as np
from pathlib import Path
from tqdm import tqdm
def load_stimulus_features(
    root_data_dir: str,
    modality: str,
    layer_index: int = 0,
    max_features: int = 200,
) -> dict:
    """
    Load stimulus features stored in .h5 files under the given root directory.

    Each .h5 file is expected to contain one or more datasets corresponding to
    different layers. When a file holds more than one layer, the layers are
    concatenated along the feature dimension (axis=1). Files containing no
    datasets are skipped.

    Parameters:
        root_data_dir (str): Root directory searched recursively for .h5 files.
        modality (str): Modality name (e.g., 'audio', 'video'). Used as the
            top-level key of the returned dict.
        layer_index (int): Index selected along the second axis of the stored
            arrays. Default 0 preserves the original behavior. Assumes each
            stored dataset is at least 3-D — TODO confirm against the files.
        max_features (int): Keep only the first ``max_features`` entries along
            the last axis. Default 200 preserves the original behavior.

    Returns:
        dict: A dictionary of the form { modality: { movie_name: features } }
        where movie_name is the file stem (with any leading "friends_" prefix
        removed) and features is the NumPy array
        ``concatenated[:, layer_index, :max_features]``.
    """
    features = {modality: {}}
    root_path = Path(root_data_dir)

    # Traverse all .h5 files under the root directory (recursively).
    for h5_file in tqdm(root_path.rglob("*.h5")):
        movie_name = h5_file.stem
        # Normalize names such as "friends_s01e01a" to "s01e01a".
        if movie_name.startswith("friends_"):
            movie_name = movie_name[len("friends_"):]

        # Read every dataset (layer) in the file into memory; the context
        # manager guarantees the file handle is closed even on error.
        with h5py.File(h5_file, 'r') as f:
            datasets = [f[layer][:] for layer in f.keys()]

        if not datasets:
            # Empty file: nothing to store for this movie.
            continue
        if len(datasets) > 1:
            # Multiple layers: concatenate along the feature dimension.
            concatenated_features = np.concatenate(datasets, axis=1)
        else:
            concatenated_features = datasets[0]

        # Select one layer slot and truncate the feature dimension.
        features[modality][movie_name] = (
            concatenated_features[:, layer_index, :max_features]
        )

    return features
# --- Example usage ---
# Suppose the root_data_dir points to the directory containing all the .h5 files.
def main() -> None:
    """Load Whisper audio features and print each movie split's name and shape."""
    whisper_root_data_dir = "/content/drive/MyDrive/features/whisper"  # adjust this path as needed
    modality = "audio"  # For example, we're loading audio features

    # Load the stimulus features.
    features = load_stimulus_features(whisper_root_data_dir, modality)

    # Print all available movie splits for the specified modality along with their shapes.
    for key_modality, value_modality in features.items():
        print(f"\n{key_modality} features movie splits name and shape:")
        for key_movie, value_movie in value_modality.items():
            print(f"{key_movie} {value_movie.shape}")


# Guard the entry point so importing this module does not trigger the
# (slow, Drive-dependent) feature loading as a side effect.
if __name__ == "__main__":
    main()