# File size: 6,161 bytes — commit 759f08b
import io
import json
from pathlib import Path
from typing import Any, Dict, List, Union
import pandas as pd
from datasets import load_dataset
class ADTDatasetLoader:
    """Loader for ADT Dataset from Hugging Face.

    The repository is expected to expose parallel ``filename``,
    ``data_type`` and ``data`` columns per sequence; entries are
    deserialized on access into DataFrames (csv) or Python objects
    (json / jsonl).
    """

    def __init__(self, repo_id: str = "ariakang/ADT-test", sequence_name: Union[str, None] = None):
        """
        Initialize the dataset loader.

        Args:
            repo_id: Hugging Face repository ID
            sequence_name: Specific sequence to load. If None, loads first available sequence
        """
        self.dataset = load_dataset(repo_id)
        if sequence_name is None:
            # Default to the first available sequence/split in the repo.
            sequence_name = next(iter(self.dataset.keys()))
        self.sequence_name = sequence_name
        self.sequence = self.dataset[sequence_name]
        # Map filename -> row index for O(1) lookups in load_file_by_name.
        self.file_index = {
            filename: idx for idx, filename in enumerate(self.sequence["filename"])
        }

    def _deserialize_csv(self, csv_string: str) -> pd.DataFrame:
        """Convert CSV string to DataFrame."""
        return pd.read_csv(io.StringIO(csv_string))

    def _deserialize_json(self, json_string: str) -> Union[Dict, List]:
        """Convert JSON string to Python object."""
        return json.loads(json_string)

    def get_available_files(self) -> List[Dict[str, str]]:
        """Get list of all available files and their types."""
        return [
            {"filename": filename, "type": dtype}
            for filename, dtype in zip(
                self.sequence["filename"], self.sequence["data_type"]
            )
        ]

    def load_file_by_name(self, filename: str) -> Any:
        """
        Load specific file by name.

        Args:
            filename: Name of the file to load (e.g., "2d_bounding_box.csv")

        Returns:
            DataFrame for CSV files, dict/list for JSON files, raw data otherwise

        Raises:
            ValueError: If the filename is not present in the sequence.
        """
        if filename not in self.file_index:
            # BUGFIX: original message had no placeholder and always read
            # "File (unknown) not found"; include the actual filename.
            raise ValueError(f"File {filename} not found in dataset")
        idx = self.file_index[filename]
        data_type = self.sequence["data_type"][idx]
        data = self.sequence["data"][idx]
        if data_type == "csv":
            return self._deserialize_csv(data)
        elif data_type in ["json", "jsonl"]:
            return self._deserialize_json(data)
        else:
            # Unknown type: hand back the raw stored payload unchanged.
            return data

    def load_2d_bounding_boxes(self) -> pd.DataFrame:
        """Load 2D bounding box data."""
        return self.load_file_by_name("2d_bounding_box.csv")

    def load_3d_bounding_boxes(self) -> pd.DataFrame:
        """Load 3D bounding box data."""
        return self.load_file_by_name("3d_bounding_box.csv")

    def load_aria_trajectory(self) -> pd.DataFrame:
        """Load Aria device trajectory data."""
        return self.load_file_by_name("aria_trajectory.csv")

    def load_eyegaze(self) -> pd.DataFrame:
        """Load eye gaze data."""
        return self.load_file_by_name("eyegaze.csv")

    def load_scene_objects(self) -> pd.DataFrame:
        """Load scene objects data."""
        return self.load_file_by_name("scene_objects.csv")

    def load_instances(self) -> Dict:
        """Load instances data."""
        return self.load_file_by_name("instances.json")

    def load_metadata(self) -> Dict:
        """Load metadata."""
        return self.load_file_by_name("metadata.json")

    def load_mps_eye_gaze(self) -> Dict[str, Union[pd.DataFrame, Dict]]:
        """Load MPS eye gaze data."""
        return {
            "general": self.load_file_by_name("mps/eye_gaze/general_eye_gaze.csv"),
            "summary": self.load_file_by_name("mps/eye_gaze/summary.json"),
        }

    def load_mps_slam(self) -> Dict[str, Union[pd.DataFrame, List]]:
        """Load MPS SLAM data."""
        return {
            "closed_loop": self.load_file_by_name(
                "mps/slam/closed_loop_trajectory.csv"
            ),
            "open_loop": self.load_file_by_name("mps/slam/open_loop_trajectory.csv"),
            "calibration": self.load_file_by_name("mps/slam/online_calibration.jsonl"),
        }

    def get_vrs_files_info(self) -> List[Dict]:
        """Get information about VRS files.

        BUGFIX: load_file_by_name already deserializes json entries, so the
        original second _deserialize_json call passed a dict/list to
        json.loads and raised TypeError. Return the parsed object directly.
        """
        return self.load_file_by_name("vrs_files_info.json")
def print_dataset_summary(data: Any, name: str):
    """Print a short, type-appropriate summary of *data* to stdout.

    DataFrames get shape/columns/head, dicts get their keys, lists get
    a length plus a first-item sample; anything else prints nothing.
    """
    if not isinstance(data, (pd.DataFrame, dict, list)):
        return
    # Common header shared by all summarized types.
    print(f"\n{name}:")
    if isinstance(data, pd.DataFrame):
        print(f"Shape: {data.shape}")
        print("Columns:", list(data.columns))
        print("Sample data:")
        print(data.head(2))
    elif isinstance(data, dict):
        print("Keys:", list(data.keys()))
    else:
        print(f"Number of items: {len(data)}")
        if data:
            print("First item sample:", data[0])
def main():
    """Example usage of the dataset loader."""
    loader = ADTDatasetLoader()
    print(f"Loading sequence: {loader.sequence_name}")

    # Print available files
    print("\nAvailable files:")
    for entry in loader.get_available_files():
        print(f"- {entry['filename']} ({entry['type']})")

    # Load every data type in a fixed order and summarize each one.
    print("\n=== Loading all data types ===")
    sections = [
        ("2D Bounding Boxes", loader.load_2d_bounding_boxes),
        ("3D Bounding Boxes", loader.load_3d_bounding_boxes),
        ("Aria Trajectory", loader.load_aria_trajectory),
        ("Eye Gaze", loader.load_eyegaze),
        ("Scene Objects", loader.load_scene_objects),
        ("Instances", loader.load_instances),
        ("Metadata", loader.load_metadata),
        ("MPS Eye Gaze", loader.load_mps_eye_gaze),
        ("MPS SLAM", loader.load_mps_slam),
        ("VRS Files", loader.get_vrs_files_info),
    ]
    for title, fetch in sections:
        print_dataset_summary(fetch(), title)


if __name__ == "__main__":
    main()
|