ariakang committed on
Commit 759f08b · 1 Parent(s): 6469682

update readme

Files changed (3)
  1. README.md +171 -40
  2. load_dataset.py +182 -0
  3. upload_to_HF.py +227 -0
README.md CHANGED
@@ -8,43 +8,174 @@ license:
  # ADT Dataset
 
  ## Dataset Description
- This dataset contains Aria Digital Twin (ADT) sequences with various sensor data and annotations.
-
- ## Usage Example
- ```python
- from datasets import load_dataset
- import pandas as pd
- import json
- import io
-
- def deserialize_csv(csv_string):
-     return pd.read_csv(io.StringIO(csv_string))
-
- def deserialize_json(json_string):
-     return json.loads(json_string)
-
- # Load the dataset
- dataset = load_dataset("ariakang/ADT-test")
- sequence = dataset["{sequence_name}"]
-
- # Get list of available files
- files = list(zip(sequence["filename"], sequence["data_type"]))
- print("Available files:", files)
-
- # Load specific data
- for i, (filename, data_type, data) in enumerate(zip(
-     sequence["filename"], sequence["data_type"], sequence["data"]
- )):
-     if data_type == "csv":
-         df = deserialize_csv(data)
-         print(f"Loaded CSV {filename}: {len(df)} rows")
-     elif data_type in ["json", "jsonl"]:
-         json_data = deserialize_json(data)
-         print(f"Loaded JSON {filename}")
-     elif data_type == "vrs_info":
-         vrs_info = deserialize_json(data)
-         print(f"VRS files: {[f['filename'] for f in vrs_info]}")
- ```
-
- ## VRS Files
- VRS files are stored in: sequences/{sequence_name}/vrs_files/
+ This dataset contains Aria Digital Twin (ADT) sequences with various sensor data and annotations, including 2D/3D bounding boxes, trajectories, eye gaze data, and VRS recordings.
+
+ ## Quick Start
+ ```python
+ from load_dataset import ADTDatasetLoader
+
+ # Load entire dataset (defaults to the first available sequence)
+ loader = ADTDatasetLoader("ariakang/ADT-test")
+
+ # Load specific sequence
+ loader = ADTDatasetLoader("ariakang/ADT-test", sequence_name="Apartment_release_clean_seq131_M1292")
+ ```
+ The `ADTDatasetLoader` class is provided by `load_dataset.py` in this repository.
+
+ ## Installation
+ ```bash
+ # Install required packages
+ pip install datasets pandas
+ ```
+
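+ Note that `upload_to_HF.py` pushes this dataset with `private=True`, so loading it will likely require a Hugging Face access token. A minimal sketch, assuming a token with read access to the repo:
+ ```python
+ from huggingface_hub import login
+
+ # Prompts for an access token and stores it locally so that
+ # datasets.load_dataset can authenticate against the private repo.
+ login()
+ ```
+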
+ ## Dataset Structure
+ Each sequence contains:
+ - VRS Files:
+   - video.vrs
+   - synthetic_video.vrs
+   - segmentations.vrs
+   - depth_images.vrs
+ - CSV Data:
+   - 2D/3D bounding boxes
+   - Aria device trajectories
+   - Eye gaze data
+   - Scene objects
+ - JSON Data:
+   - Instance annotations
+   - Metadata
+ - MPS Data:
+   - Eye gaze processing
+   - SLAM results
+
+ ## Flexible Loading Options
+
+ ### 1. Load Entire Dataset
+ ```python
+ # Initialize loader with all sequences
+ loader = ADTDatasetLoader("ariakang/ADT-test")
+
+ # See available sequences and data types
+ available_files = loader.get_available_files()
+ print("Available files:", available_files)
+
+ # Load all data types
+ bbox_2d = loader.load_2d_bounding_boxes()
+ bbox_3d = loader.load_3d_bounding_boxes()
+ trajectory = loader.load_aria_trajectory()
+ eyegaze = loader.load_eyegaze()
+ metadata = loader.load_metadata()
+ slam_data = loader.load_mps_slam()
+ ```
+
+ ### 2. Load Specific Sequences
+ ```python
+ # Load a specific sequence
+ loader = ADTDatasetLoader(
+     "ariakang/ADT-test",
+     sequence_name="Apartment_release_clean_seq131_M1292"
+ )
+
+ # Load data from this sequence
+ bbox_2d = loader.load_2d_bounding_boxes()
+ trajectory = loader.load_aria_trajectory()
+ ```
+
+ ### 3. Load Selected Data Types
+ ```python
+ # Initialize loader for a specific sequence
+ loader = ADTDatasetLoader("ariakang/ADT-test", "Apartment_release_clean_seq131_M1292")
+
+ # Load only 2D bounding boxes and VRS info
+ bbox_2d = loader.load_2d_bounding_boxes()
+ vrs_info = loader.get_vrs_files_info()
+
+ # Get paths to specific VRS files
+ video_vrs = [f for f in vrs_info if f['filename'] == 'video.vrs'][0]
+ print(f"Video VRS path: {video_vrs['path']}")
+
+ # Load only SLAM data
+ slam_data = loader.load_mps_slam()
+ closed_loop = slam_data['closed_loop']  # Get specific SLAM component
+ ```
+
+ ## Available Data Types and Methods
+
+ ### Main Data Types
+ ```python
+ # Bounding Boxes and Trajectories
+ bbox_2d = loader.load_2d_bounding_boxes()
+ bbox_3d = loader.load_3d_bounding_boxes()
+ trajectory = loader.load_aria_trajectory()
+
+ # Eye Gaze and Scene Data
+ eyegaze = loader.load_eyegaze()
+ scene_objects = loader.load_scene_objects()
+
+ # Metadata and Instances
+ metadata = loader.load_metadata()
+ instances = loader.load_instances()
+
+ # MPS Data
+ eye_gaze_data = loader.load_mps_eye_gaze()  # Returns dict with 'general' and 'summary'
+ slam_data = loader.load_mps_slam()  # Returns dict with various SLAM components
+ ```
+
+ ### VRS Files
+ ```python
+ # Get VRS file information
+ vrs_info = loader.get_vrs_files_info()
+
+ # Example: Access specific VRS file info
+ for vrs_file in vrs_info:
+     print(f"File: {vrs_file['filename']}")
+     print(f"Path: {vrs_file['path']}")
+     print(f"Size: {vrs_file['size_bytes'] / 1024 / 1024:.2f} MB")
+ ```
+
+ ### Custom Loading
+ ```python
+ # Load any file by name
+ data = loader.load_file_by_name("your_file_name.csv")
+ ```
+
+ ## Data Format Examples
+
+ ### 2D Bounding Boxes
+ ```python
+ bbox_2d = loader.load_2d_bounding_boxes()
+ print(bbox_2d.columns)
+ # Columns: ['object_uid', 'timestamp[ns]', 'x_min[pixel]', 'x_max[pixel]', 'y_min[pixel]', 'y_max[pixel]']
+ ```
+
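+ As one illustration of these columns (a sketch; exact timestamp values depend on your sequence), the boxes visible in a single frame can be grouped by timestamp:
+ ```python
+ bbox_2d = loader.load_2d_bounding_boxes()
+
+ # Pick the earliest frame and collect its boxes
+ first_ts = bbox_2d["timestamp[ns]"].min()
+ frame_boxes = bbox_2d[bbox_2d["timestamp[ns]"] == first_ts]
+ print(f"{len(frame_boxes)} boxes at t={first_ts}")
+ ```
+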
+ ### Aria Trajectory
+ ```python
+ trajectory = loader.load_aria_trajectory()
+ print(trajectory.columns)
+ # Columns: ['timestamp[ns]', 'x', 'y', 'z', 'qx', 'qy', 'qz', 'qw']
+ ```
+
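+ For example, the translation columns allow a rough estimate of the distance traveled (a sketch; the unit of `x`, `y`, `z` is not stated in the column names):
+ ```python
+ import numpy as np
+
+ trajectory = loader.load_aria_trajectory()
+
+ # Sum Euclidean distances between consecutive poses
+ deltas = trajectory[["x", "y", "z"]].diff().dropna()
+ path_length = np.sqrt((deltas ** 2).sum(axis=1)).sum()
+ print(f"Approximate path length: {path_length:.2f}")
+ ```
+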
+ ### MPS SLAM Data
+ ```python
+ slam_data = loader.load_mps_slam()
+ # Components:
+ # - closed_loop: DataFrame with closed-loop trajectory
+ # - open_loop: DataFrame with open-loop trajectory
+ # - calibration: Calibration parameters
+ ```
+
+ ## Error Handling
+ ```python
+ try:
+     data = loader.load_file_by_name("non_existent_file.csv")
+ except ValueError as e:
+     print(f"Error: {e}")
+ ```
+
+ ## Notes
+ - All CSV files are loaded as pandas DataFrames
+ - JSON/JSONL files are loaded as Python dictionaries/lists
+ - VRS files are not loaded into memory; only their metadata and paths are provided
+ - Use `get_available_files()` to see all available data in your sequence
+
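+ If you prefer to skip the loader class, the same records can be read directly with `datasets`; a minimal sketch mirroring the `filename`/`data_type`/`data` columns written by `upload_to_HF.py`:
+ ```python
+ import io
+ import json
+
+ import pandas as pd
+ from datasets import load_dataset
+
+ dataset = load_dataset("ariakang/ADT-test")
+ sequence = dataset["Apartment_release_clean_seq131_M1292"]
+
+ # Deserialize each record according to its stored data type
+ for filename, data_type, data in zip(
+     sequence["filename"], sequence["data_type"], sequence["data"]
+ ):
+     if data_type == "csv":
+         print(filename, len(pd.read_csv(io.StringIO(data))), "rows")
+     elif data_type in ("json", "jsonl", "vrs_info"):
+         print(filename, type(json.loads(data)).__name__)
+ ```
+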
+ ## Repository Structure
+ VRS files are stored in sequence-specific folders:
+ `sequences/{sequence_name}/vrs_files/`
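+
+ A VRS file can be fetched individually with `hf_hub_download`; a sketch using the path reported by `get_vrs_files_info()`:
+ ```python
+ from huggingface_hub import hf_hub_download
+
+ vrs_info = loader.get_vrs_files_info()
+
+ # Download one VRS file to the local cache and get its path
+ local_path = hf_hub_download(
+     repo_id="ariakang/ADT-test",
+     filename=vrs_info[0]["path"],
+     repo_type="dataset",
+ )
+ print("Downloaded to:", local_path)
+ ```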
load_dataset.py ADDED
@@ -0,0 +1,182 @@
+ import io
+ import json
+ from typing import Any, Dict, List, Optional, Union
+
+ import pandas as pd
+ from datasets import load_dataset
+
+
+ class ADTDatasetLoader:
+     """Loader for ADT Dataset from Hugging Face."""
+
+     def __init__(self, repo_id: str = "ariakang/ADT-test", sequence_name: Optional[str] = None):
+         """
+         Initialize the dataset loader.
+
+         Args:
+             repo_id: Hugging Face repository ID
+             sequence_name: Specific sequence to load. If None, loads first available sequence
+         """
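+         # load_dataset() downloads and caches the dataset from the Hub on
+         # first use; later constructions reuse the local cache.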
+         self.dataset = load_dataset(repo_id)
+
+         if sequence_name is None:
+             sequence_name = list(self.dataset.keys())[0]
+
+         self.sequence_name = sequence_name
+         self.sequence = self.dataset[sequence_name]
+
+         # Create a mapping of filenames to indices for faster access
+         self.file_index = {
+             filename: idx for idx, filename in enumerate(self.sequence["filename"])
+         }
+
+     def _deserialize_csv(self, csv_string: str) -> pd.DataFrame:
+         """Convert CSV string to DataFrame."""
+         return pd.read_csv(io.StringIO(csv_string))
+
+     def _deserialize_json(self, json_string: str) -> Union[Dict, List]:
+         """Convert JSON string to Python object."""
+         return json.loads(json_string)
+
+     def get_available_files(self) -> List[Dict[str, str]]:
+         """Get list of all available files and their types."""
+         return [
+             {"filename": filename, "type": dtype}
+             for filename, dtype in zip(
+                 self.sequence["filename"], self.sequence["data_type"]
+             )
+         ]
+
+     def load_file_by_name(self, filename: str) -> Any:
+         """
+         Load specific file by name.
+
+         Args:
+             filename: Name of the file to load (e.g., "2d_bounding_box.csv")
+
+         Returns:
+             DataFrame for CSV files, dict/list for JSON files
+         """
+         if filename not in self.file_index:
+             raise ValueError(f"File {filename} not found in dataset")
+
+         idx = self.file_index[filename]
+         data_type = self.sequence["data_type"][idx]
+         data = self.sequence["data"][idx]
+
+         if data_type == "csv":
+             return self._deserialize_csv(data)
+         elif data_type in ["json", "jsonl"]:
+             return self._deserialize_json(data)
+         else:
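+             # Other data types (e.g. "vrs_info") are returned as the raw
+             # serialized string stored in the dataset.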
+             return data
+
+     def load_2d_bounding_boxes(self) -> pd.DataFrame:
+         """Load 2D bounding box data."""
+         return self.load_file_by_name("2d_bounding_box.csv")
+
+     def load_3d_bounding_boxes(self) -> pd.DataFrame:
+         """Load 3D bounding box data."""
+         return self.load_file_by_name("3d_bounding_box.csv")
+
+     def load_aria_trajectory(self) -> pd.DataFrame:
+         """Load Aria device trajectory data."""
+         return self.load_file_by_name("aria_trajectory.csv")
+
+     def load_eyegaze(self) -> pd.DataFrame:
+         """Load eye gaze data."""
+         return self.load_file_by_name("eyegaze.csv")
+
+     def load_scene_objects(self) -> pd.DataFrame:
+         """Load scene objects data."""
+         return self.load_file_by_name("scene_objects.csv")
+
+     def load_instances(self) -> Dict:
+         """Load instances data."""
+         return self.load_file_by_name("instances.json")
+
+     def load_metadata(self) -> Dict:
+         """Load metadata."""
+         return self.load_file_by_name("metadata.json")
+
+     def load_mps_eye_gaze(self) -> Dict[str, Union[pd.DataFrame, Dict]]:
+         """Load MPS eye gaze data."""
+         return {
+             "general": self.load_file_by_name("mps/eye_gaze/general_eye_gaze.csv"),
+             "summary": self.load_file_by_name("mps/eye_gaze/summary.json"),
+         }
+
+     def load_mps_slam(self) -> Dict[str, Union[pd.DataFrame, List]]:
+         """Load MPS SLAM data."""
+         return {
+             "closed_loop": self.load_file_by_name(
+                 "mps/slam/closed_loop_trajectory.csv"
+             ),
+             "open_loop": self.load_file_by_name("mps/slam/open_loop_trajectory.csv"),
+             "calibration": self.load_file_by_name("mps/slam/online_calibration.jsonl"),
+         }
+
+     def get_vrs_files_info(self) -> List[Dict]:
+         """Get information about VRS files."""
+         vrs_info = self.load_file_by_name("vrs_files_info.json")
+         return self._deserialize_json(vrs_info)
+
+
+ def print_dataset_summary(data: Any, name: str):
+     """Print summary of loaded data."""
+     if isinstance(data, pd.DataFrame):
+         print(f"\n{name}:")
+         print(f"Shape: {data.shape}")
+         print("Columns:", list(data.columns))
+         print("Sample data:")
+         print(data.head(2))
+     elif isinstance(data, dict):
+         print(f"\n{name}:")
+         print("Keys:", list(data.keys()))
+     elif isinstance(data, list):
+         print(f"\n{name}:")
+         print(f"Number of items: {len(data)}")
+         if data:
+             print("First item sample:", data[0])
+
+
+ def main():
+     """Example usage of the dataset loader."""
+     loader = ADTDatasetLoader()
+
+     print(f"Loading sequence: {loader.sequence_name}")
+
+     # Print available files
+     print("\nAvailable files:")
+     for file_info in loader.get_available_files():
+         print(f"- {file_info['filename']} ({file_info['type']})")
+
+     # Load and print summaries of all data types
+     print("\n=== Loading all data types ===")
+
+     # Bounding boxes
+     print_dataset_summary(loader.load_2d_bounding_boxes(), "2D Bounding Boxes")
+     print_dataset_summary(loader.load_3d_bounding_boxes(), "3D Bounding Boxes")
+
+     # Trajectory and eye gaze
+     print_dataset_summary(loader.load_aria_trajectory(), "Aria Trajectory")
+     print_dataset_summary(loader.load_eyegaze(), "Eye Gaze")
+
+     # Scene objects
+     print_dataset_summary(loader.load_scene_objects(), "Scene Objects")
+
+     # JSON data
+     print_dataset_summary(loader.load_instances(), "Instances")
+     print_dataset_summary(loader.load_metadata(), "Metadata")
+
+     # MPS data
+     print_dataset_summary(loader.load_mps_eye_gaze(), "MPS Eye Gaze")
+     print_dataset_summary(loader.load_mps_slam(), "MPS SLAM")
+
+     # VRS files info
+     print_dataset_summary(loader.get_vrs_files_info(), "VRS Files")
+
+
+ if __name__ == "__main__":
+     main()
upload_to_HF.py ADDED
@@ -0,0 +1,227 @@
+ import io
+ import json
+ import os
+ from pathlib import Path
+
+ import pandas as pd
+ from datasets import Dataset, DatasetDict
+ from huggingface_hub import HfApi
+
+
+ def serialize_dataframe(df):
+     """Convert DataFrame to string."""
+     buffer = io.StringIO()
+     df.to_csv(buffer, index=False)
+     return buffer.getvalue()
+
+
+ def load_csv_safely(file_path):
+     """Load CSV file and convert to string."""
+     if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
+         df = pd.read_csv(file_path)
+         return serialize_dataframe(df)
+     return ""
+
+
+ def load_json_safely(file_path):
+     """Load JSON/JSONL file and convert to string."""
+     if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
+         with open(file_path, "r") as f:
+             if file_path.endswith(".jsonl"):
+                 data = [json.loads(line) for line in f if line.strip()]
+             else:
+                 try:
+                     data = json.load(f)
+                 except json.JSONDecodeError:
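+                     # Some .json files are actually JSON Lines; re-read the
+                     # file and parse it line by line instead.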
+                     f.seek(0)
+                     data = [json.loads(line) for line in f if line.strip()]
+         return json.dumps(data)
+     return ""
+
+
+ def upload_sequence(sequence_path, sequence_name, repo_id="ariakang/ADT-test"):
+     """Upload a single sequence to Hugging Face Hub."""
+
+     print(f"Starting upload process for sequence: {sequence_name}")
+
+     # Initialize Hugging Face API
+     api = HfApi()
+
+     # Upload VRS files first
+     print("Uploading VRS files...")
+     vrs_files = list(Path(sequence_path).glob("*.vrs"))
+     print("Found VRS files:", [f.name for f in vrs_files])
+
+     vrs_info = []
+     for vrs_file in vrs_files:
+         print(f"Uploading {vrs_file.name}...")
+         path_in_repo = f"sequences/{sequence_name}/vrs_files/{vrs_file.name}"
+
+         try:
+             api.upload_file(
+                 path_or_fileobj=str(vrs_file),
+                 path_in_repo=path_in_repo,
+                 repo_id=repo_id,
+                 repo_type="dataset",
+             )
+             print(f"Uploaded {vrs_file.name}")
+             vrs_info.append(
+                 {
+                     "filename": vrs_file.name,
+                     "path": path_in_repo,
+                     "size_bytes": vrs_file.stat().st_size,
+                 }
+             )
+         except Exception as e:
+             print(f"Error uploading {vrs_file.name}: {str(e)}")
+             raise
+
+     # Prepare sequence data
+     sequence_data = {
+         "data_type": [],  # To identify what type of data each entry is
+         "data": [],  # The serialized data
+         "filename": [],  # Original filename
+     }
+
+     # Load CSV files
+     csv_files = [
+         "2d_bounding_box.csv",
+         "3d_bounding_box.csv",
+         "aria_trajectory.csv",
+         "eyegaze.csv",
+         "scene_objects.csv",
+     ]
+
+     for file in csv_files:
+         file_path = os.path.join(sequence_path, file)
+         data = load_csv_safely(file_path)
+         if data:
+             sequence_data["data_type"].append("csv")
+             sequence_data["data"].append(data)
+             sequence_data["filename"].append(file)
+             print(f"Loaded {file}")
+
+     # Load JSON files
+     json_files = ["instances.json", "metadata.json"]
+     for file in json_files:
+         file_path = os.path.join(sequence_path, file)
+         data = load_json_safely(file_path)
+         if data:
+             sequence_data["data_type"].append("json")
+             sequence_data["data"].append(data)
+             sequence_data["filename"].append(file)
+             print(f"Loaded {file}")
+
+     # Load MPS folder data
+     mps_path = os.path.join(sequence_path, "mps")
+     if os.path.exists(mps_path):
+         # Eye gaze data
+         eye_gaze_path = os.path.join(mps_path, "eye_gaze")
+         if os.path.exists(eye_gaze_path):
+             data = load_csv_safely(os.path.join(eye_gaze_path, "general_eye_gaze.csv"))
+             if data:
+                 sequence_data["data_type"].append("csv")
+                 sequence_data["data"].append(data)
+                 sequence_data["filename"].append("mps/eye_gaze/general_eye_gaze.csv")
+
+             data = load_json_safely(os.path.join(eye_gaze_path, "summary.json"))
+             if data:
+                 sequence_data["data_type"].append("json")
+                 sequence_data["data"].append(data)
+                 sequence_data["filename"].append("mps/eye_gaze/summary.json")
+
+         # SLAM data
+         slam_path = os.path.join(mps_path, "slam")
+         if os.path.exists(slam_path):
+             for file in ["closed_loop_trajectory.csv", "open_loop_trajectory.csv"]:
+                 data = load_csv_safely(os.path.join(slam_path, file))
+                 if data:
+                     sequence_data["data_type"].append("csv")
+                     sequence_data["data"].append(data)
+                     sequence_data["filename"].append(f"mps/slam/{file}")
+
+             data = load_json_safely(os.path.join(slam_path, "online_calibration.jsonl"))
+             if data:
+                 sequence_data["data_type"].append("jsonl")
+                 sequence_data["data"].append(data)
+                 sequence_data["filename"].append("mps/slam/online_calibration.jsonl")
+
+     # Add VRS file information
+     sequence_data["data_type"].append("vrs_info")
+     sequence_data["data"].append(json.dumps(vrs_info))
+     sequence_data["filename"].append("vrs_files_info.json")
+
+     # Create dataset
+     dataset_dict = DatasetDict({sequence_name: Dataset.from_dict(sequence_data)})
+
+     print("\nPushing dataset to hub...")
+     dataset_dict.push_to_hub(repo_id=repo_id, private=True)
+
+     # Update README
+     readme_content = """---
+ language:
+ - en
+ license:
+ - mit
+ ---
+
+ # ADT Dataset
+
+ ## Dataset Description
+ This dataset contains Aria Digital Twin (ADT) sequences with various sensor data and annotations.
+
+ ## Usage Example
+ ```python
+ from datasets import load_dataset
+ import pandas as pd
+ import json
+ import io
+
+ def deserialize_csv(csv_string):
+     return pd.read_csv(io.StringIO(csv_string))
+
+ def deserialize_json(json_string):
+     return json.loads(json_string)
+
+ # Load the dataset
+ dataset = load_dataset("ariakang/ADT-test")
+ sequence = dataset["{sequence_name}"]
+
+ # Get list of available files
+ files = list(zip(sequence["filename"], sequence["data_type"]))
+ print("Available files:", files)
+
+ # Load specific data
+ for i, (filename, data_type, data) in enumerate(zip(
+     sequence["filename"], sequence["data_type"], sequence["data"]
+ )):
+     if data_type == "csv":
+         df = deserialize_csv(data)
+         print(f"Loaded CSV {filename}: {len(df)} rows")
+     elif data_type in ["json", "jsonl"]:
+         json_data = deserialize_json(data)
+         print(f"Loaded JSON {filename}")
+     elif data_type == "vrs_info":
+         vrs_info = deserialize_json(data)
+         print(f"VRS files: {[f['filename'] for f in vrs_info]}")
+ ```
+
+ ## VRS Files
+ VRS files are stored in: sequences/{sequence_name}/vrs_files/
+ """
+
+     api.upload_file(
+         path_or_fileobj=readme_content.encode(),
+         path_in_repo="README.md",
+         repo_id=repo_id,
+         repo_type="dataset",
+     )
+
+     return f"https://huggingface.co/datasets/{repo_id}"
+
+
+ if __name__ == "__main__":
+     sequence_path = "/Users/ariak/Documents/projectaria_tools_adt_data/Apartment_release_clean_seq131_M1292"
+     sequence_name = "Apartment_release_clean_seq131_M1292"
+     repo_url = upload_sequence(sequence_path, sequence_name)
+     print(f"Dataset uploaded successfully to: {repo_url}")