Formats: parquet
Libraries: Datasets, Dask
License:
jwkirchenbauer committed (verified)
Commit b3d2c6b · 1 Parent(s): 447ac77

Upload aux_files/packed_cycle_dataset.py with huggingface_hub

Files changed (1)
  1. aux_files/packed_cycle_dataset.py +380 -1
aux_files/packed_cycle_dataset.py CHANGED
@@ -1 +1,380 @@
- Gemstones Training Dataset - Worker sharded version
+ # Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
+
+ # Very loosely inspired by indexed_dataset in Fairseq, Megatron
+ # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py
+
+
+ import os
+ import random
+ import struct
+ import hashlib
+
+ import numpy as np
+ import torch
+ from torch.utils.data import IterableDataset, get_worker_info
+ from litgpt.data_scheduler_utils import DataSchedulerTracker
+ from typing import Optional, Sequence, Any
+
+ dtypes = {
+     1: np.uint8,
+     2: np.int8,
+     3: np.int16,
+     4: np.int32,
+     5: np.int64,
+     6: np.float32,
+     7: np.float64,
+     8: np.uint16,
+ }
+
+
+ def code(dtype):
+     for k in dtypes:
+         if dtypes[k] == dtype:
+             return k
+     raise ValueError(dtype)
+
+
+ HDR_MAGIC = b"LITPKDS"
+ HDR_SIZE = 24  # bytes
+
+
+ class PackedDataset(IterableDataset):
+     def __init__(
+         self,
+         filenames,
+         n_chunks,
+         block_size,
+         seed=12345,
+         shuffle=True,
+         wrap=False,
+         num_processes=1,
+         process_rank=0,
+         data_id=None,
+         return_data_id=False,
+     ):
+         self._filenames = filenames
+         self._n_chunks = n_chunks
+         self._block_size = block_size
+         self._seed = seed
+         self._shuffle = shuffle
+         self._wrap = wrap
+         self._num_processes = num_processes
+         self._process_rank = process_rank
+         self._ds_fingerprint = None
+         self._data_id = data_id  # This is human readable, corresponds to the full file list.
+         if return_data_id:
+             raise NotImplementedError("return_data_id is not implemented for PackedDataset")
+
+     def __iter__(self):
+         worker_info = get_worker_info()
+         num_workers = worker_info.num_workers if worker_info is not None else 1
+         worker_id = worker_info.id if worker_info is not None else 0
+         num_shards = num_workers * self._num_processes
+         shard_id = self._process_rank * num_workers + worker_id
+
+         total_num_files = len(self._filenames)
+         max_num_files = total_num_files // num_shards * num_shards
+         filenames = self._filenames[shard_id:max_num_files:num_shards]
+
+         self._ds_fingerprint = hashlib.shake_128(str(filenames).encode()).hexdigest(
+             4
+         )  # This is not human readable, corresponds to the file list _this_ process is using.
+
+         print(
+             f"Rank {self._process_rank}/{self._num_processes}, worker {worker_id} has {len(filenames)}/{total_num_files} files | "
+             f"identifier={self._data_id}:{self._ds_fingerprint}"
+         )
+
+         return PackedDatasetIterator(
+             filenames=filenames,
+             n_chunks=self._n_chunks,
+             block_size=self._block_size,
+             seed=self._seed,
+             shuffle=self._shuffle,
+             wrap=self._wrap,
+             data_id=self._data_id,
+             fingerprint=self._ds_fingerprint,
+             worker_id=worker_id,
+             process_rank=self._process_rank,
+             num_processes=self._num_processes,
+         )
+
+
+ class PackedDatasetBuilder(object):
+     def __init__(self, outdir, prefix, chunk_size, sep_token, dtype="auto", vocab_size=None):
+         if dtype == "auto":
+             if vocab_size is None:
+                 raise ValueError("vocab_size cannot be None when dtype='auto'")
+             if vocab_size is not None and vocab_size < 65500:
+                 self._dtype = np.uint16
+             else:
+                 self._dtype = np.int32
+         else:
+             self._dtype = dtype
+         self._counter = 0
+         self._chunk_size = chunk_size
+         self._outdir = outdir
+         self._prefix = prefix
+         self._sep_token = sep_token
+         self._arr = np.zeros(self._chunk_size, dtype=self._dtype)
+         self._arr.fill(self._sep_token)
+         self._idx = 0
+         self._version = 1
+         self._filenames = []
+         self._total_tokens_exact = 0
+         self._filler_sep_tokens = 0
+
+     def _write_chunk(self, skip_write=False):
+         filename = f"{self._prefix}_{self._counter:010d}.bin"
+         filename = os.path.join(self._outdir, filename)
+
+         # right before we write, we can compute the number of tokens being written
+         # and update the total number of tokens
+         last_non_sep_idx = np.argwhere((self._arr != self._sep_token)).squeeze()[-1]
+         tokens_in_chunk = last_non_sep_idx + 1  # +1 for zero-indexing
+
+         if skip_write:
+             self._arr.fill(self._sep_token)
+             self._idx = 0
+             return tokens_in_chunk  # amount we are skipping
+
+         self._filler_sep_tokens += self._chunk_size - tokens_in_chunk
+         self._total_tokens_exact += tokens_in_chunk
+         # print(
+         #     f"Chunk written with {tokens_in_chunk} tokens and {self._filler_sep_tokens} filler sep tokens"
+         # )
+
+         with open(filename, "wb") as f:
+             f.write(HDR_MAGIC)
+             f.write(struct.pack("<Q", self._version))
+             f.write(struct.pack("<B", code(self._dtype)))
+             f.write(struct.pack("<Q", self._chunk_size))
+             f.write(self._arr.tobytes(order="C"))
+
+         self._filenames.append(filename)
+         self._counter += 1
+         self._arr.fill(self._sep_token)
+         self._idx = 0
+
+     @property
+     def dtype(self):
+         return self._dtype
+
+     @property
+     def filenames(self):
+         return self._filenames.copy()
+
+     def add_array(self, arr):
+         while self._idx + arr.shape[0] > self._chunk_size:
+             part_len = self._chunk_size - self._idx
+             self._arr[self._idx : self._idx + part_len] = arr[:part_len]
+             self._write_chunk()
+             arr = arr[part_len:]
+
+         arr_len = arr.shape[0]
+         self._arr[self._idx : self._idx + arr_len] = arr
+         self._idx += arr_len
+
+     def write_remainder(self):
+         self._write_chunk()
+
+     def skip_write_remainder(self):
+         return self._write_chunk(skip_write=True)
+
+
+ BlockIdxType = Sequence[int] | np.ndarray[Any, np.dtype[np.int64]]
+
+
+ class PackedDatasetIterator:
+     def __init__(
+         self,
+         filenames,
+         n_chunks,
+         block_size,
+         seed,
+         shuffle,
+         wrap,
+         data_id=None,
+         fingerprint=None,
+         worker_id=None,
+         process_rank=None,
+         num_processes=None,
+     ):
+         self._data_id = data_id
+         self._ds_fingerprint = fingerprint
+         self._worker_id = worker_id
+         self._process_rank = process_rank
+         self._num_processes = num_processes
+
+         self._seed = seed
+         self._shuffle = shuffle
+         self._rng = np.random.default_rng(seed)  # if shuffle else None
+
+         self._wrap = wrap
+
+         # TODO: instead of filenames, we could have a single text stream
+         # (or text file) with the sequence of all files to be
+         # fetched/loaded.
+         self._filenames = filenames
+         self._file_idx = 0
+
+         self._n_chunks = n_chunks
+
+         self._dtype: Optional[np.dtype] = None
+         self._block_size = block_size
+         # self._n_blocks: Optional[int] = None
+
+         self._mmaps = []
+         self._buffers = []
+         self._curr_idx = 0
+
+         self._load_n_chunks()
+
+     def _read_header(self, path):
+         with open(path, "rb") as f:
+             magic = f.read(len(HDR_MAGIC))
+             assert magic == HDR_MAGIC, "File doesn't match expected format."
+             version = struct.unpack("<Q", f.read(8))
+             assert version == (1,)
+             (dtype_code,) = struct.unpack("<B", f.read(1))
+             dtype = dtypes[dtype_code]
+             (chunk_size,) = struct.unpack("<Q", f.read(8))
+         return dtype, chunk_size
+
+     def _close_mmaps(self):
+         for mmap in self._mmaps:
+             mmap._mmap.close()
+
+     def fast_forward(self, block_idx):
+         """Stub for eventual fast-forward"""
+         pass
+
+     def _load_n_chunks(self):
+         self._close_mmaps()
+         self._mmaps = []
+         self._buffers = []
+
+         if self._n_chunks > len(self._filenames[self._file_idx :]):
+             if not self._wrap:
+                 raise StopIteration
+             self._file_idx = 0
+
+         # only print on the first 3 times we load chunks
+         if (self._file_idx * self._n_chunks) < (3 * self._n_chunks):
+             print(
+                 f"({self._process_rank}/{self._num_processes}) will load {self._n_chunks} chunks: {self._filenames[self._file_idx:self._file_idx+self._n_chunks]}"
+             )
+
+         for i in range(self._n_chunks):
+             filename = self._filenames[self._file_idx + i]
+             if self._dtype is None:
+                 self._dtype, self._chunk_size = self._read_header(filename)
+                 self._n_blocks = self._chunk_size // self._block_size
+             # TODO: check header matches with previous files
+             mmap = np.memmap(filename, mode="r", order="C", offset=HDR_SIZE)
+             self._mmaps.append(mmap)
+             self._buffers.append(memoryview(mmap))  # type: ignore
+
+         self._file_idx += self._n_chunks
+         n_all_blocks = self._n_chunks * self._n_blocks
+
+         self._block_idxs: BlockIdxType = self._rng.permutation(n_all_blocks) if self._shuffle else range(n_all_blocks)
+
+         # only print on the first 3 times we load chunks
+         if (self._file_idx * self._n_chunks) < (3 * self._n_chunks):
+             print(f"({self._process_rank}/{self._num_processes}) block read order: {self._block_idxs}")
+
+         self._curr_idx = 0
+
+     def __del__(self):
+         self._close_mmaps()
+         del self._mmaps
+         del self._buffers
+
+     def __iter__(self):
+         return self
+
+     def __next__(self):
+         if self._curr_idx >= len(self._block_idxs):
+             self._load_n_chunks()
+             # TODO: trigger fetching next next n_chunks if remote
+         block_idx = self._block_idxs[self._curr_idx]
+         chunk_id = block_idx // self._n_blocks
+         buffer = self._buffers[chunk_id]
+         elem_id = (block_idx % self._n_blocks) * self._block_size
+         offset = np.dtype(self._dtype).itemsize * elem_id
+         arr = np.frombuffer(buffer, dtype=self._dtype, count=self._block_size, offset=offset)
+         self._curr_idx += 1
+         return torch.from_numpy(arr.astype(np.int64))
+
+
+ class CombinedDataset(IterableDataset):
+     def __init__(self, datasets, seed, data_scheduler_tracker=None, data_telemetry=False):
+         self._seed = seed
+         self._datasets = datasets
+         self._data_scheduler_tracker = data_scheduler_tracker
+         self._data_telemetry = data_telemetry
+         n_datasets = len(datasets)
+         if data_scheduler_tracker is None:
+             self._data_scheduler_tracker = DataSchedulerTracker([1 / n_datasets] * n_datasets)
+
+     def __iter__(self):
+         return CombinedDatasetIterator(self._datasets, self._seed, self._data_scheduler_tracker, self._data_telemetry)
+
+
+ class CombinedDatasetIterator:
+     def __init__(self, datasets, seed, data_scheduler_tracker, data_telemetry=False):
+         self._datasets = datasets
+         self._datasets_iterators = [iter(el) for el in datasets]
+         self._num_datasets = len(datasets)
+         self._data_scheduler_tracker = data_scheduler_tracker
+         self._rng = random.Random(seed)
+         self._iter_ct = 0
+         self._data_telemetry = data_telemetry
+
+     def __next__(self):
+         if sum(self._data_scheduler_tracker.weights) == 0:
+             if self._data_scheduler_tracker.base_id is not None:
+                 # if all buckets have 0 weight, return the base dataset
+                 self._data_scheduler_tracker.weights[self._data_scheduler_tracker.base_id] = 100
+                 return self.__next__()
+             else:
+                 # if all buckets have 0 weight and no base dataset, return empty
+                 return torch.tensor([])
+
+         (dataset_idx,) = self._rng.choices(range(self._num_datasets), weights=self._data_scheduler_tracker.weights, k=1)
+         dataset = self._datasets_iterators[dataset_idx]
+
+         try:
+             curr_data = next(dataset)
+             self._data_scheduler_tracker.sample_count[dataset_idx] += 1
+
+             self._iter_ct += 1
+
+             # this is the very beginning of data telemetry
+             if self._data_telemetry and self._iter_ct < 5:
+                 print(
+                     f"Draw result i={self._iter_ct} for rank={dataset._process_rank}/{dataset._num_processes}, "
+                     f"worker={dataset._worker_id} | {dataset._data_id}:{dataset._ds_fingerprint}"
+                 )
+             elif self._data_telemetry and self._iter_ct == 5:
+                 print("Data telemetry off ...")
+
+             return curr_data
+         except Exception as e:  # too broad; intended to catch the StopIteration raised when a dataset shard is exhausted.
+             self._data_scheduler_tracker.epoch_count[dataset_idx] += 1
+             self._datasets_iterators[dataset_idx] = iter(self._datasets[dataset_idx])
+
+             if (self._data_scheduler_tracker.max_epochs is not None) and (
+                 self._data_scheduler_tracker.max_epochs[dataset_idx]
+                 <= self._data_scheduler_tracker.epoch_count[dataset_idx]
+             ):
+                 # if exceeds max epoch
+                 self._data_scheduler_tracker.weights[dataset_idx] = 0
+                 return self.__next__()
+             else:
+                 dataset = self._datasets_iterators[dataset_idx]
+                 curr_data = next(dataset)
+                 self._data_scheduler_tracker.sample_count[dataset_idx] += 1
+
+                 return curr_data
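
For context (not part of the commit): a minimal sketch of how token arrays might be packed into the .bin chunk files this module reads. The directory, prefix, chunk size, and toy token arrays are all illustrative; only the PackedDatasetBuilder API comes from the file above, and importing it assumes aux_files/ is on the Python path and that litgpt is installed (the module imports DataSchedulerTracker at load time).

import os

import numpy as np

from packed_cycle_dataset import PackedDatasetBuilder  # assumes aux_files/ is on sys.path

outdir = "data/chunks"  # hypothetical output directory
os.makedirs(outdir, exist_ok=True)

builder = PackedDatasetBuilder(
    outdir=outdir,
    prefix="train_shard",   # files are named train_shard_0000000000.bin, ...
    chunk_size=2049 * 8,    # tokens per chunk; a multiple of the training block size
    sep_token=0,            # token id used to pad the tail of the final chunk
    dtype="auto",
    vocab_size=50432,       # < 65500, so tokens are stored as uint16
)

# Two fake "tokenized documents"; any 1-D arrays of token ids work here.
for doc in (np.arange(1, 5000) % 50432, np.arange(1, 3000) % 50432):
    builder.add_array(np.array(doc, dtype=builder.dtype))

builder.write_remainder()  # flush the partially filled last chunk
print(builder.filenames)   # e.g. ['data/chunks/train_shard_0000000000.bin']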
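
A companion sketch of streaming fixed-length blocks back out of those chunks with PackedDataset and a plain PyTorch DataLoader. The path, block_size, and batch size are illustrative; in multi-process training, num_processes and process_rank would come from the distributed launcher so that each rank (and each dataloader worker) reads a disjoint shard of the file list.

import glob

from torch.utils.data import DataLoader

from packed_cycle_dataset import PackedDataset  # assumes aux_files/ is on sys.path

filenames = sorted(glob.glob("data/chunks/train_shard_*.bin"))

dataset = PackedDataset(
    filenames,
    n_chunks=1,        # number of chunk files memory-mapped at a time
    block_size=2049,   # tokens per yielded block (e.g. context length + 1 for shifted targets)
    seed=12345,
    shuffle=True,      # shuffle block order within the loaded chunks
    wrap=True,         # cycle over the shard instead of raising StopIteration
    num_processes=1,   # world size
    process_rank=0,    # global rank of this process
    data_id="train_shard",
)

loader = DataLoader(dataset, batch_size=4, num_workers=0)
batch = next(iter(loader))
print(batch.shape, batch.dtype)  # torch.Size([4, 2049]) torch.int64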
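
Finally, a sketch of mixing several streams with CombinedDataset. The two source directories, the seed, and the small make_stream helper are hypothetical. When no DataSchedulerTracker is passed, the code above builds one with uniform weights ([1 / n_datasets] * n_datasets), so each sample is drawn from one of the streams with equal probability; a custom tracker from litgpt.data_scheduler_utils could instead supply non-uniform weights, a base dataset, and per-dataset epoch caps.

import glob

from torch.utils.data import DataLoader

from packed_cycle_dataset import CombinedDataset, PackedDataset  # assumes aux_files/ is on sys.path

def make_stream(pattern, data_id):
    # Helper for this sketch only: wraps a directory of .bin chunks as a PackedDataset.
    return PackedDataset(
        sorted(glob.glob(pattern)),
        n_chunks=1,
        block_size=2049,
        shuffle=True,
        wrap=True,
        num_processes=1,
        process_rank=0,
        data_id=data_id,
    )

streams = [
    make_stream("data/web/*.bin", "web"),    # hypothetical paths
    make_stream("data/code/*.bin", "code"),
]

mixed = CombinedDataset(datasets=streams, seed=42)  # defaults to uniform mixture weights
loader = DataLoader(mixed, batch_size=4, num_workers=0)
# Each element of a batch is drawn independently from one of the two streams
# according to the tracker's weights.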