|
wandb: WARNING Saving files without folders. If you want to preserve subdirectories pass base_path to wandb.save, i.e. wandb.save("/mnt/folder/file.h5", base_path="/mnt")
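The warning already contains the fix: pass base_path so wandb keeps the directory structure of the saved file instead of flattening it. A minimal sketch using the warning's own example path (the project name is a placeholder, not from this run):

import wandb

run = wandb.init(project="axolotl-run")  # placeholder project name
# Without base_path, wandb saves only "file.h5" at the top level of the run's files.
# With base_path="/mnt", the "folder/" prefix is preserved under the run.
wandb.save("/mnt/folder/file.h5", base_path="/mnt")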
|
0%| | 0/12382 [00:00<?, ?it/s]You |
|
/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/torch/utils/checkpoint.py:429: UserWarning: torch.utils.checkpoint: please pass in use_reentrant=True or use_reentrant=False explicitly. The default value of use_reentrant will be updated to be False in the future. To maintain current behavior, pass use_reentrant=True. It is recommended that you use use_reentrant=False. Refer to docs for more details on the differences between the two variants.
  warnings.warn(
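This warning means gradient checkpointing is being invoked with the legacy default. Passing use_reentrant explicitly, as the message recommends, silences it. A minimal sketch at the torch.utils.checkpoint level (the layer and input are stand-ins, not the model from this run):

import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Linear(16, 16)
x = torch.randn(2, 16, requires_grad=True)

# Explicit use_reentrant=False opts into the newer, recommended checkpointing path
# and makes the UserWarning above go away.
out = checkpoint(layer, x, use_reentrant=False)
out.sum().backward()

When checkpointing is enabled through the Hugging Face Trainer rather than called directly, recent transformers versions expose the same knob as gradient_checkpointing_kwargs={"use_reentrant": False}.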
|
[rank0]:[2024-05-12 10:23:54,974] [22/0] torch._dynamo.variables.higher_order_ops: [WARNING] speculate_subgraph: while introspecting the user-defined autograd.Function, we were unable to trace function `trampoline_autograd_fwd` into a single graph. This means that Dynamo was unable to prove safety for this API and will fall back to eager-mode PyTorch, which could lead to a slowdown. |
|
[rank0]:[2024-05-12 10:23:54,975] [22/0] torch._dynamo.variables.higher_order_ops: [ERROR] Tensor.data_ptr |
|
[rank0]:[2024-05-12 10:23:54,989] [23/0] torch._dynamo.variables.higher_order_ops: [WARNING] speculate_subgraph: while introspecting the user-defined autograd.Function, we were unable to trace function `trampoline_autograd_fwd` into a single graph. This means that Dynamo was unable to prove safety for this API and will fall back to eager-mode PyTorch, which could lead to a slowdown. |
|
[rank0]:[2024-05-12 10:23:54,989] [23/0] torch._dynamo.variables.higher_order_ops: [ERROR] Tensor.data_ptr |
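These two torch._dynamo messages mean torch.compile could not trace a user-defined autograd.Function (the untraceable call is apparently Tensor.data_ptr), so that region falls back to eager execution with a possible slowdown but no failure. A purely illustrative sketch of the same pattern, not taken from the Axolotl code:

import torch

class PtrFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        # data_ptr() inside the traced forward is the kind of call Dynamo cannot
        # prove safe; compile warns and runs this Function eagerly instead.
        _ = x.data_ptr()
        return x * 2

    @staticmethod
    def backward(ctx, grad_out):
        return grad_out * 2

@torch.compile
def f(x):
    return PtrFn.apply(x)

x = torch.randn(4, requires_grad=True)
f(x).sum().backward()  # still runs; the Function's body just isn't compiled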
|
[2024-05-12 10:23:41,802] [INFO] [axolotl.callbacks.on_train_begin:770] [PID:41518] [RANK:0] The Axolotl config has been saved to the WandB run under files. |
|
[2024-05-12 10:23:42,956] [INFO] [axolotl.utils.samplers.multipack._len_est:184] [PID:41518] [RANK:0] packing_efficiency_estimate: 0.92 total_num_tokens per device: 47141994 |
|
[2024-05-12 10:23:44,000] [INFO] [axolotl.utils.samplers.multipack._len_est:184] [PID:41518] [RANK:0] packing_efficiency_estimate: 0.92 total_num_tokens per device: 47141994 |
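The multipack sampler estimate above is roughly where the 12382 total steps in the progress bar come from: estimated batches ≈ total_num_tokens / (sequence_length × micro_batch_size × packing_efficiency_estimate). A back-of-the-envelope check, assuming sequence_len=4096 and micro_batch_size=1 (neither value is shown in this log, and axolotl's estimator applies additional safety factors, so this is only a sanity check):

total_num_tokens = 47_141_994          # from the log line above
packing_efficiency_estimate = 0.92     # from the log line above
sequence_len = 4096                    # assumption, not in the log
micro_batch_size = 1                   # assumption, not in the log

est_batches = total_num_tokens / (packing_efficiency_estimate * sequence_len * micro_batch_size)
print(round(est_batches))  # ~12510, the same ballpark as the 12382 steps in the progress bar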
|
|
0%| | 1/12382 [00:34<115:27:20, 33.57s/it]Traceback (most recent call last):
  File "/root/miniconda3/envs/py3.10/lib/python3.10/runpy.py", line 196, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/root/miniconda3/envs/py3.10/lib/python3.10/runpy.py", line 86, in _run_code
    exec(code, run_globals)
  File "/workspace/disk1/axolotl/src/axolotl/cli/train.py", line 70, in <module>
    fire.Fire(do_cli)
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/fire/core.py", line 143, in Fire
    component_trace = _Fire(component, args, parsed_flag_args, context, name)
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/fire/core.py", line 477, in _Fire
    component, remaining_args = _CallAndUpdateTrace(
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/fire/core.py", line 693, in _CallAndUpdateTrace
    component = fn(*varargs, **kwargs)
  File "/workspace/disk1/axolotl/src/axolotl/cli/train.py", line 38, in do_cli
    return do_train(parsed_cfg, parsed_cli_args)
  File "/workspace/disk1/axolotl/src/axolotl/cli/train.py", line 66, in do_train
    return train(cfg=cfg, cli_args=cli_args, dataset_meta=dataset_meta)
  File "/workspace/disk1/axolotl/src/axolotl/train.py", line 170, in train
    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/transformers/trainer.py", line 1828, in train
    return inner_training_loop(
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/transformers/trainer.py", line 2256, in _inner_training_loop
    self._maybe_log_save_evaluate(tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval)
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/transformers/trainer.py", line 2640, in _maybe_log_save_evaluate
    metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/transformers/trainer.py", line 3445, in evaluate
    output = eval_loop(
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/transformers/trainer.py", line 3624, in evaluation_loop
    for step, inputs in enumerate(dataloader):
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/accelerate/data_loader.py", line 452, in __iter__
    current_batch = next(dataloader_iter)
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/torch/utils/data/dataloader.py", line 630, in __next__
    data = self._next_data()
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/torch/utils/data/dataloader.py", line 674, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "/workspace/disk1/axolotl/src/axolotl/monkeypatch/data/batch_dataset_fetcher.py", line 32, in fetch
    return self.collate_fn(data)
  File "/workspace/disk1/axolotl/src/axolotl/utils/collators.py", line 106, in __call__
    features = self.tokenizer.pad(
  File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3274, in pad
    raise ValueError(
ValueError: You should supply an encoding or a list of encodings to this method that includes input_ids, but you provided [
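The error message is truncated here, but the cause it names is clear: tokenizer.pad received a batch of features that do not contain an input_ids key, so the collator in axolotl/utils/collators.py cannot pad them. A minimal sketch of what pad expects versus what reproduces this error (the model name and feature values are placeholders, not from this run):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model, not the one trained here
tokenizer.pad_token = tokenizer.eos_token          # gpt2 has no pad token by default

# What tokenizer.pad expects: a list of encodings that each carry "input_ids".
good = [{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5]}]
print(tokenizer.pad(good, padding=True, return_tensors="pt")["input_ids"].shape)  # torch.Size([2, 3])

# What reproduces the ValueError above: features with no "input_ids" key, e.g. an
# eval dataset whose columns were renamed or dropped before reaching the collator.
bad = [{"labels": [1, 2, 3]}]
try:
    tokenizer.pad(bad, padding=True, return_tensors="pt")
except ValueError as err:
    print(err)  # "... that includes input_ids, but you provided ['labels']"

Since the traceback goes through evaluate and evaluation_loop, the failing batch comes from the evaluation dataloader, so the eval split is the first place to check for missing input_ids.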