muellerzr committed
Commit ecb9f74
1 Parent(s): 7db3210

Remove whitespace

README.md CHANGED
@@ -50,4 +50,9 @@ To:
  ```
  Then remove all import statements (as we only care about the content).
 
+ Strip all blank spaces/whitespace:
+ ```regex
+ ^(?:[\t ]*(?:\r?\n|\r))+
+ ```
+
  **WARNING**: It is known that this will separate out the `_inner()` in the source code and use it as a separate function, losing the context. Trying out with this issue for now.
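A minimal sketch of how the regex above might be applied in Python. It assumes the `re.MULTILINE` flag so that `^` anchors at every line start; the helper name `strip_blank_lines` is illustrative and not part of this commit:

```python
import re

# Matches runs of whitespace-only lines; MULTILINE makes `^` anchor at the
# start of every line rather than only at the start of the string.
BLANK_LINES = re.compile(r"^(?:[\t ]*(?:\r?\n|\r))+", flags=re.MULTILINE)


def strip_blank_lines(source: str) -> str:
    """Remove blank/whitespace-only lines from a source snippet."""
    return BLANK_LINES.sub("", source)


if __name__ == "__main__":
    sample = "def f():\n\n    \n    return 1\n"
    print(strip_blank_lines(sample))  # -> "def f():\n    return 1\n"
```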
src/accelerator.py CHANGED
@@ -1,11 +1,7 @@
1
-
2
  logger = get_logger(__name__)
3
-
4
-
5
  class Accelerator:
6
  """
7
  Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training.
8
-
9
  Args:
10
  device_placement (`bool`, *optional*, defaults to `True`):
11
  Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
@@ -39,17 +35,14 @@ class Accelerator:
39
  rng_types (list of `str` or [`~utils.RNGType`]):
40
  The list of random number generators to synchronize at the beginning of each iteration in your prepared
41
  dataloaders. Should be one or several of:
42
-
43
  - `"torch"`: the base torch random number generator
44
  - `"cuda"`: the CUDA random number generator (GPU only)
45
  - `"xla"`: the XLA random number generator (TPU only)
46
  - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
47
  dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
48
-
49
  Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6.
50
  log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
51
  A list of loggers to be set up for experiment tracking. Should be one or several of:
52
-
53
  - `"all"`
54
  - `"tensorboard"`
55
  - `"wandb"`
@@ -80,9 +73,7 @@ class Accelerator:
80
  gradient_accumulation_plugin (`GradientAccumulationPlugin`, *optional*):
81
  A configuration for how gradient accumulation should be handled, if more tweaking than just the
82
  `gradient_accumulation_steps` is needed.
83
-
84
  **Available attributes:**
85
-
86
  - **device** (`torch.device`) -- The device to use.
87
  - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.
88
  - **local_process_index** (`int`) -- The process index on the current machine.
@@ -130,9 +121,7 @@ class Accelerator:
130
  raise ValueError(
131
  f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}"
132
  )
133
-
134
  dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend)
135
-
136
  if deepspeed_plugin is None: # init from env variables
137
  deepspeed_plugin = (
138
  DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None
@@ -147,19 +136,16 @@ class Accelerator:
147
  raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
148
  if compare_versions("deepspeed", "<", "0.9.3"):
149
  raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
150
-
151
  mixed_precision = (
152
  os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision
153
  )
154
  deepspeed_plugin.set_mixed_precision(mixed_precision)
155
  deepspeed_plugin.set_deepspeed_weakref()
156
-
157
  if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance(
158
  fsdp_plugin, FullyShardedDataParallelPlugin
159
  ):
160
  if is_torch_version("<", FSDP_PYTORCH_VERSION):
161
  raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}")
162
-
163
  if fsdp_plugin is None: # init from env variables
164
  fsdp_plugin = (
165
  FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None
@@ -168,7 +154,6 @@ class Accelerator:
168
  if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):
169
  raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.")
170
  os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided
171
-
172
  if megatron_lm_plugin is None: # init from env variables
173
  megatron_lm_plugin = (
174
  MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None
@@ -177,11 +162,9 @@ class Accelerator:
177
  if not isinstance(megatron_lm_plugin, MegatronLMPlugin):
178
  raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.")
179
  os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided
180
-
181
  if megatron_lm_plugin:
182
  if not is_megatron_lm_available():
183
  raise ImportError("Megatron is not installed. Please build it from source.")
184
-
185
  # Kwargs handlers
186
  self.ddp_handler = None
187
  self.scaler_handler = None
@@ -220,7 +203,6 @@ class Accelerator:
220
  self.autocast_handler = handler
221
  if self.fp8_recipe_handler is None and mixed_precision == "fp8":
222
  self.fp8_recipe_handler = FP8RecipeKwargs()
223
-
224
  kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
225
  self.state = AcceleratorState(
226
  mixed_precision=mixed_precision,
@@ -232,19 +214,16 @@ class Accelerator:
232
  _from_accelerator=True,
233
  **kwargs,
234
  )
235
-
236
  trackers = filter_trackers(log_with, self.logging_dir)
237
  if len(trackers) < 1 and log_with is not None:
238
  warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
239
  self.log_with = trackers
240
-
241
  if (
242
  (mixed_precision != "bf16")
243
  and getattr(self.state, "downcast_bfloat", False)
244
  and (self.state.distributedType != DistributedType.TPU)
245
  ):
246
  raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
247
-
248
  if gradient_accumulation_plugin is not None:
249
  if gradient_accumulation_steps != 1:
250
  raise ValueError(
@@ -263,13 +242,11 @@ class Accelerator:
263
  raise ValueError(
264
  "Gradient accumulation is not supported on TPU. Please set `gradient_accumulation_steps` to 1 and don't pass in a `GradientAccumulationPlugin` object."
265
  )
266
-
267
  self.device_placement = device_placement
268
  self.split_batches = split_batches
269
  self.dispatch_batches = dispatch_batches
270
  self.even_batches = even_batches
271
  self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
272
-
273
  # Mixed precision attributes
274
  self.scaler = None
275
  self.native_amp = False
@@ -285,13 +262,11 @@ class Accelerator:
285
  kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
286
  if self.distributed_type == DistributedType.FSDP:
287
  from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
288
-
289
  self.scaler = ShardedGradScaler(**kwargs)
290
  elif is_npu_available():
291
  self.scaler = torch.npu.amp.GradScaler(**kwargs)
292
  else:
293
  self.scaler = torch.cuda.amp.GradScaler(**kwargs)
294
-
295
  elif self.state.mixed_precision == "bf16" and self.distributed_type not in (
296
  DistributedType.DEEPSPEED,
297
  DistributedType.MEGATRON_LM,
@@ -302,80 +277,62 @@ class Accelerator:
302
  self.native_amp = is_bf16_available(True)
303
  if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available():
304
  raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
305
-
306
  # Start of internal step tracking
307
  self.step = 0
308
-
309
  # Internal references to the training objects
310
  self._optimizers = []
311
  self._models = []
312
  self._schedulers = []
313
  self._dataloaders = []
314
  self._custom_objects = []
315
-
316
  # Hooks
317
  self._load_model_state_pre_hook = OrderedDict()
318
  self._save_model_state_pre_hook = OrderedDict()
319
-
320
  # RNG Types
321
  self.rng_types = rng_types
322
  if self.rng_types is None:
323
  self.rng_types = ["generator"]
324
-
325
  # Set a flag tensor for early stopping and other breakpoints
326
  self.flag_tensor = None
327
-
328
  check_os_kernel()
329
-
330
  @property
331
  def use_distributed(self):
332
  """
333
  Whether the Accelerator is configured for distributed training
334
  """
335
  return self.state.use_distributed
336
-
337
  @property
338
  def distributed_type(self):
339
  return self.state.distributed_type
340
-
341
  @property
342
  def num_processes(self):
343
  return self.state.num_processes
344
-
345
  @property
346
  def process_index(self):
347
  return self.state.process_index
348
-
349
  @property
350
  def local_process_index(self):
351
  return self.state.local_process_index
352
-
353
  @property
354
  def device(self):
355
  return self.state.device
356
-
357
  @property
358
  def project_dir(self):
359
  return self.project_configuration.project_dir
360
-
361
  @property
362
  def logging_dir(self):
363
  return self.project_configuration.logging_dir
364
-
365
  @property
366
  def save_iteration(self):
367
  return self.project_configuration.iteration
368
-
369
  @property
370
  def is_main_process(self):
371
  """True for one process only."""
372
  return self.state.is_main_process
373
-
374
  @property
375
  def is_local_main_process(self):
376
  """True for one process per server."""
377
  return self.state.is_local_main_process
378
-
379
  @property
380
  def use_fp16(self):
381
  warnings.warn(
@@ -384,23 +341,18 @@ class Accelerator:
384
  FutureWarning,
385
  )
386
  return self.mixed_precision != "no"
387
-
388
  @property
389
  def is_last_process(self):
390
  return self.process_index == self.num_processes - 1
391
-
392
  @property
393
  def mixed_precision(self):
394
  return self.state.mixed_precision
395
-
396
  @contextmanager
397
  def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
398
  """
399
  Splits `input` between `self.num_processes` quickly and can then be used on that process. Useful when doing
400
  distributed inference, such as with different prompts.
401
-
402
  Note that when using a `dict`, all keys need to have the same number of elements.
403
-
404
  Args:
405
  inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
406
  The input to split between processes.
@@ -409,13 +361,10 @@ class Accelerator:
409
  number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs
410
  or passing in less inputs than there are processes. If so, just remember to drop the padded elements
411
  afterwards.
412
-
413
  Example:
414
-
415
  ```python
416
  # Assume there are two processes
417
  from accelerate import Accelerator
418
-
419
  accelerator = Accelerator()
420
  with accelerator.split_between_processes(["A", "B", "C"]) as inputs:
421
  print(inputs)
@@ -423,7 +372,6 @@ class Accelerator:
423
  ["A", "B"]
424
  # Process 1
425
  ["C"]
426
-
427
  with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
428
  print(inputs)
429
  # Process 0
@@ -434,28 +382,19 @@ class Accelerator:
434
  """
435
  with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
436
  yield inputs
437
-
438
  def on_main_process(self, function: Callable[..., Any] = None):
439
  """
440
  A decorator that will run the decorated function on the main process only. Can also be called using the
441
  `PartialState` class.
442
-
443
  Args:
444
  function (`Callable`): The function to decorate.
445
-
446
  Example:
447
-
448
  ```python
449
  >>> from accelerate import Accelerator
450
-
451
  >>> accelerator = Accelerator()
452
-
453
-
454
  >>> @accelerator.on_main_process
455
  ... def print_something():
456
  ... print("This will be printed by process 0 only.")
457
-
458
-
459
  >>> print_something()
460
  "This will be printed by process 0 only"
461
  ```
@@ -468,33 +407,23 @@ class Accelerator:
468
  raise ValueError(
469
  "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
470
  )
471
-
472
  def _inner(*args, **kwargs):
473
  return PartialState().on_main_process(function)(*args, **kwargs)
474
-
475
  return _inner
476
-
477
  def on_local_main_process(self, function: Callable[..., Any] = None):
478
  """
479
  A decorator that will run the decorated function on the local main process only. Can also be called using the
480
  `PartialState` class.
481
-
482
  Args:
483
  function (`Callable`): The function to decorate.
484
-
485
  Example:
486
  ```python
487
  # Assume we have 2 servers with 4 processes each.
488
  from accelerate import Accelerator
489
-
490
  accelerator = Accelerator()
491
-
492
-
493
  @accelerator.on_local_main_process
494
  def print_something():
495
  print("This will be printed by process 0 only on each server.")
496
-
497
-
498
  print_something()
499
  # On server 1:
500
  "This will be printed by process 0 only"
@@ -510,33 +439,23 @@ class Accelerator:
510
  raise ValueError(
511
  "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
512
  )
513
-
514
  def _inner(*args, **kwargs):
515
  return PartialState().on_local_main_process(function)(*args, **kwargs)
516
-
517
  return _inner
518
-
519
  def on_last_process(self, function: Callable[..., Any]):
520
  """
521
  A decorator that will run the decorated function on the last process only. Can also be called using the
522
  `PartialState` class.
523
-
524
  Args:
525
  function (`Callable`): The function to decorate.
526
-
527
  Example:
528
  ```python
529
  # Assume we have 4 processes.
530
  from accelerate import Accelerator
531
-
532
  accelerator = Accelerator()
533
-
534
-
535
  @accelerator.on_last_process
536
  def print_something():
537
  print(f"Printed on process {accelerator.process_index}")
538
-
539
-
540
  print_something()
541
  "Printed on process 3"
542
  ```
@@ -549,36 +468,26 @@ class Accelerator:
549
  raise ValueError(
550
  "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object."
551
  )
552
-
553
  def _inner(*args, **kwargs):
554
  return PartialState().on_last_process(function)(*args, **kwargs)
555
-
556
  return _inner
557
-
558
  def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
559
  """
560
  A decorator that will run the decorated function on a given process index only. Can also be called using the
561
  `PartialState` class.
562
-
563
  Args:
564
  function (`Callable`, `optional`):
565
  The function to decorate.
566
  process_index (`int`, `optional`):
567
  The index of the process on which to run the function.
568
-
569
  Example:
570
  ```python
571
  # Assume we have 4 processes.
572
  from accelerate import Accelerator
573
-
574
  accelerator = Accelerator()
575
-
576
-
577
  @accelerator.on_process(process_index=2)
578
  def print_something():
579
  print(f"Printed on process {accelerator.process_index}")
580
-
581
-
582
  print_something()
583
  "Printed on process 2"
584
  ```
@@ -594,36 +503,26 @@ class Accelerator:
594
  raise ValueError(
595
  "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
596
  )
597
-
598
  def _inner(*args, **kwargs):
599
  return PartialState().on_process(function, process_index)(*args, **kwargs)
600
-
601
  return _inner
602
-
603
  def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
604
  """
605
  A decorator that will run the decorated function on a given local process index only. Can also be called using
606
  the `PartialState` class.
607
-
608
  Args:
609
  function (`Callable`, *optional*):
610
  The function to decorate.
611
  local_process_index (`int`, *optional*):
612
  The index of the local process on which to run the function.
613
-
614
  Example:
615
  ```python
616
  # Assume we have 2 servers with 4 processes each.
617
  from accelerate import Accelerator
618
-
619
  accelerator = Accelerator()
620
-
621
-
622
  @accelerator.on_local_process(local_process_index=2)
623
  def print_something():
624
  print(f"Printed on process {accelerator.local_process_index}")
625
-
626
-
627
  print_something()
628
  # On server 1:
629
  "Printed on process 2"
@@ -642,24 +541,17 @@ class Accelerator:
642
  raise ValueError(
643
  "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
644
  )
645
-
646
  def _inner(*args, **kwargs):
647
  return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
648
-
649
  return _inner
650
-
651
  @contextmanager
652
  def main_process_first(self):
653
  """
654
  Lets the main process go first inside a with block.
655
-
656
  The other processes will enter the with block after the main process exits.
657
-
658
  Example:
659
-
660
  ```python
661
  >>> from accelerate import Accelerator
662
-
663
  >>> accelerator = Accelerator()
664
  >>> with accelerator.main_process_first():
665
  ... # This will be printed first by process 0 then in a seemingly
@@ -669,19 +561,14 @@ class Accelerator:
669
  """
670
  with self.state.main_process_first():
671
  yield
672
-
673
  @contextmanager
674
  def local_main_process_first(self):
675
  """
676
  Lets the local main process go first inside a with block.
677
-
678
  The other processes will enter the with block after the main process exits.
679
-
680
  Example:
681
-
682
  ```python
683
  >>> from accelerate import Accelerator
684
-
685
  >>> accelerator = Accelerator()
686
  >>> with accelerator.local_main_process_first():
687
  ... # This will be printed first by local process 0 then in a seemingly
@@ -691,29 +578,22 @@ class Accelerator:
691
  """
692
  with self.state.local_main_process_first():
693
  yield
694
-
695
  @contextmanager
696
  def no_sync(self, model):
697
  """
698
  A context manager to disable gradient synchronizations across DDP processes by calling
699
  `torch.nn.parallel.DistributedDataParallel.no_sync`.
700
-
701
  If `model` is not in DDP, this context manager does nothing
702
-
703
  Args:
704
  model (`torch.nn.Module`):
705
  PyTorch Module that was prepared with `Accelerator.prepare`
706
-
707
  Example:
708
-
709
  ```python
710
  >>> from accelerate import Accelerator
711
-
712
  >>> accelerator = Accelerator()
713
  >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
714
  >>> input_a = next(iter(dataloader))
715
  >>> input_b = next(iter(dataloader))
716
-
717
  >>> with accelerator.no_sync(model):
718
  ... outputs = model(input_a)
719
  ... loss = loss_func(outputs)
@@ -729,30 +609,22 @@ class Accelerator:
729
  context = contextlib.nullcontext
730
  if self.use_distributed:
731
  context = getattr(model, "no_sync", context)
732
-
733
  with context():
734
  yield
735
-
736
  @staticmethod
737
  @contextmanager
738
  def trigger_sync_in_backward(model):
739
  """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under
740
  `Accelerator.no_sync` (only applicable in multi-GPU scenarios).
741
-
742
  If the script is not launched in distributed mode, this context manager does nothing.
743
-
744
  Args:
745
  model (`torch.nn.Module`):
746
  The model for which to trigger the gradient synchronization.
747
-
748
  Example:
749
-
750
  ```python
751
  >>> from accelerate import Accelerator
752
-
753
  >>> accelerator = Accelerator()
754
  >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
755
-
756
  >>> with accelerator.no_sync(model):
757
  ... loss_a = loss_func(model(input_a)) # first forward pass
758
  ... loss_b = loss_func(model(input_b)) # second forward pass
@@ -766,10 +638,8 @@ class Accelerator:
766
  if not isinstance(model, torch.nn.parallel.DistributedDataParallel):
767
  yield
768
  return
769
-
770
  old_require_backward_grad_sync = model.require_backward_grad_sync
771
  old_require_forward_param_sync = model.require_forward_param_sync
772
-
773
  # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features.
774
  # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466
775
  model.require_backward_grad_sync = True
@@ -781,7 +651,6 @@ class Accelerator:
781
  finally:
782
  model.require_backward_grad_sync = old_require_backward_grad_sync
783
  model.require_forward_param_sync = old_require_forward_param_sync
784
-
785
  def _do_sync(self):
786
  "Sets the right `sync_gradients` context and either resets or increases `self.step`"
787
  if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
@@ -790,41 +659,31 @@ class Accelerator:
790
  else:
791
  self.step += 1
792
  self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0)
793
-
794
  @property
795
  def sync_gradients(self):
796
  return self.gradient_state.sync_gradients
797
-
798
  @sync_gradients.setter
799
  def sync_gradients(self, sync_gradients):
800
  self.gradient_state.sync_gradients = sync_gradients
801
-
802
  @property
803
  def gradient_accumulation_steps(self):
804
  return self.gradient_state.num_steps
805
-
806
  @gradient_accumulation_steps.setter
807
  def gradient_accumulation_steps(self, gradient_accumulation_steps):
808
  self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps})
809
-
810
  @contextmanager
811
  def accumulate(self, *models):
812
  """
813
  A context manager that will lightly wrap around and perform gradient accumulation automatically
814
-
815
  Args:
816
  *models (list of `torch.nn.Module`):
817
  PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will
818
  skip gradient syncing during backward pass in distributed training
819
-
820
  Example:
821
-
822
  ```python
823
  >>> from accelerate import Accelerator
824
-
825
  >>> accelerator = Accelerator(gradient_accumulation_steps=1)
826
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
827
-
828
  >>> for input, output in dataloader:
829
  ... with accelerator.accumulate(model):
830
  ... outputs = model(input)
@@ -840,14 +699,12 @@ class Accelerator:
840
  for m in models:
841
  cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
842
  yield
843
-
844
  @contextmanager
845
  def join_uneven_inputs(self, joinables, even_batches=None):
846
  """
847
  A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper
848
  around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the
849
  length of the dataset.
850
-
851
  Args:
852
  joinables (`list[torch.distributed.algorithms.Joinable]`):
853
  A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
@@ -855,28 +712,18 @@ class Accelerator:
855
  even_batches (`bool`, *optional*)
856
  If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,
857
  the default `Accelerator` value will be used.
858
-
859
  <Tip warning={true}>
860
-
861
  `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
862
  configuration, this method will have no effect.
863
-
864
  </Tip>
865
-
866
  <Tip warning={true}>
867
-
868
  Overriding `even_batches` will not affect iterable-style data loaders.
869
-
870
  </Tip>
871
-
872
  Example:
873
-
874
  ```python
875
  >>> from accelerate import Accelerator
876
-
877
  >>> accelerator = Accelerator(even_batches=True)
878
  >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
879
-
880
  >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
881
  ... for input, output in dataloader:
882
  ... outputs = model(input)
@@ -888,7 +735,6 @@ class Accelerator:
888
  """
889
  if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU):
890
  dl_even_batches_values = []
891
-
892
  if even_batches is not None:
893
  iterable_dl_seen = False
894
  # override value in batch sampler for map-style datasets
@@ -898,14 +744,12 @@ class Accelerator:
898
  continue
899
  dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))
900
  dl.batch_sampler.even_batches = even_batches
901
-
902
  if iterable_dl_seen:
903
  warnings.warn(
904
  "Overriding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
905
  )
906
  else:
907
  even_batches = self.even_batches
908
-
909
  enable_join = False if even_batches else True
910
  try:
911
  with Join(joinables, enable=enable_join, throw_on_early_termination=False):
@@ -920,25 +764,19 @@ class Accelerator:
920
  warnings.warn(
921
  "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect."
922
  )
923
-
924
  with contextlib.nullcontext(joinables):
925
  yield
926
-
927
  def print(self, *args, **kwargs):
928
  """
929
  Drop-in replacement of `print()` to only print once per server.
930
-
931
  Example:
932
-
933
  ```python
934
  >>> from accelerate import Accelerator
935
-
936
  >>> accelerator = Accelerator()
937
  >>> accelerator.print("Hello world!")
938
  ```
939
  """
940
  self.state.print(*args, **kwargs)
941
-
942
  def _prepare_one(self, obj, first_pass=False, device_placement=None):
943
  # First pass of preparation: DataLoader, model, optimizer
944
  if first_pass:
@@ -955,44 +793,32 @@ class Accelerator:
955
  return scheduler
956
  # Return the unprocessed object if the previous criteria were not met
957
  return obj
958
-
959
  def prepare(self, *args, device_placement=None):
960
  """
961
  Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same
962
  order.
963
-
964
  Args:
965
  *args (list of objects):
966
  Any of the following type of objects:
967
-
968
  - `torch.utils.data.DataLoader`: PyTorch Dataloader
969
  - `torch.nn.Module`: PyTorch Module
970
  - `torch.optim.Optimizer`: PyTorch Optimizer
971
  - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler
972
-
973
  device_placement (`list[bool]`, *optional*):
974
  Used to customize whether automatic device placement should be performed for each object passed. Needs
975
  to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP.
976
-
977
  <Tip>
978
-
979
  You don't need to prepare a model if you only use it for inference without any kind of mixed precision
980
-
981
  </Tip>
982
-
983
  Examples:
984
-
985
  ```python
986
  >>> from accelerate import Accelerator
987
-
988
  >>> accelerator = Accelerator()
989
  >>> # Assume a model, optimizer, data_loader and scheduler are defined
990
  >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
991
  ```
992
-
993
  ```python
994
  >>> from accelerate import Accelerator
995
-
996
  >>> accelerator = Accelerator()
997
  >>> # Assume a model, optimizer, data_loader and scheduler are defined
998
  >>> device_placement = [True, True, False, False]
@@ -1010,7 +836,6 @@ class Accelerator:
1010
  raise ValueError(
1011
  f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)."
1012
  )
1013
-
1014
  for obj in args:
1015
  # TODO: Look at enabling native TP training directly with a proper config
1016
  if (
@@ -1023,7 +848,6 @@ class Accelerator:
1023
  "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
1024
  " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
1025
  )
1026
-
1027
  if self.distributed_type == DistributedType.DEEPSPEED:
1028
  model_count = 0
1029
  for obj in args:
@@ -1033,7 +857,6 @@ class Accelerator:
1033
  raise AssertionError(
1034
  "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
1035
  )
1036
-
1037
  # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
1038
  # have parameters disconnected from the model (so no training :-( ).
1039
  # If the model and optimizer have parameters on different devices we raise an error.
@@ -1047,13 +870,11 @@ class Accelerator:
1047
  "the flag default value for `device_placement` in your `Accelerator` to let it handle that "
1048
  "part for you."
1049
  )
1050
-
1051
  # If we're dealing with device placement, this deals with that by...
1052
  tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.TPU
1053
  if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
1054
  # 1. grabbing old model parameters
1055
  old_named_params = self._get_named_parameters(*args)
1056
-
1057
  if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
1058
  if self.device.type == "cpu" and self.state.use_ipex:
1059
  args = self._prepare_ipex(*args)
@@ -1072,7 +893,6 @@ class Accelerator:
1072
  self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
1073
  )
1074
  result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
1075
-
1076
  if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
1077
  # 2. grabbing new model parameters
1078
  new_named_params = self._get_named_parameters(*result)
@@ -1082,21 +902,17 @@ class Accelerator:
1082
  for obj in result:
1083
  if isinstance(obj, torch.optim.Optimizer):
1084
  obj._switch_parameters(mapping)
1085
-
1086
  for item in result:
1087
  if any(
1088
  item in container
1089
  for container in (self._dataloaders, self._models, self._optimizers, self._schedulers)
1090
  ):
1091
  setattr(item, "_is_accelerate_prepared", True)
1092
-
1093
  return result if len(result) > 1 else result[0]
1094
-
1095
  def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False):
1096
  """
1097
  Prepares a PyTorch model for training in any distributed setup. It is recommended to use
1098
  [`Accelerator.prepare`] instead.
1099
-
1100
  Args:
1101
  model (`torch.nn.Module`):
1102
  A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without
@@ -1106,12 +922,9 @@ class Accelerator:
1106
  evaluation_mode (`bool`, *optional*, defaults to `False`):
1107
  Whether or not to set the model for evaluation only, by just applying mixed precision and
1108
  `torch.compile` (if configured in the `Accelerator` object).
1109
-
1110
  Example:
1111
-
1112
  ```python
1113
  >>> from accelerate import Accelerator
1114
-
1115
  >>> accelerator = Accelerator()
1116
  >>> # Assume a model is defined
1117
  >>> model = accelerator.prepare_model(model)
@@ -1120,7 +933,6 @@ class Accelerator:
1120
  if device_placement is None:
1121
  device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
1122
  self._models.append(model)
1123
-
1124
  # TODO: Look at enabling native TP training directly with a proper config
1125
  if (
1126
  self.verify_device_map(model)
@@ -1131,7 +943,6 @@ class Accelerator:
1131
  "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
1132
  " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
1133
  )
1134
-
1135
  if self.native_amp:
1136
  model._original_forward = model.forward
1137
  model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
@@ -1148,13 +959,11 @@ class Accelerator:
1148
  convert_model(model)
1149
  model._converted_to_transformer_engine = True
1150
  model._original_forward = model.forward
1151
-
1152
  kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {}
1153
  if "fp8_format" in kwargs:
1154
  kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
1155
  fp8_recipe = te_recipe.DelayedScaling(**kwargs)
1156
  model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
1157
-
1158
  if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
1159
  model, "hf_device_map", False
1160
  ):
@@ -1167,7 +976,6 @@ class Accelerator:
1167
  )
1168
  current_device = list(model_devices)[0]
1169
  current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
1170
-
1171
  if torch.device(current_device_index) != self.device:
1172
  # if on the first device (GPU 0) we don't care
1173
  if (self.device.index is not None) or (current_device_index != 0):
@@ -1175,7 +983,6 @@ class Accelerator:
1175
  "You can't train a model that has been loaded in 8-bit precision on a different device than the one "
1176
  "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device()}` or `device_map={'':torch.xpu.current_device()}`"
1177
  )
1178
-
1179
  if "cpu" in model_devices or "disk" in model_devices:
1180
  raise ValueError(
1181
  "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload."
@@ -1195,13 +1002,11 @@ class Accelerator:
1195
  device_ids, output_device = [self.local_process_index], self.local_process_index
1196
  else:
1197
  device_ids, output_device = None, None
1198
-
1199
  model = torch.nn.parallel.DistributedDataParallel(
1200
  model, device_ids=device_ids, output_device=output_device, **kwargs
1201
  )
1202
  elif self.distributed_type == DistributedType.FSDP:
1203
  from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
1204
-
1205
  # Check if the model is already a FSDP model due to `Manual Wrapping` and if so,
1206
  # don't wrap it again
1207
  # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it
@@ -1209,7 +1014,6 @@ class Accelerator:
1209
  is_type_fsdp = isinstance(model, FSDP) or (
1210
  is_compiled_module(model) and isinstance(model._orig_mod, FSDP)
1211
  )
1212
-
1213
  if not is_type_fsdp:
1214
  self.state.fsdp_plugin.set_auto_wrap_policy(model)
1215
  fsdp_plugin = self.state.fsdp_plugin
@@ -1234,7 +1038,6 @@ class Accelerator:
1234
  apply_activation_checkpointing,
1235
  checkpoint_wrapper,
1236
  )
1237
-
1238
  apply_activation_checkpointing(
1239
  model,
1240
  checkpoint_wrapper_fn=functools.partial(
@@ -1258,18 +1061,14 @@ class Accelerator:
1258
  raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.")
1259
  model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
1260
  return model
1261
-
1262
  def _prepare_deepspeed(self, *args):
1263
  import deepspeed
1264
-
1265
  deepspeed_plugin = self.state.deepspeed_plugin
1266
-
1267
  is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args)
1268
  result = [
1269
  self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj
1270
  for obj in args
1271
  ]
1272
-
1273
  if deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] == "auto":
1274
  if is_dataloader_present:
1275
  batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
@@ -1281,7 +1080,6 @@ class Accelerator:
1281
  )
1282
  if self.split_batches:
1283
  batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]
1284
-
1285
  batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)
1286
  if len(batch_sizes) > 1:
1287
  logger.info(
@@ -1297,14 +1095,12 @@ class Accelerator:
1297
  )
1298
  else:
1299
  batch_size_per_device = deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"]
1300
-
1301
  # handle `gradient_accumulation_steps` when the value is `auto`
1302
  deepspeed_plugin.fill_match(
1303
  "gradient_accumulation_steps",
1304
  must_match=False,
1305
  gradient_accumulation_steps=self.gradient_accumulation_steps,
1306
  )
1307
-
1308
  config_kwargs = {
1309
  "train_micro_batch_size_per_gpu": batch_size_per_device,
1310
  "train_batch_size": batch_size_per_device
@@ -1313,7 +1109,6 @@ class Accelerator:
1313
  "gradient_clipping": 1.0,
1314
  "zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
1315
  }
1316
-
1317
  model = None
1318
  optimizer = None
1319
  scheduler = None
@@ -1326,7 +1121,6 @@ class Accelerator:
1326
  type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
1327
  ):
1328
  scheduler = obj
1329
-
1330
  if optimizer is not None:
1331
  if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)):
1332
  raise ValueError(
@@ -1338,10 +1132,8 @@ class Accelerator:
1338
  raise ValueError(
1339
  "You cannot create a `DummyOptim` without specifying an optimizer in the config file."
1340
  )
1341
-
1342
  if isinstance(optimizer, (torch.optim.Optimizer)):
1343
  deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True
1344
-
1345
  if scheduler is not None:
1346
  if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)):
1347
  raise ValueError(
@@ -1358,14 +1150,12 @@ class Accelerator:
1358
  "Either specify a scheduler in the config file or "
1359
  "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
1360
  )
1361
-
1362
  if optimizer is not None and scheduler is not None:
1363
  if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)):
1364
  raise ValueError(
1365
  "You can only specify `accelerate.utils.DummyScheduler` in the code when using "
1366
  "`accelerate.utils.DummyOptim`."
1367
  )
1368
-
1369
  if model is not None:
1370
  if hasattr(model, "config"):
1371
  hidden_size = (
@@ -1381,7 +1171,6 @@ class Accelerator:
1381
  "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
1382
  }
1383
  )
1384
-
1385
  if isinstance(optimizer, (DummyOptim)):
1386
  config_kwargs.update(
1387
  {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay}
@@ -1418,7 +1207,6 @@ class Accelerator:
1418
  "device", "none"
1419
  ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True):
1420
  from deepspeed.ops.adam import DeepSpeedCPUAdam
1421
-
1422
  defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]}
1423
  optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
1424
  kwargs["optimizer"] = optimizer
@@ -1428,7 +1216,6 @@ class Accelerator:
1428
  or type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
1429
  ):
1430
  kwargs["lr_scheduler"] = scheduler
1431
-
1432
  engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)
1433
  if optimizer is not None:
1434
  optimizer = DeepSpeedOptimizerWrapper(optimizer)
@@ -1442,7 +1229,6 @@ class Accelerator:
1442
  )
1443
  else:
1444
  scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)
1445
-
1446
  for i in range(len(result)):
1447
  if isinstance(result[i], torch.nn.Module):
1448
  result[i] = engine
@@ -1464,7 +1250,6 @@ class Accelerator:
1464
  "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
1465
  )
1466
  return tuple(result)
1467
-
1468
  def _prepare_megatron_lm(self, *args):
1469
  megatron_lm_plugin = self.state.megatron_lm_plugin
1470
  if not megatron_lm_plugin.megatron_dataset_flag:
@@ -1473,7 +1258,6 @@ class Accelerator:
1473
  raise ValueError(
1474
  "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM."
1475
  )
1476
-
1477
  micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes)
1478
  if len(batch_sizes) > 1:
1479
  logger.info(
@@ -1485,10 +1269,8 @@ class Accelerator:
1485
  if isinstance(obj, MegatronLMDummyDataLoader):
1486
  micro_batch_size = obj.dataset_args["micro_batch_size"]
1487
  break
1488
-
1489
  dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)
1490
  megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)
1491
-
1492
  model = None
1493
  optimizer = None
1494
  scheduler = None
@@ -1503,7 +1285,6 @@ class Accelerator:
1503
  optimizer = obj
1504
  elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)):
1505
  scheduler = obj
1506
-
1507
  if model is not None:
1508
  megatron_lm_plugin.set_network_size_args(model, batch_data)
1509
  if optimizer is not None:
@@ -1515,7 +1296,6 @@ class Accelerator:
1515
  "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead."
1516
  )
1517
  megatron_lm_plugin.set_scheduler_args(scheduler)
1518
-
1519
  # initialize megatron-lm
1520
  megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)
1521
  counter = 0
@@ -1532,21 +1312,18 @@ class Accelerator:
1532
  counter += 1
1533
  else:
1534
  result.append(obj)
1535
-
1536
  if model is not None:
1537
  model = megatron_lm_prepare_model(self)
1538
  if optimizer is not None:
1539
  optimizer = megatron_lm_prepare_optimizer(self, model)
1540
  if scheduler is not None:
1541
  scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler)
1542
-
1543
  if model is not None:
1544
  model = MegatronEngine(self, model, optimizer, scheduler)
1545
  if optimizer is not None:
1546
  optimizer = MegatronLMOptimizerWrapper(optimizer)
1547
  if scheduler is not None:
1548
  scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer)
1549
-
1550
  for i in range(len(result)):
1551
  if isinstance(result[i], torch.nn.Module):
1552
  result[i] = model
@@ -1565,7 +1342,6 @@ class Accelerator:
1565
  "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM"
1566
  )
1567
  return tuple(result)
1568
-
1569
  def _prepare_ipex(self, *args):
1570
  if not is_ipex_available():
1571
  raise ImportError(
@@ -1574,7 +1350,6 @@ class Accelerator:
1574
  )
1575
  else:
1576
  import intel_extension_for_pytorch as ipex
1577
-
1578
  model = None
1579
  optimizer = None
1580
  result = [obj for obj in args]
@@ -1598,7 +1373,6 @@ class Accelerator:
1598
  elif isinstance(result[i], (torch.optim.Optimizer)):
1599
  result[i] = optimizer
1600
  return tuple(result)
1601
-
1602
  def _prepare_msamp(self, *args):
1603
  if not is_msamp_available():
1604
  raise ImportError(
@@ -1607,7 +1381,6 @@ class Accelerator:
1607
  )
1608
  else:
1609
  import msamp
1610
-
1611
  model, optimizer = None, None
1612
  num_models, num_optimizers = 0, 0
1613
  result = [obj for obj in args]
@@ -1634,14 +1407,12 @@ class Accelerator:
1634
  elif isinstance(result[i], (torch.optim.Optimizer)):
1635
  result[i] = optimizer
1636
  return tuple(result)
1637
-
1638
  def prepare_data_loader(
1639
  self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None
1640
  ):
1641
  """
1642
  Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use
1643
  [`Accelerator.prepare`] instead.
1644
-
1645
  Args:
1646
  data_loader (`torch.utils.data.DataLoader`):
1647
  A vanilla PyTorch DataLoader to prepare
@@ -1652,13 +1423,10 @@ class Accelerator:
1652
  If passed, this function will be used to slice tensors across `num_processes`. Will default to
1653
  [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will
1654
  be ignored otherwise.
1655
-
1656
  Example:
1657
-
1658
  ```python
1659
  >>> import torch
1660
  >>> from accelerate import Accelerator
1661
-
1662
  >>> accelerator = Accelerator()
1663
  >>> data_loader = torch.utils.data.DataLoader(...)
1664
  >>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True)
@@ -1685,24 +1453,19 @@ class Accelerator:
1685
  )
1686
  self._dataloaders.append(prepared_data_loader)
1687
  return prepared_data_loader
1688
-
1689
  def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):
1690
  """
1691
  Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use
1692
  [`Accelerator.prepare`] instead.
1693
-
1694
  Args:
1695
  optimizer (`torch.optim.Optimizer`):
1696
  A vanilla PyTorch optimizer to prepare
1697
  device_placement (`bool`, *optional*):
1698
  Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`.
1699
-
1700
  Example:
1701
-
1702
  ```python
1703
  >>> import torch
1704
  >>> from accelerate import Accelerator
1705
-
1706
  >>> accelerator = Accelerator()
1707
  >>> optimizer = torch.optim.Adam(...)
1708
  >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True)
@@ -1718,22 +1481,17 @@ class Accelerator:
1718
  optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)
1719
  self._optimizers.append(optimizer)
1720
  return optimizer
1721
-
1722
  def prepare_scheduler(self, scheduler: LRScheduler):
1723
  """
1724
  Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use
1725
  [`Accelerator.prepare`] instead.
1726
-
1727
  Args:
1728
  scheduler (`torch.optim.lr_scheduler.LRScheduler`):
1729
  A vanilla PyTorch scheduler to prepare
1730
-
1731
  Example:
1732
-
1733
  ```python
1734
  >>> import torch
1735
  >>> from accelerate import Accelerator
1736
-
1737
  >>> accelerator = Accelerator()
1738
  >>> optimizer = torch.optim.Adam(...)
1739
  >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
@@ -1759,19 +1517,14 @@ class Accelerator:
1759
  )
1760
  self._schedulers.append(scheduler)
1761
  return scheduler
1762
-
1763
  def backward(self, loss, **kwargs):
1764
  """
1765
  Scales the gradients in accordance to the `GradientAccumulationPlugin` and calls the correct `backward()` based
1766
  on the configuration.
1767
-
1768
  Should be used in lieu of `loss.backward()`.
1769
-
1770
  Example:
1771
-
1772
  ```python
1773
  >>> from accelerate import Accelerator
1774
-
1775
  >>> accelerator = Accelerator(gradient_accumulation_steps=2)
1776
  >>> outputs = model(inputs)
1777
  >>> loss = loss_fn(outputs, labels)
@@ -1789,20 +1542,15 @@ class Accelerator:
1789
  self.scaler.scale(loss).backward(**kwargs)
1790
  else:
1791
  loss.backward(**kwargs)
1792
-
1793
  def set_trigger(self):
1794
  """
1795
  Sets the internal trigger tensor to 1 on the current process. A later check should follow using this, which
1796
  will check across all processes.
1797
-
1798
  Note:
1799
  Does not require `wait_for_everyone()`
1800
-
1801
  Example:
1802
-
1803
  ```python
1804
  >>> from accelerate import Accelerator
1805
-
1806
  >>> accelerator = Accelerator()
1807
  >>> # Assume later in the training script
1808
  >>> # `should_do_breakpoint` is a custom function to monitor when to break,
@@ -1815,20 +1563,15 @@ class Accelerator:
1815
  ```
1816
  """
1817
  self.flag_tensor = torch.tensor(1, device=self.device)
1818
-
1819
  def check_trigger(self):
1820
  """
1821
  Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and
1822
  reset the trigger tensor to 0.
1823
-
1824
  Note:
1825
  Does not require `wait_for_everyone()`
1826
-
1827
  Example:
1828
-
1829
  ```python
1830
  >>> from accelerate import Accelerator
1831
-
1832
  >>> accelerator = Accelerator()
1833
  >>> # Assume later in the training script
1834
  >>> # `should_do_breakpoint` is a custom function to monitor when to break,
@@ -1848,23 +1591,17 @@ class Accelerator:
1848
  self.flag_tensor = torch.tensor(0, device=self.device)
1849
  return True
1850
  return False
1851
-
1852
  def unscale_gradients(self, optimizer=None):
1853
  """
1854
  Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.
1855
-
1856
  Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`]
1857
-
1858
  Args:
1859
  optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*):
1860
  The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers
1861
  that were passed to [`~Accelerator.prepare`].
1862
-
1863
  Example:
1864
-
1865
  ```python
1866
  >>> from accelerate import Accelerator
1867
-
1868
  >>> accelerator = Accelerator()
1869
  >>> model, optimizer = accelerator.prepare(model, optimizer)
1870
  >>> outputs = model(inputs)
@@ -1887,22 +1624,16 @@ class Accelerator:
1887
  gradients = xm._fetch_gradients(opt)
1888
  self.reduce(gradients, scale=1.0 / self.num_processes)
1889
  self.scaler.unscale_(opt)
1890
-
1891
  def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
1892
  """
1893
  Should be used in place of `torch.nn.utils.clip_grad_norm_`.
1894
-
1895
  Returns:
1896
  `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).
1897
-
1898
  Example:
1899
-
1900
  ```python
1901
  >>> from accelerate import Accelerator
1902
-
1903
  >>> accelerator = Accelerator(gradient_accumulation_steps=2)
1904
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
1905
-
1906
  >>> for input, target in dataloader:
1907
  ... optimizer.zero_grad()
1908
  ... output = model(input)
@@ -1925,19 +1656,14 @@ class Accelerator:
1925
  return None
1926
  self.unscale_gradients()
1927
  return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
1928
-
1929
  def clip_grad_value_(self, parameters, clip_value):
1930
  """
1931
  Should be used in place of `torch.nn.utils.clip_grad_value_`.
1932
-
1933
  Example:
1934
-
1935
  ```python
1936
  >>> from accelerate import Accelerator
1937
-
1938
  >>> accelerator = Accelerator(gradient_accumulation_steps=2)
1939
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
1940
-
1941
  >>> for input, target in dataloader:
1942
  ... optimizer.zero_grad()
1943
  ... output = model(input)
@@ -1952,30 +1678,23 @@ class Accelerator:
1952
  raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.")
1953
  self.unscale_gradients()
1954
  torch.nn.utils.clip_grad_value_(parameters, clip_value)
1955
-
1956
  def gather(self, tensor):
1957
  """
1958
  Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to
1959
  regroup the predictions from all processes when doing evaluation.
1960
-
1961
  Note:
1962
  This gather happens in all processes.
1963
-
1964
  Args:
1965
  tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
1966
  The tensors to gather across all processes.
1967
-
1968
  Returns:
1969
  `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the
1970
  first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors.
1971
-
1972
  Example:
1973
-
1974
  ```python
1975
  >>> # Assuming four processes
1976
  >>> import torch
1977
  >>> from accelerate import Accelerator
1978
-
1979
  >>> accelerator = Accelerator()
1980
  >>> process_tensor = torch.tensor([accelerator.process_index])
1981
  >>> gathered_tensor = accelerator.gather(process_tensor)
@@ -1984,23 +1703,18 @@ class Accelerator:
1984
  ```
1985
  """
1986
  return gather(tensor)
1987
-
1988
  def gather_for_metrics(self, input_data):
1989
  """
1990
  Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
1991
  used for gathering the inputs and targets for metric calculation.
1992
-
1993
  Args:
1994
  input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
1995
  The tensors or objects for calculating metrics across all processes
1996
-
1997
  Example:
1998
-
1999
  ```python
2000
  >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples
2001
  >>> import torch
2002
  >>> from accelerate import Accelerator
2003
-
2004
  >>> accelerator = Accelerator()
2005
  >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5)
2006
  >>> dataloader = accelerator.prepare(dataloader)
@@ -2015,12 +1729,10 @@ class Accelerator:
2015
  all_tensors = True
2016
  except TypeError:
2017
  all_tensors = False
2018
-
2019
  if not all_tensors:
2020
  data = gather_object(input_data)
2021
  else:
2022
  data = self.gather(input_data)
2023
-
2024
  try:
2025
  if self.gradient_state.end_of_dataloader:
2026
  # at the end of a dataloader, `gather_for_metrics` regresses to
@@ -2034,7 +1746,6 @@ class Accelerator:
2034
  # Last batch needs to be truncated on distributed systems as it contains additional samples
2035
  def _adjust_samples(tensor):
2036
  return tensor[: self.gradient_state.remainder]
2037
-
2038
  return recursively_apply(_adjust_samples, data)
2039
  else: # remainder is 0
2040
  # no remainder even though at end of dataloader, so nothing to do.
@@ -2045,14 +1756,11 @@ class Accelerator:
2045
  except Exception:
2046
  # Dataset had no length or raised an error
2047
  return data
2048
-
2049
  def reduce(self, tensor, reduction="sum", scale=1.0):
2050
  """
2051
  Reduce the values in *tensor* across all processes based on *reduction*.
2052
-
2053
  Note:
2054
  All processes get the reduced value.
2055
-
2056
  Args:
2057
  tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
2058
  The tensors to reduce across all processes.
@@ -2060,18 +1768,14 @@ class Accelerator:
2060
  A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
2061
  scale (`float`, *optional*, defaults to 1.0):
2062
  A default scaling value to be applied after the reduce, only valid on XLA.
2063
-
2064
  Returns:
2065
  `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
2066
  The reduced tensor(s).
2067
-
2068
  Example:
2069
-
2070
  ```python
2071
  >>> # Assuming two processes
2072
  >>> import torch
2073
  >>> from accelerate import Accelerator
2074
-
2075
  >>> accelerator = Accelerator()
2076
  >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index)
2077
  >>> process_tensor = process_tensor.to(accelerator.device)
@@ -2081,12 +1785,10 @@ class Accelerator:
2081
  ```
2082
  """
2083
  return reduce(tensor, reduction, scale)
2084
-
2085
  def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
2086
  """
2087
  Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
2088
  they can safely be gathered.
2089
-
2090
  Args:
2091
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
2092
  The data to gather.
@@ -2096,18 +1798,14 @@ class Accelerator:
2096
  The value with which to pad.
2097
  pad_first (`bool`, *optional*, defaults to `False`):
2098
  Whether to pad at the beginning or the end.
2099
-
2100
  Returns:
2101
  `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
2102
  The padded tensor(s).
2103
-
2104
  Example:
2105
-
2106
  ```python
2107
  >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2
2108
  >>> import torch
2109
  >>> from accelerate import Accelerator
2110
-
2111
  >>> accelerator = Accelerator()
2112
  >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device)
2113
  >>> padded_tensor = accelerator.pad_across_processes(process_tensor)
@@ -2116,52 +1814,41 @@ class Accelerator:
2116
  ```
2117
  """
2118
  return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)
2119
-
2120
  def unwrap_model(self, model, keep_fp32_wrapper: bool = True):
2121
  """
2122
Unwraps the `model` from the additional layer possibly added by [`~Accelerator.prepare`]. Useful before saving
2123
  the model.
2124
-
2125
  Args:
2126
  model (`torch.nn.Module`):
2127
  The model to unwrap.
2128
  keep_fp32_wrapper (`bool`, *optional*, defaults to `True`):
2129
Whether to keep the mixed precision hook if it was added.
2130
-
2131
  Returns:
2132
  `torch.nn.Module`: The unwrapped model.
2133
-
2134
  Example:
2135
-
2136
  ```python
2137
  >>> # Assuming two GPU processes
2138
  >>> from torch.nn.parallel import DistributedDataParallel
2139
  >>> from accelerate import Accelerator
2140
-
2141
  >>> accelerator = Accelerator()
2142
  >>> model = accelerator.prepare(MyModel())
2143
  >>> print(model.__class__.__name__)
2144
  DistributedDataParallel
2145
-
2146
  >>> model = accelerator.unwrap_model(model)
2147
  >>> print(model.__class__.__name__)
2148
  MyModel
2149
  ```
2150
  """
2151
  return extract_model_from_parallel(model, keep_fp32_wrapper)
2152
-
2153
  def wait_for_everyone(self):
2154
  """
2155
  Will stop the execution of the current process until every other process has reached that point (so this does
2156
  nothing when the script is only run in one process). Useful to do before saving a model.
2157
-
2158
  Example:
2159
-
2160
  ```python
2161
  >>> # Assuming two GPU processes
2162
  >>> import time
2163
  >>> from accelerate import Accelerator
2164
-
2165
  >>> accelerator = Accelerator()
2166
  >>> if accelerator.is_main_process:
2167
  ... time.sleep(2)
@@ -2173,12 +1860,10 @@ class Accelerator:
2173
  ```
2174
  """
2175
  wait_for_everyone()
2176
-
2177
  @on_main_process
2178
  def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}):
2179
  """
2180
Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations.
2181
-
2182
  Args:
2183
  project_name (`str`):
2184
  The name of the project. All trackers will save their data based on this
@@ -2190,12 +1875,9 @@ class Accelerator:
2190
  ```python
2191
  {"wandb": {"tags": ["tag_a", "tag_b"]}}
2192
  ```
2193
-
2194
  Example:
2195
-
2196
  ```python
2197
  >>> from accelerate import Accelerator
2198
-
2199
  >>> accelerator = Accelerator(log_with="tensorboard")
2200
  >>> accelerator.init_trackers(
2201
  ... project_name="my_project",
@@ -2220,26 +1902,20 @@ class Accelerator:
2220
  if config is not None:
2221
  for tracker in self.trackers:
2222
  tracker.store_init_configuration(config)
2223
-
2224
  def get_tracker(self, name: str, unwrap: bool = False):
2225
  """
2226
  Returns a `tracker` from `self.trackers` based on `name` on the main process only.
2227
-
2228
  Args:
2229
  name (`str`):
2230
  The name of a tracker, corresponding to the `.name` property.
2231
  unwrap (`bool`):
2232
  Whether to return the internal tracking mechanism or to return the wrapped tracker instead
2233
  (recommended).
2234
-
2235
  Returns:
2236
  `GeneralTracker`: The tracker corresponding to `name` if it exists.
2237
-
2238
  Example:
2239
-
2240
  ```python
2241
  >>> from accelerate import Accelerator
2242
-
2243
  >>> accelerator = Accelerator(log_with="tensorboard")
2244
  >>> accelerator.init_trackers("my_project")
2245
  >>> tensorboard_tracker = accelerator.get_tracker("tensorboard")
@@ -2252,12 +1928,10 @@ class Accelerator:
2252
  raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.")
2253
  # Handle tracker only made on main process
2254
  return GeneralTracker(_blank=True)
2255
-
2256
  @on_main_process
2257
  def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}):
2258
  """
2259
  Logs `values` to all stored trackers in `self.trackers` on the main process only.
2260
-
2261
  Args:
2262
  values (`dict`):
2263
  Values should be a dictionary-like object containing only types `int`, `float`, or `str`.
@@ -2269,12 +1943,9 @@ class Accelerator:
2269
  ```python
2270
  {"wandb": {"tags": ["tag_a", "tag_b"]}}
2271
  ```
2272
-
2273
  Example:
2274
-
2275
  ```python
2276
  >>> from accelerate import Accelerator
2277
-
2278
  >>> accelerator = Accelerator(log_with="tensorboard")
2279
  >>> accelerator.init_trackers("my_project")
2280
  >>> accelerator.log({"loss": 0.5, "accuracy": 0.9})
@@ -2282,18 +1953,14 @@ class Accelerator:
2282
  """
2283
  for tracker in self.trackers:
2284
  tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))
2285
-
2286
  @on_main_process
2287
  def end_training(self):
2288
  """
2289
  Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be
2290
  called at the end of your script if using experiment tracking.
2291
-
2292
  Example:
2293
-
2294
  ```python
2295
  >>> from accelerate import Accelerator
2296
-
2297
  >>> accelerator = Accelerator(log_with="tensorboard")
2298
  >>> accelerator.init_trackers("my_project")
2299
  >>> # Do training
@@ -2302,25 +1969,19 @@ class Accelerator:
2302
  """
2303
  for tracker in self.trackers:
2304
  tracker.finish()
2305
-
2306
  def save(self, obj, f, safe_serialization=False):
2307
  """
2308
  Save the object passed to disk once per machine. Use in place of `torch.save`.
2309
-
2310
  Args:
2311
  obj (`object`): The object to save.
2312
  f (`str` or `os.PathLike`): Where to save the content of `obj`.
2313
  safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
2314
-
2315
  Note:
2316
  If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node,
2317
  rather than only once on the main node.
2318
-
2319
  Example:
2320
-
2321
  ```python
2322
  >>> from accelerate import Accelerator
2323
-
2324
  >>> accelerator = Accelerator()
2325
  >>> arr = [0, 1, 2, 3]
2326
  >>> accelerator.save(arr, "array.pkl")
@@ -2332,7 +1993,6 @@ class Accelerator:
2332
  save_on_each_node=self.project_configuration.save_on_each_node,
2333
  safe_serialization=safe_serialization,
2334
  )
2335
-
2336
  def save_model(
2337
  self,
2338
  model: torch.nn.Module,
@@ -2342,7 +2002,6 @@ class Accelerator:
2342
  ):
2343
  """
2344
Save a model so that it can be re-loaded using `load_checkpoint_in_model`.
2345
-
2346
  Arguments:
2347
  model: (`torch.nn.Module`):
2348
Model to be saved. The model can be wrapped or unwrapped.
@@ -2351,22 +2010,15 @@ class Accelerator:
2351
  max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
2352
The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size
2353
  lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
2354
-
2355
  <Tip warning={true}>
2356
-
2357
  If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
2358
  which will be bigger than `max_shard_size`.
2359
-
2360
  </Tip>
2361
-
2362
  safe_serialization (`bool`, *optional*, defaults to `True`):
2363
  Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
2364
-
2365
  Example:
2366
-
2367
  ```python
2368
  >>> from accelerate import Accelerator
2369
-
2370
  >>> accelerator = Accelerator()
2371
  >>> model = ...
2372
  >>> accelerator.save_model(model, save_directory)
@@ -2375,9 +2027,7 @@ class Accelerator:
2375
  if os.path.isfile(save_directory):
2376
  logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
2377
  return
2378
-
2379
  os.makedirs(save_directory, exist_ok=True)
2380
-
2381
  # get the state_dict of the model
2382
  if any(
2383
  [
@@ -2391,25 +2041,20 @@ class Accelerator:
2391
  if any(param.device == torch.device("meta") for param in model.parameters()):
2392
  raise RuntimeError("You can't save the model since some parameters are on the meta device.")
2393
  state_dict = self.get_state_dict(model)
2394
-
2395
  if safe_serialization:
2396
  state_dict = clean_state_dict_for_safetensors(state_dict)
2397
  weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
2398
-
2399
  # Shard the model if it is too big.
2400
  shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
2401
-
2402
  # Clean the folder from a previous save
2403
  for filename in os.listdir(save_directory):
2404
  full_filename = os.path.join(save_directory, filename)
2405
  # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
2406
  # in distributed settings to avoid race conditions.
2407
  weights_no_suffix = weights_name.replace(".bin", "")
2408
-
2409
  # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
2410
  filename_no_suffix = filename.replace(".bin", "")
2411
  reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")
2412
-
2413
  if (
2414
  filename.startswith(weights_no_suffix)
2415
  and os.path.isfile(full_filename)
@@ -2418,11 +2063,9 @@ class Accelerator:
2418
  and PartialState().is_main_process
2419
  ):
2420
  os.remove(full_filename)
2421
-
2422
  # Save the model
2423
  for shard_file, shard in shards.items():
2424
  self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization)
2425
-
2426
  if index is None:
2427
  path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
2428
  logger.info(f"Model weights saved in {path_to_weights}")
@@ -2438,31 +2081,22 @@ class Accelerator:
2438
  f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
2439
  f"index located at {save_index_file}."
2440
  )
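As a side note on the shard cleanup above, a small sketch of the filename stems the regex is meant to match (the filenames here are hypothetical, not taken from the source):

```python
import re

# Sharded checkpoints are written as e.g. `pytorch_model-00001-of-00005.bin`;
# after stripping the `.bin` suffix the stem matches this pattern, while an
# unsharded `pytorch_model` stem does not.
reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")
assert reg.fullmatch("pytorch_model-00001-of-00005") is not None
assert reg.fullmatch("pytorch_model") is None
```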
2441
-
2442
  def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
2443
  """
2444
  Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`].
2445
-
2446
  Args:
2447
  hook (`Callable`):
2448
  A function to be called in [`Accelerator.save_state`] before `save_checkpoint`.
2449
-
2450
  The hook should have the following signature:
2451
-
2452
  `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None`
2453
-
2454
The `models` argument is the models as saved in the accelerator state under `accelerator._models`, the `weights`
2455
argument is the state dicts of the `models`, and the `input_dir` argument is the `output_dir` argument passed
2456
to [`Accelerator.save_state`].
2457
-
2458
  <Tip>
2459
-
2460
  Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save
2461
  configurations in addition to model weights. Can also be used to overwrite model saving with a customized
2462
  method. In this case, make sure to remove already loaded weights from the weights list.
2463
-
2464
  </Tip>
2465
-
2466
  Returns:
2467
  `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
2468
  `handle.remove()`
@@ -2470,25 +2104,18 @@ class Accelerator:
2470
  handle = hooks.RemovableHandle(self._save_model_state_pre_hook)
2471
  self._save_model_state_pre_hook[handle.id] = hook
2472
  return handle
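Since this method has no usage example in its docstring, here is a minimal, hedged sketch of registering a save-state pre hook; the `run_metadata.json` file and the `run_metadata` dict are illustrative assumptions, not part of the library:

```python
import json
import os

from accelerate import Accelerator

accelerator = Accelerator()
run_metadata = {"run_name": "example-run"}  # assumed user-defined metadata


def save_metadata_hook(models, weights, output_dir):
    # Called by `save_state` with the tracked models, their state dicts,
    # and the checkpoint folder; here we just drop an extra file next to it.
    with open(os.path.join(output_dir, "run_metadata.json"), "w") as f:
        json.dump(run_metadata, f)


handle = accelerator.register_save_state_pre_hook(save_metadata_hook)
# ...training and `accelerator.save_state(...)` calls...
handle.remove()  # detach the hook when it is no longer needed
```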
2473
-
2474
  def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs):
2475
  """
2476
  Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.
2477
-
2478
  If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled
2479
  then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater
2480
than `total_limit` then the oldest save is deleted. Each checkpoint is saved in a separate folder named
2481
  `checkpoint_<iteration>`.
2482
-
2483
  Otherwise they are just saved to `output_dir`.
2484
-
2485
  <Tip>
2486
-
2487
  Should only be used when wanting to save a checkpoint during training and restoring the state in the same
2488
  environment.
2489
-
2490
  </Tip>
2491
-
2492
  Args:
2493
  output_dir (`str` or `os.PathLike`):
2494
  The name of the folder to save all relevant weights and states.
@@ -2497,12 +2124,9 @@ class Accelerator:
2497
  save_model_func_kwargs (`dict`, *optional*):
2498
  Additional keyword arguments for saving model which can be passed to the underlying save function, such
2499
  as optional arguments for DeepSpeed's `save_checkpoint` function.
2500
-
2501
  Example:
2502
-
2503
  ```python
2504
  >>> from accelerate import Accelerator
2505
-
2506
  >>> accelerator = Accelerator()
2507
  >>> model, optimizer, lr_scheduler = ...
2508
  >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
@@ -2519,10 +2143,8 @@ class Accelerator:
2519
  and (len(folders) + 1 > self.project_configuration.total_limit)
2520
  and self.is_main_process
2521
  ):
2522
-
2523
  def _inner(folder):
2524
  return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
2525
-
2526
  folders.sort(key=_inner)
2527
  logger.warning(
2528
  f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint."
@@ -2537,11 +2159,9 @@ class Accelerator:
2537
  self.wait_for_everyone()
2538
  os.makedirs(output_dir, exist_ok=True)
2539
  logger.info(f"Saving current state to {output_dir}")
2540
-
2541
  if self.distributed_type == DistributedType.TPU:
2542
  # Finish running the previous step before checkpointing
2543
  xm.mark_step()
2544
-
2545
  # Save the models taking care of FSDP and DeepSpeed nuances
2546
  weights = []
2547
  for i, model in enumerate(self._models):
@@ -2560,7 +2180,6 @@ class Accelerator:
2560
  logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}")
2561
  else:
2562
  weights.append(self.get_state_dict(model, unwrap=False))
2563
-
2564
  # Save the optimizers taking care of FSDP and DeepSpeed nuances
2565
  optimizers = []
2566
  if self.distributed_type == DistributedType.FSDP:
@@ -2570,7 +2189,6 @@ class Accelerator:
2570
  logger.info(f"FSDP Optimizer saved to output dir {output_dir}")
2571
  elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
2572
  optimizers = self._optimizers
2573
-
2574
  # Save the lr schedulers taking care of DeepSpeed nuances
2575
  schedulers = []
2576
  if self.distributed_type == DistributedType.DEEPSPEED:
@@ -2580,15 +2198,12 @@ class Accelerator:
2580
  schedulers.append(scheduler)
2581
  elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
2582
  schedulers = self._schedulers
2583
-
2584
  # Save the samplers of the dataloaders
2585
  dataloaders = self._dataloaders
2586
-
2587
  # Call model loading hooks that might have been registered with
2588
  # accelerator.register_model_state_hook
2589
  for hook in self._save_model_state_pre_hook.values():
2590
  hook(self._models, weights, output_dir)
2591
-
2592
  save_location = save_accelerator_state(
2593
  output_dir,
2594
  weights,
@@ -2604,30 +2219,21 @@ class Accelerator:
2604
  save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
2605
  self.project_configuration.iteration += 1
2606
  return save_location
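To illustrate the `_inner` sort key that `save_state` (and `load_state` below) use to order checkpoint folders, a small standalone sketch with hypothetical folder names:

```python
import re


def _inner(folder):
    # Extracts the trailing integer of the folder name, e.g. 10 from
    # "checkpoints/checkpoint_10", so folders sort numerically rather than lexicographically.
    return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]


folders = ["checkpoints/checkpoint_10", "checkpoints/checkpoint_2", "checkpoints/checkpoint_1"]
folders.sort(key=_inner)
assert folders == ["checkpoints/checkpoint_1", "checkpoints/checkpoint_2", "checkpoints/checkpoint_10"]
```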
2607
-
2608
  def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
2609
  """
2610
  Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`].
2611
-
2612
  Args:
2613
  hook (`Callable`):
2614
  A function to be called in [`Accelerator.load_state`] before `load_checkpoint`.
2615
-
2616
  The hook should have the following signature:
2617
-
2618
  `hook(models: list[torch.nn.Module], input_dir: str) -> None`
2619
-
2620
  The `models` argument are the models as saved in the accelerator state under `accelerator._models`, and the
2621
  `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`].
2622
-
2623
  <Tip>
2624
-
2625
  Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load
2626
  configurations in addition to model weights. Can also be used to overwrite model loading with a customized
2627
  method. In this case, make sure to remove already loaded models from the models list.
2628
-
2629
  </Tip>
2630
-
2631
  Returns:
2632
  `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
2633
  `handle.remove()`
@@ -2635,18 +2241,13 @@ class Accelerator:
2635
  handle = hooks.RemovableHandle(self._load_model_state_pre_hook)
2636
  self._load_model_state_pre_hook[handle.id] = hook
2637
  return handle
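Mirroring the save-side sketch above, a hedged example of a load-state pre hook that reads the same hypothetical `run_metadata.json` back in:

```python
import json
import os

from accelerate import Accelerator

accelerator = Accelerator()


def load_metadata_hook(models, input_dir):
    # Called by `load_state` with the tracked models and the checkpoint folder.
    path = os.path.join(input_dir, "run_metadata.json")
    if os.path.exists(path):
        with open(path) as f:
            print("restored metadata:", json.load(f))


handle = accelerator.register_load_state_pre_hook(load_metadata_hook)
# ...`accelerator.load_state(...)` calls...
handle.remove()
```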
2638
-
2639
  def load_state(self, input_dir: str = None, **load_model_func_kwargs):
2640
  """
2641
  Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.
2642
-
2643
  <Tip>
2644
-
2645
  Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for
2646
  checkpointing, it will not be loaded if stored in the directory.
2647
-
2648
  </Tip>
2649
-
2650
  Args:
2651
  input_dir (`str` or `os.PathLike`):
2652
  The name of the folder all relevant weights and states were saved in. Can be `None` if
@@ -2655,12 +2256,9 @@ class Accelerator:
2655
  Additional keyword arguments for loading model which can be passed to the underlying load function,
2656
  such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the
2657
  model and optimizer on.
2658
-
2659
  Example:
2660
-
2661
  ```python
2662
  >>> from accelerate import Accelerator
2663
-
2664
  >>> accelerator = Accelerator()
2665
  >>> model, optimizer, lr_scheduler = ...
2666
  >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
@@ -2676,16 +2274,13 @@ class Accelerator:
2676
  # Pick up from automatic checkpoint naming
2677
  input_dir = os.path.join(self.project_dir, "checkpoints")
2678
  folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)]
2679
-
2680
  def _inner(folder):
2681
  return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
2682
-
2683
  folders.sort(key=_inner)
2684
  input_dir = folders[-1]
2685
  else:
2686
  raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.")
2687
  logger.info(f"Loading states from {input_dir}")
2688
-
2689
  # Load the models taking care of FSDP and DeepSpeed nuances
2690
  models = []
2691
  for i, model in enumerate(self._models):
@@ -2704,7 +2299,6 @@ class Accelerator:
2704
  logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}")
2705
  else:
2706
  models.append(model)
2707
-
2708
  # Load the optimizers taking care of FSDP and DeepSpeed nuances
2709
  optimizers = []
2710
  if self.distributed_type == DistributedType.FSDP:
@@ -2714,7 +2308,6 @@ class Accelerator:
2714
  logger.info(f"FSDP Optimizer loaded from input dir {input_dir}")
2715
  elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
2716
  optimizers = self._optimizers
2717
-
2718
  # Load the lr schedulers taking care of DeepSpeed nuances
2719
  schedulers = []
2720
  if self.distributed_type == DistributedType.DEEPSPEED:
@@ -2724,14 +2317,11 @@ class Accelerator:
2724
  schedulers.append(scheduler)
2725
  elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
2726
  schedulers = self._schedulers
2727
-
2728
  dataloaders = self._dataloaders
2729
-
2730
  # Call model loading hooks that might have been registered with
2731
  # accelerator.register_model_state_hook
2732
  for hook in self._load_model_state_pre_hook.values():
2733
  hook(models, input_dir)
2734
-
2735
  map_location = load_model_func_kwargs.pop("map_location", None)
2736
  if map_location is None:
2737
  if self.num_processes > 1 and self.distributed_type in (
@@ -2741,7 +2331,6 @@ class Accelerator:
2741
  map_location = "on_device"
2742
  else:
2743
  map_location = "cpu"
2744
-
2745
  load_accelerator_state(
2746
  input_dir,
2747
  models,
@@ -2767,17 +2356,13 @@ class Accelerator:
2767
  logger.info(f"Loading in {len(custom_checkpoints)} custom states")
2768
  for index, obj in enumerate(self._custom_objects):
2769
  load_custom_state(obj, input_dir, index)
2770
-
2771
  def free_memory(self):
2772
  """
2773
  Will release all references to the internal objects stored and call the garbage collector. You should call this
2774
  method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0.
2775
-
2776
  Example:
2777
-
2778
  ```python
2779
  >>> from accelerate import Accelerator
2780
-
2781
  >>> accelerator = Accelerator()
2782
  >>> model, optimizer, scheduler = ...
2783
  >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
@@ -2792,17 +2377,13 @@ class Accelerator:
2792
  self.deepspeed_engine_wrapped = None
2793
  self.step = 0
2794
  release_memory()
2795
-
2796
  def clear(self):
2797
  """
2798
Alias for [`Accelerator.free_memory`], releases all references to the internal objects stored and calls the
2799
  garbage collector. You should call this method between two trainings with different models/optimizers.
2800
-
2801
  Example:
2802
-
2803
  ```python
2804
  >>> from accelerate import Accelerator
2805
-
2806
  >>> accelerator = Accelerator()
2807
  >>> model, optimizer, scheduler = ...
2808
  >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
@@ -2811,7 +2392,6 @@ class Accelerator:
2811
  ```
2812
  """
2813
  self.free_memory()
2814
-
2815
  def _get_named_parameters(self, *args):
2816
  named_parameters = {}
2817
  for obj in args:
@@ -2819,7 +2399,6 @@ class Accelerator:
2819
  obj = extract_model_from_parallel(obj)
2820
  named_parameters.update({n: p for n, p in obj.named_parameters()})
2821
  return named_parameters
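For orientation, a tiny standalone approximation of what `_get_named_parameters` collects for a plain (non-wrapped) module; the real helper additionally unwraps compiled/DDP models first:

```python
import torch

net = torch.nn.Linear(2, 2)
named_parameters = {n: p for n, p in net.named_parameters()}
# The mapping from names to parameter tensors is what `prepare` later uses to
# re-match old and new parameters (e.g. after moving a model to the XLA device).
assert set(named_parameters) == {"weight", "bias"}
```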
2822
-
2823
  def _get_devices(self, *args):
2824
  model_device = None
2825
  optimizer_device = None
@@ -2836,27 +2415,21 @@ class Accelerator:
2836
  optimizer_device = param_group["params"][0].device
2837
  break
2838
  return (model_device, optimizer_device)
2839
-
2840
  def get_state_dict(self, model, unwrap=True):
2841
  """
2842
  Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full
2843
  precision.
2844
-
2845
  Args:
2846
  model (`torch.nn.Module`):
2847
  A PyTorch model sent through [`Accelerator.prepare`]
2848
  unwrap (`bool`, *optional*, defaults to `True`):
2849
  Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict
2850
-
2851
  Returns:
2852
  `dict`: The state dictionary of the model potentially without full precision.
2853
-
2854
  Example:
2855
-
2856
  ```python
2857
  >>> import torch
2858
  >>> from accelerate import Accelerator
2859
-
2860
  >>> accelerator = Accelerator()
2861
  >>> net = torch.nn.Linear(2, 2)
2862
  >>> net = accelerator.prepare(net)
@@ -2876,12 +2449,10 @@ class Accelerator:
2876
  )
2877
  else:
2878
  from deepspeed.checkpoint.utils import clone_tensors_for_torch_save
2879
-
2880
  state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict())
2881
  elif self.distributed_type == DistributedType.FSDP:
2882
  from torch.distributed.fsdp import FullStateDictConfig, StateDictType
2883
  from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
2884
-
2885
  full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
2886
  with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
2887
  state_dict = model.state_dict()
@@ -2889,27 +2460,18 @@ class Accelerator:
2889
  if unwrap:
2890
  model = self.unwrap_model(model)
2891
  state_dict = model.state_dict()
2892
-
2893
  return state_dict
2894
-
2895
  def register_for_checkpointing(self, *objects):
2896
  """
2897
Makes note of `objects` and will save or load them during `save_state` or `load_state`.
2898
-
2899
These should be utilized when the state is being loaded or saved in the same script. They are not designed to be
2900
  used in different scripts.
2901
-
2902
  <Tip>
2903
-
2904
  Every `object` must have a `load_state_dict` and `state_dict` function to be stored.
2905
-
2906
  </Tip>
2907
-
2908
  Example:
2909
-
2910
  ```python
2911
  >>> from accelerate import Accelerator
2912
-
2913
  >>> accelerator = Accelerator()
2914
  >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function.
2915
  >>> obj = CustomObject()
@@ -2927,21 +2489,16 @@ class Accelerator:
2927
  err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`"
2928
  raise ValueError(err)
2929
  self._custom_objects.extend(objects)
2930
-
2931
  @contextmanager
2932
  def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None):
2933
  """
2934
Will apply automatic mixed precision inside the block under this context manager, if it is enabled. Nothing
2935
  different will happen otherwise.
2936
-
2937
  A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is
2938
  useful in blocks under `autocast` where you want to revert to fp32.
2939
-
2940
  Example:
2941
-
2942
  ```python
2943
  >>> from accelerate import Accelerator
2944
-
2945
  >>> accelerator = Accelerator(mixed_precision="fp16")
2946
  >>> with accelerator.autocast():
2947
  ... train()
@@ -2963,7 +2520,6 @@ class Accelerator:
2963
  autocast_context.__enter__()
2964
  yield
2965
  autocast_context.__exit__(*sys.exc_info())
2966
-
2967
  @property
2968
  def optimizer_step_was_skipped(self):
2969
  """
@@ -2974,20 +2530,15 @@ class Accelerator:
2974
  if optimizer.step_was_skipped:
2975
  return True
2976
  return False
2977
-
2978
  def skip_first_batches(self, dataloader, num_batches: int = 0):
2979
  """
2980
  Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
2981
-
2982
  Args:
2983
  dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches.
2984
  num_batches (`int`, *optional*, defaults to 0): The number of batches to skip
2985
-
2986
  Example:
2987
-
2988
  ```python
2989
  >>> from accelerate import Accelerator
2990
-
2991
  >>> accelerator = Accelerator()
2992
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
2993
  >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2)
@@ -2998,7 +2549,6 @@ class Accelerator:
2998
  ... loss = loss_func(output, target)
2999
  ... accelerator.backward(loss)
3000
  ... optimizer.step()
3001
-
3002
  >>> # subsequent epochs
3003
  >>> for input, target in dataloader:
3004
  ... optimizer.zero_grad()
@@ -3006,11 +2556,9 @@ class Accelerator:
3006
  ```
3007
  """
3008
  return skip_first_batches(dataloader, num_batches=num_batches)
3009
-
3010
  def __deepcopy__(self, memo):
3011
  logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.")
3012
  return self
3013
-
3014
  def verify_device_map(self, model: torch.nn.Module) -> bool:
3015
  """
3016
  Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`.
@@ -3019,5 +2567,4 @@ class Accelerator:
3019
  for m in model.modules():
3020
  if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1:
3021
  return True
3022
-
3023
  return False
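A short, hedged sketch of what `verify_device_map` reacts to; the `hf_device_map` attribute is set by hand here purely to simulate a model loaded with `device_map="auto"`:

```python
import torch

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
assert accelerator.verify_device_map(model) is False

# Simulate a big-model-inference model whose weights are spread over two devices.
model.hf_device_map = {"layer1": 0, "layer2": "cpu"}
assert accelerator.verify_device_map(model) is True
```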
 
 
1
  logger = get_logger(__name__)
 
 
2
  class Accelerator:
3
  """
4
  Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training.
 
5
  Args:
6
  device_placement (`bool`, *optional*, defaults to `True`):
7
  Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
 
35
  rng_types (list of `str` or [`~utils.RNGType`]):
36
  The list of random number generators to synchronize at the beginning of each iteration in your prepared
37
  dataloaders. Should be one or several of:
 
38
  - `"torch"`: the base torch random number generator
39
  - `"cuda"`: the CUDA random number generator (GPU only)
40
  - `"xla"`: the XLA random number generator (TPU only)
41
  - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
42
  dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
 
43
  Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6.
44
  log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
45
  A list of loggers to be setup for experiment tracking. Should be one or several of:
 
46
  - `"all"`
47
  - `"tensorboard"`
48
  - `"wandb"`
 
73
  gradient_accumulation_plugin (`GradientAccumulationPlugin`, *optional*):
74
  A configuration for how gradient accumulation should be handled, if more tweaking than just the
75
  `gradient_accumulation_steps` is needed.
 
76
  **Available attributes:**
 
77
  - **device** (`torch.device`) -- The device to use.
78
  - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.
79
  - **local_process_index** (`int`) -- The process index on the current machine.
 
121
  raise ValueError(
122
  f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}"
123
  )
 
124
  dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend)
 
125
  if deepspeed_plugin is None: # init from env variables
126
  deepspeed_plugin = (
127
  DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None
 
136
  raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
137
  if compare_versions("deepspeed", "<", "0.9.3"):
138
  raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
 
139
  mixed_precision = (
140
  os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision
141
  )
142
  deepspeed_plugin.set_mixed_precision(mixed_precision)
143
  deepspeed_plugin.set_deepspeed_weakref()
 
144
  if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance(
145
  fsdp_plugin, FullyShardedDataParallelPlugin
146
  ):
147
  if is_torch_version("<", FSDP_PYTORCH_VERSION):
148
  raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}")
 
149
  if fsdp_plugin is None: # init from env variables
150
  fsdp_plugin = (
151
  FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None
 
154
  if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):
155
  raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.")
156
  os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided
 
157
  if megatron_lm_plugin is None: # init from env variables
158
  megatron_lm_plugin = (
159
  MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None
 
162
  if not isinstance(megatron_lm_plugin, MegatronLMPlugin):
163
  raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.")
164
  os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided
 
165
  if megatron_lm_plugin:
166
  if not is_megatron_lm_available():
167
  raise ImportError("Megatron is not installed. please build it from source.")
 
168
  # Kwargs handlers
169
  self.ddp_handler = None
170
  self.scaler_handler = None
 
203
  self.autocast_handler = handler
204
  if self.fp8_recipe_handler is None and mixed_precision == "fp8":
205
  self.fp8_recipe_handler = FP8RecipeKwargs()
 
206
  kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
207
  self.state = AcceleratorState(
208
  mixed_precision=mixed_precision,
 
214
  _from_accelerator=True,
215
  **kwargs,
216
  )
 
217
  trackers = filter_trackers(log_with, self.logging_dir)
218
  if len(trackers) < 1 and log_with is not None:
219
  warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
220
  self.log_with = trackers
 
221
  if (
222
  (mixed_precision != "bf16")
223
  and getattr(self.state, "downcast_bfloat", False)
224
  and (self.state.distributedType != DistributedType.TPU)
225
  ):
226
  raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
 
227
  if gradient_accumulation_plugin is not None:
228
  if gradient_accumulation_steps != 1:
229
  raise ValueError(
 
242
  raise ValueError(
243
  "Gradient accumulation is not supported on TPU. Please set `gradient_accumulation_steps` to 1 and don't pass in a `GradientAccumulationPlugin` object."
244
  )
 
245
  self.device_placement = device_placement
246
  self.split_batches = split_batches
247
  self.dispatch_batches = dispatch_batches
248
  self.even_batches = even_batches
249
  self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
 
250
  # Mixed precision attributes
251
  self.scaler = None
252
  self.native_amp = False
 
262
  kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
263
  if self.distributed_type == DistributedType.FSDP:
264
  from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
 
265
  self.scaler = ShardedGradScaler(**kwargs)
266
  elif is_npu_available():
267
  self.scaler = torch.npu.amp.GradScaler(**kwargs)
268
  else:
269
  self.scaler = torch.cuda.amp.GradScaler(**kwargs)
 
270
  elif self.state.mixed_precision == "bf16" and self.distributed_type not in (
271
  DistributedType.DEEPSPEED,
272
  DistributedType.MEGATRON_LM,
 
277
  self.native_amp = is_bf16_available(True)
278
  if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available():
279
  raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
 
280
  # Start of internal step tracking
281
  self.step = 0
 
282
  # Internal references to the training objects
283
  self._optimizers = []
284
  self._models = []
285
  self._schedulers = []
286
  self._dataloaders = []
287
  self._custom_objects = []
 
288
  # Hooks
289
  self._load_model_state_pre_hook = OrderedDict()
290
  self._save_model_state_pre_hook = OrderedDict()
 
291
  # RNG Types
292
  self.rng_types = rng_types
293
  if self.rng_types is None:
294
  self.rng_types = ["generator"]
 
295
  # Set a flag tensor for early stopping and other breakpoints
296
  self.flag_tensor = None
 
297
  check_os_kernel()
 
298
  @property
299
  def use_distributed(self):
300
  """
301
  Whether the Accelerator is configured for distributed training
302
  """
303
  return self.state.use_distributed
 
304
  @property
305
  def distributed_type(self):
306
  return self.state.distributed_type
 
307
  @property
308
  def num_processes(self):
309
  return self.state.num_processes
 
310
  @property
311
  def process_index(self):
312
  return self.state.process_index
 
313
  @property
314
  def local_process_index(self):
315
  return self.state.local_process_index
 
316
  @property
317
  def device(self):
318
  return self.state.device
 
319
  @property
320
  def project_dir(self):
321
  return self.project_configuration.project_dir
 
322
  @property
323
  def logging_dir(self):
324
  return self.project_configuration.logging_dir
 
325
  @property
326
  def save_iteration(self):
327
  return self.project_configuration.iteration
 
328
  @property
329
  def is_main_process(self):
330
  """True for one process only."""
331
  return self.state.is_main_process
 
332
  @property
333
  def is_local_main_process(self):
334
  """True for one process per server."""
335
  return self.state.is_local_main_process
 
336
  @property
337
  def use_fp16(self):
338
  warnings.warn(
 
341
  FutureWarning,
342
  )
343
  return self.mixed_precision != "no"
 
344
  @property
345
  def is_last_process(self):
346
  return self.process_index == self.num_processes - 1
 
347
  @property
348
  def mixed_precision(self):
349
  return self.state.mixed_precision
 
350
  @contextmanager
351
  def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
352
  """
353
Splits `inputs` between `self.num_processes` quickly and can then be used on that process. Useful when doing
354
  distributed inference, such as with different prompts.
 
355
  Note that when using a `dict`, all keys need to have the same number of elements.
 
356
  Args:
357
  inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
358
  The input to split between processes.
 
361
  number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs
362
or passing in fewer inputs than there are processes. If so, just remember to drop the padded elements
363
  afterwards.
 
364
  Example:
 
365
  ```python
366
  # Assume there are two processes
367
  from accelerate import Accelerator
 
368
  accelerator = Accelerator()
369
  with accelerator.split_between_processes(["A", "B", "C"]) as inputs:
370
  print(inputs)
 
372
  ["A", "B"]
373
  # Process 1
374
  ["C"]
 
375
  with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
376
  print(inputs)
377
  # Process 0
 
382
  """
383
  with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
384
  yield inputs
 
385
  def on_main_process(self, function: Callable[..., Any] = None):
386
  """
387
  A decorator that will run the decorated function on the main process only. Can also be called using the
388
  `PartialState` class.
 
389
  Args:
390
  function (`Callable`): The function to decorate.
 
391
  Example:
 
392
  ```python
393
  >>> from accelerate import Accelerator
 
394
  >>> accelerator = Accelerator()
 
 
395
  >>> @accelerator.on_main_process
396
  ... def print_something():
397
  ... print("This will be printed by process 0 only.")
 
 
398
  >>> print_something()
399
  "This will be printed by process 0 only"
400
  ```
 
407
  raise ValueError(
408
  "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
409
  )
 
410
  def _inner(*args, **kwargs):
411
  return PartialState().on_main_process(function)(*args, **kwargs)
 
412
  return _inner
 
413
  def on_local_main_process(self, function: Callable[..., Any] = None):
414
  """
415
  A decorator that will run the decorated function on the local main process only. Can also be called using the
416
  `PartialState` class.
 
417
  Args:
418
  function (`Callable`): The function to decorate.
 
419
  Example:
420
  ```python
421
  # Assume we have 2 servers with 4 processes each.
422
  from accelerate import Accelerator
 
423
  accelerator = Accelerator()
 
 
424
  @accelerator.on_local_main_process
425
  def print_something():
426
  print("This will be printed by process 0 only on each server.")
 
 
427
  print_something()
428
  # On server 1:
429
  "This will be printed by process 0 only"
 
439
  raise ValueError(
440
  "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
441
  )
 
442
  def _inner(*args, **kwargs):
443
  return PartialState().on_local_main_process(function)(*args, **kwargs)
 
444
  return _inner
 
445
  def on_last_process(self, function: Callable[..., Any]):
446
  """
447
  A decorator that will run the decorated function on the last process only. Can also be called using the
448
  `PartialState` class.
 
449
  Args:
450
  function (`Callable`): The function to decorate.
 
451
  Example:
452
  ```python
453
  # Assume we have 4 processes.
454
  from accelerate import Accelerator
 
455
  accelerator = Accelerator()
 
 
456
  @accelerator.on_last_process
457
  def print_something():
458
  print(f"Printed on process {accelerator.process_index}")
 
 
459
  print_something()
460
  "Printed on process 3"
461
  ```
 
468
  raise ValueError(
469
  "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object."
470
  )
 
471
  def _inner(*args, **kwargs):
472
  return PartialState().on_last_process(function)(*args, **kwargs)
 
473
  return _inner
 
474
  def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
475
  """
476
  A decorator that will run the decorated function on a given process index only. Can also be called using the
477
  `PartialState` class.
 
478
  Args:
479
  function (`Callable`, `optional`):
480
  The function to decorate.
481
  process_index (`int`, `optional`):
482
  The index of the process on which to run the function.
 
483
  Example:
484
  ```python
485
  # Assume we have 4 processes.
486
  from accelerate import Accelerator
 
487
  accelerator = Accelerator()
 
 
488
  @accelerator.on_process(process_index=2)
489
  def print_something():
490
  print(f"Printed on process {accelerator.process_index}")
 
 
491
  print_something()
492
  "Printed on process 2"
493
  ```
 
503
  raise ValueError(
504
  "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
505
  )
 
506
  def _inner(*args, **kwargs):
507
  return PartialState().on_process(function, process_index)(*args, **kwargs)
 
508
  return _inner
 
509
  def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
510
  """
511
  A decorator that will run the decorated function on a given local process index only. Can also be called using
512
  the `PartialState` class.
 
513
  Args:
514
  function (`Callable`, *optional*):
515
  The function to decorate.
516
  local_process_index (`int`, *optional*):
517
  The index of the local process on which to run the function.
 
518
  Example:
519
  ```python
520
  # Assume we have 2 servers with 4 processes each.
521
  from accelerate import Accelerator
 
522
  accelerator = Accelerator()
 
 
523
  @accelerator.on_local_process(local_process_index=2)
524
  def print_something():
525
  print(f"Printed on process {accelerator.local_process_index}")
 
 
526
  print_something()
527
  # On server 1:
528
  "Printed on process 2"
 
541
  raise ValueError(
542
  "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
543
  )
 
544
  def _inner(*args, **kwargs):
545
  return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
 
546
  return _inner
 
547
  @contextmanager
548
  def main_process_first(self):
549
  """
550
  Lets the main process go first inside a with block.
 
551
  The other processes will enter the with block after the main process exits.
 
552
  Example:
 
553
  ```python
554
  >>> from accelerate import Accelerator
 
555
  >>> accelerator = Accelerator()
556
  >>> with accelerator.main_process_first():
557
  ... # This will be printed first by process 0 then in a seemingly
 
561
  """
562
  with self.state.main_process_first():
563
  yield
 
564
  @contextmanager
565
  def local_main_process_first(self):
566
  """
567
Lets the local main process go first inside a with block.
 
568
  The other processes will enter the with block after the main process exits.
 
569
  Example:
 
570
  ```python
571
  >>> from accelerate import Accelerator
 
572
  >>> accelerator = Accelerator()
573
  >>> with accelerator.local_main_process_first():
574
  ... # This will be printed first by local process 0 then in a seemingly
 
578
  """
579
  with self.state.local_main_process_first():
580
  yield
 
581
  @contextmanager
582
  def no_sync(self, model):
583
  """
584
  A context manager to disable gradient synchronizations across DDP processes by calling
585
  `torch.nn.parallel.DistributedDataParallel.no_sync`.
 
586
  If `model` is not in DDP, this context manager does nothing
 
587
  Args:
588
  model (`torch.nn.Module`):
589
  PyTorch Module that was prepared with `Accelerator.prepare`
 
590
  Example:
 
591
  ```python
592
  >>> from accelerate import Accelerator
 
593
  >>> accelerator = Accelerator()
594
  >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
595
  >>> input_a = next(iter(dataloader))
596
  >>> input_b = next(iter(dataloader))
 
597
>>> with accelerator.no_sync(model):
598
  ... outputs = model(input_a)
599
  ... loss = loss_func(outputs)
 
609
  context = contextlib.nullcontext
610
  if self.use_distributed:
611
  context = getattr(model, "no_sync", context)
 
612
  with context():
613
  yield
 
614
  @staticmethod
615
  @contextmanager
616
  def trigger_sync_in_backward(model):
617
  """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under
618
  `Accelerator.no_sync` (only applicable in multi-GPU scenarios).
 
619
  If the script is not launched in distributed mode, this context manager does nothing.
 
620
  Args:
621
  model (`torch.nn.Module`):
622
  The model for which to trigger the gradient synchronization.
 
623
  Example:
 
624
  ```python
625
  >>> from accelerate import Accelerator
 
626
  >>> accelerator = Accelerator()
627
  >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
 
628
>>> with accelerator.no_sync(model):
629
  ... loss_a = loss_func(model(input_a)) # first forward pass
630
  ... loss_b = loss_func(model(input_b)) # second forward pass
 
638
  if not isinstance(model, torch.nn.parallel.DistributedDataParallel):
639
  yield
640
  return
 
641
  old_require_backward_grad_sync = model.require_backward_grad_sync
642
  old_require_forward_param_sync = model.require_forward_param_sync
 
643
  # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features.
644
  # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466
645
  model.require_backward_grad_sync = True
 
651
  finally:
652
  model.require_backward_grad_sync = old_require_backward_grad_sync
653
  model.require_forward_param_sync = old_require_forward_param_sync
 
654
  def _do_sync(self):
655
  "Sets the right `sync_gradients` context and either resets or increases `self.step`"
656
  if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
 
659
  else:
660
  self.step += 1
661
  self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0)
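To make the modulo logic in `_do_sync` concrete, a small sketch assuming 4 gradient-accumulation steps and ignoring the end-of-dataloader reset branch:

```python
num_steps = 4  # gradient accumulation steps
step = 0
sync_points = []
for _ in range(8):  # eight training iterations
    step += 1
    if step % num_steps == 0:  # mirrors `(self.step % num_steps) == 0`
        sync_points.append(step)
assert sync_points == [4, 8]  # gradients are synchronized on every 4th step
```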
 
662
  @property
663
  def sync_gradients(self):
664
  return self.gradient_state.sync_gradients
 
665
  @sync_gradients.setter
666
  def sync_gradients(self, sync_gradients):
667
  self.gradient_state.sync_gradients = sync_gradients
 
668
  @property
669
  def gradient_accumulation_steps(self):
670
  return self.gradient_state.num_steps
 
671
  @gradient_accumulation_steps.setter
672
  def gradient_accumulation_steps(self, gradient_accumulation_steps):
673
  self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps})
 
674
  @contextmanager
675
  def accumulate(self, *models):
676
  """
677
  A context manager that will lightly wrap around and perform gradient accumulation automatically
 
678
  Args:
679
  *models (list of `torch.nn.Module`):
680
  PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will
681
  skip gradient syncing during backward pass in distributed training
 
682
  Example:
 
683
  ```python
684
  >>> from accelerate import Accelerator
 
685
  >>> accelerator = Accelerator(gradient_accumulation_steps=1)
686
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
 
687
  >>> for input, output in dataloader:
688
  ... with accelerator.accumulate(model):
689
  ... outputs = model(input)
 
699
  for m in models:
700
  cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
701
  yield
 
702
  @contextmanager
703
  def join_uneven_inputs(self, joinables, even_batches=None):
704
  """
705
  A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper
706
  around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the
707
  length of the dataset.
 
708
  Args:
709
  joinables (`list[torch.distributed.algorithms.Joinable]`):
710
  A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
 
712
  even_batches (`bool`, *optional*)
713
  If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,
714
the default `Accelerator` value will be used.
 
715
  <Tip warning={true}>
 
716
  `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
717
  configuration, this method will have no effect.
 
718
  </Tip>
 
719
  <Tip warning={true}>
 
720
Overriding `even_batches` will not affect iterable-style data loaders.
 
721
  </Tip>
 
722
  Example:
 
723
  ```python
724
  >>> from accelerate import Accelerator
 
725
  >>> accelerator = Accelerator(even_batches=True)
726
  >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
 
727
  >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
728
  ... for input, output in dataloader:
729
  ... outputs = model(input)
 
735
  """
736
  if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU):
737
  dl_even_batches_values = []
 
738
  if even_batches is not None:
739
  iterable_dl_seen = False
740
  # override value in batch sampler for map-style datasets
 
744
  continue
745
  dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))
746
  dl.batch_sampler.even_batches = even_batches
 
747
  if iterable_dl_seen:
748
  warnings.warn(
749
  "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
750
  )
751
  else:
752
  even_batches = self.even_batches
 
753
enable_join = not even_batches
754
  try:
755
  with Join(joinables, enable=enable_join, throw_on_early_termination=False):
 
764
  warnings.warn(
765
  "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect."
766
  )
 
767
  with contextlib.nullcontext(joinables):
768
  yield
 
769
  def print(self, *args, **kwargs):
770
  """
771
Drop-in replacement for `print()` that only prints once per server.
 
772
  Example:
 
773
  ```python
774
  >>> from accelerate import Accelerator
 
775
  >>> accelerator = Accelerator()
776
  >>> accelerator.print("Hello world!")
777
  ```
778
  """
779
  self.state.print(*args, **kwargs)
 
780
  def _prepare_one(self, obj, first_pass=False, device_placement=None):
781
  # First pass of preparation: DataLoader, model, optimizer
782
  if first_pass:
 
793
  return scheduler
794
# Return the unprocessed object if the previous criteria were not met
795
  return obj
 
796
  def prepare(self, *args, device_placement=None):
797
  """
798
  Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same
799
  order.
 
800
  Args:
801
  *args (list of objects):
802
  Any of the following type of objects:
 
803
  - `torch.utils.data.DataLoader`: PyTorch Dataloader
804
  - `torch.nn.Module`: PyTorch Module
805
  - `torch.optim.Optimizer`: PyTorch Optimizer
806
  - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler
 
807
  device_placement (`list[bool]`, *optional*):
808
  Used to customize whether automatic device placement should be performed for each object passed. Needs
809
  to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP.
 
810
  <Tip>
 
811
  You don't need to prepare a model if you only use it for inference without any kind of mixed precision
 
812
  </Tip>
 
813
  Examples:
 
814
  ```python
815
  >>> from accelerate import Accelerator
 
816
  >>> accelerator = Accelerator()
817
  >>> # Assume a model, optimizer, data_loader and scheduler are defined
818
  >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
819
  ```
 
820
  ```python
821
  >>> from accelerate import Accelerator
 
822
  >>> accelerator = Accelerator()
823
  >>> # Assume a model, optimizer, data_loader and scheduler are defined
824
  >>> device_placement = [True, True, False, False]
 
836
  raise ValueError(
837
  f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)."
838
  )
 
839
  for obj in args:
840
  # TODO: Look at enabling native TP training directly with a proper config
841
  if (
 
848
  "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
849
  " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
850
  )
 
851
  if self.distributed_type == DistributedType.DEEPSPEED:
852
  model_count = 0
853
  for obj in args:
 
857
  raise AssertionError(
858
  "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
859
  )
 
860
  # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
861
  # have parameters disconnected from the model (so no training :-( ).
862
  # If the model and optimizer have parameters on different devices we raise an error.
 
870
  "the flag default value for `device_placement` in your `Accelerator` to let it handle that "
871
  "part for you."
872
  )
 
873
  # If we're dealing with device placement, this deals with that by...
874
  tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.TPU
875
  if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
876
  # 1. grabbing old model parameters
877
  old_named_params = self._get_named_parameters(*args)
 
878
  if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
879
  if self.device.type == "cpu" and self.state.use_ipex:
880
  args = self._prepare_ipex(*args)
 
893
  self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
894
  )
895
  result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
 
896
  if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
897
  # 2. grabbing new model parameters
898
  new_named_params = self._get_named_parameters(*result)
 
902
  for obj in result:
903
  if isinstance(obj, torch.optim.Optimizer):
904
  obj._switch_parameters(mapping)
 
905
  for item in result:
906
  if any(
907
  item in container
908
  for container in (self._dataloaders, self._models, self._optimizers, self._schedulers)
909
  ):
910
  setattr(item, "_is_accelerate_prepared", True)
 
911
  return result if len(result) > 1 else result[0]
 
912
  def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False):
913
  """
914
  Prepares a PyTorch model for training in any distributed setup. It is recommended to use
915
  [`Accelerator.prepare`] instead.
 
916
  Args:
917
  model (`torch.nn.Module`):
918
  A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without
 
922
  evaluation_mode (`bool`, *optional*, defaults to `False`):
923
  Whether or not to set the model for evaluation only, by just applying mixed precision and
924
  `torch.compile` (if configured in the `Accelerator` object).
 
925
  Example:
 
926
  ```python
927
  >>> from accelerate import Accelerator
 
928
  >>> accelerator = Accelerator()
929
  >>> # Assume a model is defined
930
  >>> model = accelerator.prepare_model(model)
 
933
  if device_placement is None:
934
  device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
935
  self._models.append(model)
 
936
  # TODO: Look at enabling native TP training directly with a proper config
937
  if (
938
  self.verify_device_map(model)
 
943
  "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
944
  " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
945
  )
 
946
  if self.native_amp:
947
  model._original_forward = model.forward
948
  model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
 
959
  convert_model(model)
960
  model._converted_to_transformer_engine = True
961
  model._original_forward = model.forward
 
962
  kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {}
963
  if "fp8_format" in kwargs:
964
  kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
965
  fp8_recipe = te_recipe.DelayedScaling(**kwargs)
966
  model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
 
967
  if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
968
  model, "hf_device_map", False
969
  ):
 
976
  )
977
  current_device = list(model_devices)[0]
978
  current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
 
979
  if torch.device(current_device_index) != self.device:
980
  # if on the first device (GPU 0) we don't care
981
  if (self.device.index is not None) or (current_device_index != 0):
 
983
  "You can't train a model that has been loaded in 8-bit precision on a different device than the one "
984
  "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
985
  )
 
986
  if "cpu" in model_devices or "disk" in model_devices:
987
  raise ValueError(
988
  "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload."
 
1002
  device_ids, output_device = [self.local_process_index], self.local_process_index
1003
  else:
1004
  device_ids, output_device = None, None
 
1005
  model = torch.nn.parallel.DistributedDataParallel(
1006
  model, device_ids=device_ids, output_device=output_device, **kwargs
1007
  )
1008
  elif self.distributed_type == DistributedType.FSDP:
1009
  from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
 
1010
  # Check if the model is already a FSDP model due to `Manual Wrapping` and if so,
1011
  # don't wrap it again
1012
  # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it
 
1014
  is_type_fsdp = isinstance(model, FSDP) or (
1015
  is_compiled_module(model) and isinstance(model._orig_mod, FSDP)
1016
  )
 
1017
  if not is_type_fsdp:
1018
  self.state.fsdp_plugin.set_auto_wrap_policy(model)
1019
  fsdp_plugin = self.state.fsdp_plugin
 
1038
  apply_activation_checkpointing,
1039
  checkpoint_wrapper,
1040
  )
 
1041
  apply_activation_checkpointing(
1042
  model,
1043
  checkpoint_wrapper_fn=functools.partial(
 
1061
  raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.")
1062
  model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
1063
  return model
 
1064
  def _prepare_deepspeed(self, *args):
1065
  import deepspeed
 
1066
  deepspeed_plugin = self.state.deepspeed_plugin
 
1067
  is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args)
1068
  result = [
1069
  self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj
1070
  for obj in args
1071
  ]
 
1072
  if deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] == "auto":
1073
  if is_dataloader_present:
1074
  batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
 
1080
  )
1081
  if self.split_batches:
1082
  batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]
 
1083
  batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)
1084
  if len(batch_sizes) > 1:
1085
  logger.info(
 
1095
  )
1096
  else:
1097
  batch_size_per_device = deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"]
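# Worked example (illustrative, not part of the original source): with prepared
# dataloaders of batch sizes [32, 16], `split_batches=True` and 4 processes, the
# sizes first become [8, 4]; `is_train_batch_min=True` then selects 4 as
# `train_micro_batch_size_per_gpu`, otherwise the maximum (8) is used.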
 
1098
  # handle `gradient_accumulation_steps` when the value is `auto`
1099
  deepspeed_plugin.fill_match(
1100
  "gradient_accumulation_steps",
1101
  must_match=False,
1102
  gradient_accumulation_steps=self.gradient_accumulation_steps,
1103
  )
 
1104
  config_kwargs = {
1105
  "train_micro_batch_size_per_gpu": batch_size_per_device,
1106
  "train_batch_size": batch_size_per_device
 
1109
  "gradient_clipping": 1.0,
1110
  "zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
1111
  }
 
1112
  model = None
1113
  optimizer = None
1114
  scheduler = None
 
1121
  type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
1122
  ):
1123
  scheduler = obj
 
1124
  if optimizer is not None:
1125
  if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)):
1126
  raise ValueError(
 
1132
  raise ValueError(
1133
  "You cannot create a `DummyOptim` without specifying an optimizer in the config file."
1134
  )
 
1135
  if isinstance(optimizer, (torch.optim.Optimizer)):
1136
  deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True
 
1137
  if scheduler is not None:
1138
  if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)):
1139
  raise ValueError(
 
1150
  "Either specify a scheduler in the config file or "
1151
  "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
1152
  )
 
1153
  if optimizer is not None and scheduler is not None:
1154
  if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)):
1155
  raise ValueError(
1156
  "You can only specify `accelerate.utils.DummyScheduler` in the code when using "
1157
  "`accelerate.utils.DummyOptim`."
1158
  )
 
1159
  if model is not None:
1160
  if hasattr(model, "config"):
1161
  hidden_size = (
 
1171
  "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
1172
  }
1173
  )
 
1174
  if isinstance(optimizer, (DummyOptim)):
1175
  config_kwargs.update(
1176
  {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay}
 
1207
  "device", "none"
1208
  ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True):
1209
  from deepspeed.ops.adam import DeepSpeedCPUAdam
 
1210
  defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]}
1211
  optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
1212
  kwargs["optimizer"] = optimizer
 
1216
  or type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
1217
  ):
1218
  kwargs["lr_scheduler"] = scheduler
 
1219
  engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)
1220
  if optimizer is not None:
1221
  optimizer = DeepSpeedOptimizerWrapper(optimizer)
 
1229
  )
1230
  else:
1231
  scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)
 
1232
  for i in range(len(result)):
1233
  if isinstance(result[i], torch.nn.Module):
1234
  result[i] = engine
 
1250
  "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
1251
  )
1252
  return tuple(result)
 
1253
  def _prepare_megatron_lm(self, *args):
1254
  megatron_lm_plugin = self.state.megatron_lm_plugin
1255
  if not megatron_lm_plugin.megatron_dataset_flag:
 
1258
  raise ValueError(
1259
  "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM."
1260
  )
 
1261
  micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes)
1262
  if len(batch_sizes) > 1:
1263
  logger.info(
 
1269
  if isinstance(obj, MegatronLMDummyDataLoader):
1270
  micro_batch_size = obj.dataset_args["micro_batch_size"]
1271
  break
 
1272
  dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)
1273
  megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)
 
1274
  model = None
1275
  optimizer = None
1276
  scheduler = None
 
1285
  optimizer = obj
1286
  elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)):
1287
  scheduler = obj
 
1288
  if model is not None:
1289
  megatron_lm_plugin.set_network_size_args(model, batch_data)
1290
  if optimizer is not None:
 
1296
  "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead."
1297
  )
1298
  megatron_lm_plugin.set_scheduler_args(scheduler)
 
1299
  # initialize megatron-lm
1300
  megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)
1301
  counter = 0
 
1312
  counter += 1
1313
  else:
1314
  result.append(obj)
 
1315
  if model is not None:
1316
  model = megatron_lm_prepare_model(self)
1317
  if optimizer is not None:
1318
  optimizer = megatron_lm_prepare_optimizer(self, model)
1319
  if scheduler is not None:
1320
  scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler)
 
1321
  if model is not None:
1322
  model = MegatronEngine(self, model, optimizer, scheduler)
1323
  if optimizer is not None:
1324
  optimizer = MegatronLMOptimizerWrapper(optimizer)
1325
  if scheduler is not None:
1326
  scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer)
 
1327
  for i in range(len(result)):
1328
  if isinstance(result[i], torch.nn.Module):
1329
  result[i] = model
 
1342
  "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM"
1343
  )
1344
  return tuple(result)
 
1345
  def _prepare_ipex(self, *args):
1346
  if not is_ipex_available():
1347
  raise ImportError(
 
1350
  )
1351
  else:
1352
  import intel_extension_for_pytorch as ipex
 
1353
  model = None
1354
  optimizer = None
1355
  result = [obj for obj in args]
 
1373
  elif isinstance(result[i], (torch.optim.Optimizer)):
1374
  result[i] = optimizer
1375
  return tuple(result)
 
1376
  def _prepare_msamp(self, *args):
1377
  if not is_msamp_available():
1378
  raise ImportError(
 
1381
  )
1382
  else:
1383
  import msamp
 
1384
  model, optimizer = None, None
1385
  num_models, num_optimizers = 0, 0
1386
  result = [obj for obj in args]
 
1407
  elif isinstance(result[i], (torch.optim.Optimizer)):
1408
  result[i] = optimizer
1409
  return tuple(result)
 
1410
  def prepare_data_loader(
1411
  self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None
1412
  ):
1413
  """
1414
  Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use
1415
  [`Accelerator.prepare`] instead.
 
1416
  Args:
1417
  data_loader (`torch.utils.data.DataLoader`):
1418
  A vanilla PyTorch DataLoader to prepare
 
1423
  If passed, this function will be used to slice tensors across `num_processes`. Will default to
1424
  [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will
1425
  be ignored otherwise.
 
1426
  Example:
 
1427
  ```python
1428
  >>> import torch
1429
  >>> from accelerate import Accelerator
 
1430
  >>> accelerator = Accelerator()
1431
  >>> data_loader = torch.utils.data.DataLoader(...)
1432
  >>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True)
 
1453
  )
1454
  self._dataloaders.append(prepared_data_loader)
1455
  return prepared_data_loader
 
1456
  def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):
1457
  """
1458
  Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use
1459
  [`Accelerator.prepare`] instead.
 
1460
  Args:
1461
  optimizer (`torch.optim.Optimizer`):
1462
  A vanilla PyTorch optimizer to prepare
1463
  device_placement (`bool`, *optional*):
1464
  Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`.
 
1465
  Example:
 
1466
  ```python
1467
  >>> import torch
1468
  >>> from accelerate import Accelerator
 
1469
  >>> accelerator = Accelerator()
1470
  >>> optimizer = torch.optim.Adam(...)
1471
  >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True)
 
1481
  optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)
1482
  self._optimizers.append(optimizer)
1483
  return optimizer
 
1484
  def prepare_scheduler(self, scheduler: LRScheduler):
1485
  """
1486
  Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use
1487
  [`Accelerator.prepare`] instead.
 
1488
  Args:
1489
  scheduler (`torch.optim.lr_scheduler.LRScheduler`):
1490
  A vanilla PyTorch scheduler to prepare
 
1491
  Example:
 
1492
  ```python
1493
  >>> import torch
1494
  >>> from accelerate import Accelerator
 
1495
  >>> accelerator = Accelerator()
1496
  >>> optimizer = torch.optim.Adam(...)
1497
  >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
 
1517
  )
1518
  self._schedulers.append(scheduler)
1519
  return scheduler
 
1520
  def backward(self, loss, **kwargs):
1521
  """
1522
  Scales the gradients in accordance with the `GradientAccumulationPlugin` and calls the correct `backward()` based
1523
  on the configuration.
 
1524
  Should be used in lieu of `loss.backward()`.
 
1525
  Example:
 
1526
  ```python
1527
  >>> from accelerate import Accelerator
 
1528
  >>> accelerator = Accelerator(gradient_accumulation_steps=2)
1529
  >>> outputs = model(inputs)
1530
  >>> loss = loss_fn(outputs, labels)
 
1542
  self.scaler.scale(loss).backward(**kwargs)
1543
  else:
1544
  loss.backward(**kwargs)
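# Illustrative usage sketch (not part of the original source): `model`,
# `optimizer`, `dataloader` and `loss_fn` are assumed to have been created by
# the user and passed through `accelerator.prepare`.
for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        loss = loss_fn(model(inputs), targets)
        accelerator.backward(loss)  # scales the loss when mixed precision is enabled
        optimizer.step()
        optimizer.zero_grad()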
 
1545
  def set_trigger(self):
1546
  """
1547
  Sets the internal trigger tensor to 1 on the current process. A later check should follow using this, which
1548
  will check across all processes.
 
1549
  Note:
1550
  Does not require `wait_for_everyone()`
 
1551
  Example:
 
1552
  ```python
1553
  >>> from accelerate import Accelerator
 
1554
  >>> accelerator = Accelerator()
1555
  >>> # Assume later in the training script
1556
  >>> # `should_do_breakpoint` is a custom function to monitor when to break,
 
1563
  ```
1564
  """
1565
  self.flag_tensor = torch.tensor(1, device=self.device)
 
1566
  def check_trigger(self):
1567
  """
1568
  Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and
1569
  reset the trigger tensor to 0.
 
1570
  Note:
1571
  Does not require `wait_for_everyone()`
 
1572
  Example:
 
1573
  ```python
1574
  >>> from accelerate import Accelerator
 
1575
  >>> accelerator = Accelerator()
1576
  >>> # Assume later in the training script
1577
  >>> # `should_do_breakpoint` is a custom function to monitor when to break,
 
1591
  self.flag_tensor = torch.tensor(0, device=self.device)
1592
  return True
1593
  return False
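# Illustrative sketch (not part of the original source): cooperative early
# stopping across processes; `should_stop_early` and `train_step` are
# hypothetical user helpers.
for batch in dataloader:
    train_step(batch)
    if should_stop_early():
        accelerator.set_trigger()
    # `check_trigger` returns True on every process once any process has set
    # the trigger, then resets it, so all ranks leave the loop together.
    if accelerator.check_trigger():
        break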
 
1594
  def unscale_gradients(self, optimizer=None):
1595
  """
1596
  Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.
 
1597
  Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`]
 
1598
  Args:
1599
  optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*):
1600
  The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers
1601
  that were passed to [`~Accelerator.prepare`].
 
1602
  Example:
 
1603
  ```python
1604
  >>> from accelerate import Accelerator
 
1605
  >>> accelerator = Accelerator()
1606
  >>> model, optimizer = accelerator.prepare(model, optimizer)
1607
  >>> outputs = model(inputs)
 
1624
  gradients = xm._fetch_gradients(opt)
1625
  self.reduce(gradients, scale=1.0 / self.num_processes)
1626
  self.scaler.unscale_(opt)
 
1627
  def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
1628
  """
1629
  Should be used in place of `torch.nn.utils.clip_grad_norm_`.
 
1630
  Returns:
1631
  `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).
 
1632
  Example:
 
1633
  ```python
1634
  >>> from accelerate import Accelerator
 
1635
  >>> accelerator = Accelerator(gradient_accumulation_steps=2)
1636
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
 
1637
  >>> for input, target in dataloader:
1638
  ... optimizer.zero_grad()
1639
  ... output = model(input)
 
1656
  return None
1657
  self.unscale_gradients()
1658
  return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
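# Illustrative sketch (not part of the original source): clipping only when
# gradients are actually synchronized; `max_grad_norm` is a hypothetical
# hyperparameter and the other objects are assumed to come from `prepare`.
for inputs, targets in dataloader:
    optimizer.zero_grad()
    loss = loss_fn(model(inputs), targets)
    accelerator.backward(loss)
    if accelerator.sync_gradients:
        total_norm = accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
    optimizer.step()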
 
1659
  def clip_grad_value_(self, parameters, clip_value):
1660
  """
1661
  Should be used in place of `torch.nn.utils.clip_grad_value_`.
 
1662
  Example:
 
1663
  ```python
1664
  >>> from accelerate import Accelerator
 
1665
  >>> accelerator = Accelerator(gradient_accumulation_steps=2)
1666
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
 
1667
  >>> for input, target in dataloader:
1668
  ... optimizer.zero_grad()
1669
  ... output = model(input)
 
1678
  raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.")
1679
  self.unscale_gradients()
1680
  torch.nn.utils.clip_grad_value_(parameters, clip_value)
 
1681
  def gather(self, tensor):
1682
  """
1683
  Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to
1684
  regroup the predictions from all processes when doing evaluation.
 
1685
  Note:
1686
  This gather happens in all processes.
 
1687
  Args:
1688
  tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
1689
  The tensors to gather across all processes.
 
1690
  Returns:
1691
  `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the
1692
  first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors.
 
1693
  Example:
 
1694
  ```python
1695
  >>> # Assuming four processes
1696
  >>> import torch
1697
  >>> from accelerate import Accelerator
 
1698
  >>> accelerator = Accelerator()
1699
  >>> process_tensor = torch.tensor([accelerator.process_index])
1700
  >>> gathered_tensor = accelerator.gather(process_tensor)
 
1703
  ```
1704
  """
1705
  return gather(tensor)
 
1706
  def gather_for_metrics(self, input_data):
1707
  """
1708
  Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
1709
  used for gathering the inputs and targets for metric calculation.
 
1710
  Args:
1711
  input_data (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
1712
  The tensors or objects for calculating metrics across all processes
 
1713
  Example:
 
1714
  ```python
1715
  >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples
1716
  >>> import torch
1717
  >>> from accelerate import Accelerator
 
1718
  >>> accelerator = Accelerator()
1719
  >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5)
1720
  >>> dataloader = accelerator.prepare(dataloader)
 
1729
  all_tensors = True
1730
  except TypeError:
1731
  all_tensors = False
 
1732
  if not all_tensors:
1733
  data = gather_object(input_data)
1734
  else:
1735
  data = self.gather(input_data)
 
1736
  try:
1737
  if self.gradient_state.end_of_dataloader:
1738
  # at the end of a dataloader, `gather_for_metrics` regresses to
 
1746
  # Last batch needs to be truncated on distributed systems as it contains additional samples
1747
  def _adjust_samples(tensor):
1748
  return tensor[: self.gradient_state.remainder]
 
1749
  return recursively_apply(_adjust_samples, data)
1750
  else: # remainder is 0
1751
  # no remainder even though at end of dataloader, so nothing to do.
 
1756
  except Exception:
1757
  # Dataset had no length or raised an error
1758
  return data
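# Illustrative sketch (not part of the original source): gathering predictions
# and references for metric computation; the duplicated samples used to pad the
# last batch are dropped automatically. `metric` is a hypothetical object with
# an `add_batch`/`compute` API.
import torch

model.eval()
for inputs, targets in eval_dataloader:
    with torch.no_grad():
        logits = model(inputs)
    preds, refs = accelerator.gather_for_metrics((logits.argmax(dim=-1), targets))
    metric.add_batch(predictions=preds, references=refs)
print(metric.compute())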
 
1759
  def reduce(self, tensor, reduction="sum", scale=1.0):
1760
  """
1761
  Reduce the values in *tensor* across all processes based on *reduction*.
 
1762
  Note:
1763
  All processes get the reduced value.
 
1764
  Args:
1765
  tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
1766
  The tensors to reduce across all processes.
 
1768
  A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
1769
  scale (`float`, *optional*, defaults to 1.0):
1770
  A default scaling value to be applied after the reduce, only valid on XLA.
 
1771
  Returns:
1772
  `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
1773
  The reduced tensor(s).
 
1774
  Example:
 
1775
  ```python
1776
  >>> # Assuming two processes
1777
  >>> import torch
1778
  >>> from accelerate import Accelerator
 
1779
  >>> accelerator = Accelerator()
1780
  >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index)
1781
  >>> process_tensor = process_tensor.to(accelerator.device)
 
1785
  ```
1786
  """
1787
  return reduce(tensor, reduction, scale)
 
1788
  def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
1789
  """
1790
  Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
1791
  they can safely be gathered.
 
1792
  Args:
1793
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
1794
  The data to gather.
 
1798
  The value with which to pad.
1799
  pad_first (`bool`, *optional*, defaults to `False`):
1800
  Whether to pad at the beginning or the end.
 
1801
  Returns:
1802
  `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
1803
  The padded tensor(s).
 
1804
  Example:
 
1805
  ```python
1806
  >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2
1807
  >>> import torch
1808
  >>> from accelerate import Accelerator
 
1809
  >>> accelerator = Accelerator()
1810
  >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device)
1811
  >>> padded_tensor = accelerator.pad_across_processes(process_tensor)
 
1814
  ```
1815
  """
1816
  return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)
 
1817
  def unwrap_model(self, model, keep_fp32_wrapper: bool = True):
1818
  """
1819
  Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving
1820
  the model.
 
1821
  Args:
1822
  model (`torch.nn.Module`):
1823
  The model to unwrap.
1824
  keep_fp32_wrapper (`bool`, *optional*, defaults to `True`):
1825
  Whether to keep (i.e. not remove) the mixed precision hook if it was added.
 
1826
  Returns:
1827
  `torch.nn.Module`: The unwrapped model.
 
1828
  Example:
 
1829
  ```python
1830
  >>> # Assuming two GPU processes
1831
  >>> from torch.nn.parallel import DistributedDataParallel
1832
  >>> from accelerate import Accelerator
 
1833
  >>> accelerator = Accelerator()
1834
  >>> model = accelerator.prepare(MyModel())
1835
  >>> print(model.__class__.__name__)
1836
  DistributedDataParallel
 
1837
  >>> model = accelerator.unwrap_model(model)
1838
  >>> print(model.__class__.__name__)
1839
  MyModel
1840
  ```
1841
  """
1842
  return extract_model_from_parallel(model, keep_fp32_wrapper)
 
1843
  def wait_for_everyone(self):
1844
  """
1845
  Will stop the execution of the current process until every other process has reached that point (so this does
1846
  nothing when the script is only run in one process). Useful to do before saving a model.
 
1847
  Example:
 
1848
  ```python
1849
  >>> # Assuming two GPU processes
1850
  >>> import time
1851
  >>> from accelerate import Accelerator
 
1852
  >>> accelerator = Accelerator()
1853
  >>> if accelerator.is_main_process:
1854
  ... time.sleep(2)
 
1860
  ```
1861
  """
1862
  wait_for_everyone()
 
1863
  @on_main_process
1864
  def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}):
1865
  """
1866
  Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations
 
1867
  Args:
1868
  project_name (`str`):
1869
  The name of the project. All trackers will save their data based on this
 
1875
  ```python
1876
  {"wandb": {"tags": ["tag_a", "tag_b"]}}
1877
  ```
 
1878
  Example:
 
1879
  ```python
1880
  >>> from accelerate import Accelerator
 
1881
  >>> accelerator = Accelerator(log_with="tensorboard")
1882
  >>> accelerator.init_trackers(
1883
  ... project_name="my_project",
 
1902
  if config is not None:
1903
  for tracker in self.trackers:
1904
  tracker.store_init_configuration(config)
 
1905
  def get_tracker(self, name: str, unwrap: bool = False):
1906
  """
1907
  Returns a `tracker` from `self.trackers` based on `name` on the main process only.
 
1908
  Args:
1909
  name (`str`):
1910
  The name of a tracker, corresponding to the `.name` property.
1911
  unwrap (`bool`):
1912
  Whether to return the internal tracking mechanism or to return the wrapped tracker instead
1913
  (recommended).
 
1914
  Returns:
1915
  `GeneralTracker`: The tracker corresponding to `name` if it exists.
 
1916
  Example:
 
1917
  ```python
1918
  >>> from accelerate import Accelerator
 
1919
  >>> accelerator = Accelerator(log_with="tensorboard")
1920
  >>> accelerator.init_trackers("my_project")
1921
  >>> tensorboard_tracker = accelerator.get_tracker("tensorboard")
 
1928
  raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.")
1929
  # Handle tracker only made on main process
1930
  return GeneralTracker(_blank=True)
 
1931
  @on_main_process
1932
  def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}):
1933
  """
1934
  Logs `values` to all stored trackers in `self.trackers` on the main process only.
 
1935
  Args:
1936
  values (`dict`):
1937
  Values should be a dictionary-like object containing only types `int`, `float`, or `str`.
 
1943
  ```python
1944
  {"wandb": {"tags": ["tag_a", "tag_b"]}}
1945
  ```
 
1946
  Example:
 
1947
  ```python
1948
  >>> from accelerate import Accelerator
 
1949
  >>> accelerator = Accelerator(log_with="tensorboard")
1950
  >>> accelerator.init_trackers("my_project")
1951
  >>> accelerator.log({"loss": 0.5, "accuracy": 0.9})
 
1953
  """
1954
  for tracker in self.trackers:
1955
  tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))
 
1956
  @on_main_process
1957
  def end_training(self):
1958
  """
1959
  Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be
1960
  called at the end of your script if using experiment tracking.
 
1961
  Example:
 
1962
  ```python
1963
  >>> from accelerate import Accelerator
 
1964
  >>> accelerator = Accelerator(log_with="tensorboard")
1965
  >>> accelerator.init_trackers("my_project")
1966
  >>> # Do training
 
1969
  """
1970
  for tracker in self.trackers:
1971
  tracker.finish()
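# Illustrative sketch (not part of the original source): a minimal experiment
# tracking loop, assuming TensorBoard is installed; `training_step` and the
# dataloader are hypothetical user code.
from accelerate import Accelerator

accelerator = Accelerator(log_with="tensorboard", project_dir="runs")
accelerator.init_trackers("my_project", config={"learning_rate": 3e-4})
for step, batch in enumerate(dataloader):
    loss = training_step(batch)
    accelerator.log({"train_loss": loss.item()}, step=step)
accelerator.end_training()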
 
1972
  def save(self, obj, f, safe_serialization=False):
1973
  """
1974
  Save the object passed to disk once per machine. Use in place of `torch.save`.
 
1975
  Args:
1976
  obj (`object`): The object to save.
1977
  f (`str` or `os.PathLike`): Where to save the content of `obj`.
1978
  safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
 
1979
  Note:
1980
  If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node,
1981
  rather than only once on the main node.
 
1982
  Example:
 
1983
  ```python
1984
  >>> from accelerate import Accelerator
 
1985
  >>> accelerator = Accelerator()
1986
  >>> arr = [0, 1, 2, 3]
1987
  >>> accelerator.save(arr, "array.pkl")
 
1993
  save_on_each_node=self.project_configuration.save_on_each_node,
1994
  safe_serialization=safe_serialization,
1995
  )
 
1996
  def save_model(
1997
  self,
1998
  model: torch.nn.Module,
 
2002
  ):
2003
  """
2004
  Save a model so that it can be re-loaded using [`load_checkpoint_in_model`].
 
2005
  Arguments:
2006
  model: (`torch.nn.Module`):
2007
  Model to be saved. The model can be wrapped or unwrapped.
 
2010
  max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
2011
  The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size
2012
  lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
 
2013
  <Tip warning={true}>
 
2014
  If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
2015
  which will be bigger than `max_shard_size`.
 
2016
  </Tip>
 
2017
  safe_serialization (`bool`, *optional*, defaults to `True`):
2018
  Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
 
2019
  Example:
 
2020
  ```python
2021
  >>> from accelerate import Accelerator
 
2022
  >>> accelerator = Accelerator()
2023
  >>> model = ...
2024
  >>> accelerator.save_model(model, save_directory)
 
2027
  if os.path.isfile(save_directory):
2028
  logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
2029
  return
 
2030
  os.makedirs(save_directory, exist_ok=True)
 
2031
  # get the state_dict of the model
2032
  if any(
2033
  [
 
2041
  if any(param.device == torch.device("meta") for param in model.parameters()):
2042
  raise RuntimeError("You can't save the model since some parameters are on the meta device.")
2043
  state_dict = self.get_state_dict(model)
 
2044
  if safe_serialization:
2045
  state_dict = clean_state_dict_for_safetensors(state_dict)
2046
  weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
 
2047
  # Shard the model if it is too big.
2048
  shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
 
2049
  # Clean the folder from a previous save
2050
  for filename in os.listdir(save_directory):
2051
  full_filename = os.path.join(save_directory, filename)
2052
  # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
2053
  # in distributed settings to avoid race conditions.
2054
  weights_no_suffix = weights_name.replace(".bin", "")
 
2055
  # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
2056
  filename_no_suffix = filename.replace(".bin", "")
2057
  reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")
 
2058
  if (
2059
  filename.startswith(weights_no_suffix)
2060
  and os.path.isfile(full_filename)
 
2063
  and PartialState().is_main_process
2064
  ):
2065
  os.remove(full_filename)
 
2066
  # Save the model
2067
  for shard_file, shard in shards.items():
2068
  self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization)
 
2069
  if index is None:
2070
  path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
2071
  logger.info(f"Model weights saved in {path_to_weights}")
 
2081
  f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
2082
  f"index located at {save_index_file}."
2083
  )
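# Illustrative sketch (not part of the original source): saving sharded
# `safetensors` weights and loading them back; the import path for
# `load_checkpoint_in_model` is assumed to be `accelerate.utils`, and `MyModel`
# is a hypothetical architecture.
from accelerate.utils import load_checkpoint_in_model

accelerator.save_model(model, "saved_model", max_shard_size="2GB", safe_serialization=True)
fresh_model = MyModel()
load_checkpoint_in_model(fresh_model, "saved_model")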
 
2084
  def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
2085
  """
2086
  Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`].
 
2087
  Args:
2088
  hook (`Callable`):
2089
  A function to be called in [`Accelerator.save_state`] before `save_checkpoint`.
 
2090
  The hook should have the following signature:
 
2091
  `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None`
 
2092
  The `models` argument is the list of models as saved in the accelerator state under `accelerator._models`, the `weights`
2093
  argument is the list of state dicts of the `models`, and the `input_dir` argument is the directory passed
2094
  to [`Accelerator.save_state`].
 
2095
  <Tip>
 
2096
  Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save
2097
  configurations in addition to model weights. Can also be used to overwrite model saving with a customized
2098
  method. In this case, make sure to remove already loaded weights from the weights list.
 
2099
  </Tip>
 
2100
  Returns:
2101
  `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
2102
  `handle.remove()`
 
2104
  handle = hooks.RemovableHandle(self._save_model_state_pre_hook)
2105
  self._save_model_state_pre_hook[handle.id] = hook
2106
  return handle
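# Illustrative sketch (not part of the original source): a pre-hook that writes
# an extra JSON file next to each checkpoint; all names are hypothetical.
import json
import os

def save_extra_config(models, weights, output_dir):
    with open(os.path.join(output_dir, "extra_config.json"), "w") as f:
        json.dump({"num_models": len(models)}, f)

handle = accelerator.register_save_state_pre_hook(save_extra_config)
accelerator.save_state("checkpoints/step_100")
handle.remove()  # detach the hook once it is no longer needed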
 
2107
  def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs):
2108
  """
2109
  Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.
 
2110
  If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled
2111
  then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater
2112
  than `total_limit` then the oldest save is deleted. Each checkpoint is saved in a separate folder named
2113
  `checkpoint_<iteration>`.
 
2114
  Otherwise they are just saved to `output_dir`.
 
2115
  <Tip>
 
2116
  Should only be used when wanting to save a checkpoint during training and restoring the state in the same
2117
  environment.
 
2118
  </Tip>
 
2119
  Args:
2120
  output_dir (`str` or `os.PathLike`):
2121
  The name of the folder to save all relevant weights and states.
 
2124
  save_model_func_kwargs (`dict`, *optional*):
2125
  Additional keyword arguments for saving model which can be passed to the underlying save function, such
2126
  as optional arguments for DeepSpeed's `save_checkpoint` function.
 
2127
  Example:
 
2128
  ```python
2129
  >>> from accelerate import Accelerator
 
2130
  >>> accelerator = Accelerator()
2131
  >>> model, optimizer, lr_scheduler = ...
2132
  >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
 
2143
  and (len(folders) + 1 > self.project_configuration.total_limit)
2144
  and self.is_main_process
2145
  ):
 
2146
  def _inner(folder):
2147
  return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
 
2148
  folders.sort(key=_inner)
2149
  logger.warning(
2150
  f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint."
 
2159
  self.wait_for_everyone()
2160
  os.makedirs(output_dir, exist_ok=True)
2161
  logger.info(f"Saving current state to {output_dir}")
 
2162
  if self.distributed_type == DistributedType.TPU:
2163
  # Finish running the previous step before checkpointing
2164
  xm.mark_step()
 
2165
  # Save the models taking care of FSDP and DeepSpeed nuances
2166
  weights = []
2167
  for i, model in enumerate(self._models):
 
2180
  logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}")
2181
  else:
2182
  weights.append(self.get_state_dict(model, unwrap=False))
 
2183
  # Save the optimizers taking care of FSDP and DeepSpeed nuances
2184
  optimizers = []
2185
  if self.distributed_type == DistributedType.FSDP:
 
2189
  logger.info(f"FSDP Optimizer saved to output dir {output_dir}")
2190
  elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
2191
  optimizers = self._optimizers
 
2192
  # Save the lr schedulers taking care of DeepSpeed nuances
2193
  schedulers = []
2194
  if self.distributed_type == DistributedType.DEEPSPEED:
 
2198
  schedulers.append(scheduler)
2199
  elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
2200
  schedulers = self._schedulers
 
2201
  # Save the samplers of the dataloaders
2202
  dataloaders = self._dataloaders
 
2203
  # Call model loading hooks that might have been registered with
2204
  # accelerator.register_model_state_hook
2205
  for hook in self._save_model_state_pre_hook.values():
2206
  hook(self._models, weights, output_dir)
 
2207
  save_location = save_accelerator_state(
2208
  output_dir,
2209
  weights,
 
2219
  save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
2220
  self.project_configuration.iteration += 1
2221
  return save_location
 
2222
  def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
2223
  """
2224
  Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`].
 
2225
  Args:
2226
  hook (`Callable`):
2227
  A function to be called in [`Accelerator.load_state`] before `load_checkpoint`.
 
2228
  The hook should have the following signature:
 
2229
  `hook(models: list[torch.nn.Module], input_dir: str) -> None`
 
2230
  The `models` argument are the models as saved in the accelerator state under `accelerator._models`, and the
2231
  `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`].
 
2232
  <Tip>
 
2233
  Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load
2234
  configurations in addition to model weights. Can also be used to overwrite model loading with a customized
2235
  method. In this case, make sure to remove already loaded models from the models list.
 
2236
  </Tip>
 
2237
  Returns:
2238
  `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
2239
  `handle.remove()`
 
2241
  handle = hooks.RemovableHandle(self._load_model_state_pre_hook)
2242
  self._load_model_state_pre_hook[handle.id] = hook
2243
  return handle
 
2244
  def load_state(self, input_dir: str = None, **load_model_func_kwargs):
2245
  """
2246
  Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.
 
2247
  <Tip>
 
2248
  Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for
2249
  checkpointing, it will not be loaded if stored in the directory.
 
2250
  </Tip>
 
2251
  Args:
2252
  input_dir (`str` or `os.PathLike`):
2253
  The name of the folder all relevant weights and states were saved in. Can be `None` if
 
2256
  Additional keyword arguments for loading model which can be passed to the underlying load function,
2257
  such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the
2258
  model and optimizer on.
 
2259
  Example:
 
2260
  ```python
2261
  >>> from accelerate import Accelerator
 
2262
  >>> accelerator = Accelerator()
2263
  >>> model, optimizer, lr_scheduler = ...
2264
  >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
 
2274
  # Pick up from automatic checkpoint naming
2275
  input_dir = os.path.join(self.project_dir, "checkpoints")
2276
  folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)]
 
2277
  def _inner(folder):
2278
  return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
 
2279
  folders.sort(key=_inner)
2280
  input_dir = folders[-1]
2281
  else:
2282
  raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.")
2283
  logger.info(f"Loading states from {input_dir}")
 
2284
  # Load the models taking care of FSDP and DeepSpeed nuances
2285
  models = []
2286
  for i, model in enumerate(self._models):
 
2299
  logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}")
2300
  else:
2301
  models.append(model)
 
2302
  # Load the optimizers taking care of FSDP and DeepSpeed nuances
2303
  optimizers = []
2304
  if self.distributed_type == DistributedType.FSDP:
 
2308
  logger.info(f"FSDP Optimizer loaded from input dir {input_dir}")
2309
  elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
2310
  optimizers = self._optimizers
 
2311
  # Load the lr schedulers taking care of DeepSpeed nuances
2312
  schedulers = []
2313
  if self.distributed_type == DistributedType.DEEPSPEED:
 
2317
  schedulers.append(scheduler)
2318
  elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
2319
  schedulers = self._schedulers
 
2320
  dataloaders = self._dataloaders
 
2321
  # Call model loading hooks that might have been registered with
2322
  # accelerator.register_model_state_hook
2323
  for hook in self._load_model_state_pre_hook.values():
2324
  hook(models, input_dir)
 
2325
  map_location = load_model_func_kwargs.pop("map_location", None)
2326
  if map_location is None:
2327
  if self.num_processes > 1 and self.distributed_type in (
 
2331
  map_location = "on_device"
2332
  else:
2333
  map_location = "cpu"
 
2334
  load_accelerator_state(
2335
  input_dir,
2336
  models,
 
2356
  logger.info(f"Loading in {len(custom_checkpoints)} custom states")
2357
  for index, obj in enumerate(self._custom_objects):
2358
  load_custom_state(obj, input_dir, index)
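# Illustrative sketch (not part of the original source): automatic checkpoint
# naming with a rotation limit; checkpoints land in
# `<project_dir>/checkpoints/checkpoint_<iteration>` and `load_state()` with no
# argument restores the most recent one.
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

project_config = ProjectConfiguration(project_dir="runs", automatic_checkpoint_naming=True, total_limit=3)
accelerator = Accelerator(project_config=project_config)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state()  # e.g. runs/checkpoints/checkpoint_0
accelerator.load_state()  # picks the latest checkpoint folder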
 
2359
  def free_memory(self):
2360
  """
2361
  Will release all references to the internal objects stored and call the garbage collector. You should call this
2362
  method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0.
 
2363
  Example:
 
2364
  ```python
2365
  >>> from accelerate import Accelerator
 
2366
  >>> accelerator = Accelerator()
2367
  >>> model, optimizer, scheduler = ...
2368
  >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
 
2377
  self.deepspeed_engine_wrapped = None
2378
  self.step = 0
2379
  release_memory()
 
2380
  def clear(self):
2381
  """
2382
  Alias for [`Accelerator.free_memory`]; releases all references to the internal objects stored and calls the
2383
  garbage collector. You should call this method between two trainings with different models/optimizers.
 
2384
  Example:
 
2385
  ```python
2386
  >>> from accelerate import Accelerator
 
2387
  >>> accelerator = Accelerator()
2388
  >>> model, optimizer, scheduler = ...
2389
  >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
 
2392
  ```
2393
  """
2394
  self.free_memory()
 
2395
  def _get_named_parameters(self, *args):
2396
  named_parameters = {}
2397
  for obj in args:
 
2399
  obj = extract_model_from_parallel(obj)
2400
  named_parameters.update({n: p for n, p in obj.named_parameters()})
2401
  return named_parameters
 
2402
  def _get_devices(self, *args):
2403
  model_device = None
2404
  optimizer_device = None
 
2415
  optimizer_device = param_group["params"][0].device
2416
  break
2417
  return (model_device, optimizer_device)
 
2418
  def get_state_dict(self, model, unwrap=True):
2419
  """
2420
  Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full
2421
  precision.
 
2422
  Args:
2423
  model (`torch.nn.Module`):
2424
  A PyTorch model sent through [`Accelerator.prepare`]
2425
  unwrap (`bool`, *optional*, defaults to `True`):
2426
  Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict
 
2427
  Returns:
2428
  `dict`: The state dictionary of the model potentially without full precision.
 
2429
  Example:
 
2430
  ```python
2431
  >>> import torch
2432
  >>> from accelerate import Accelerator
 
2433
  >>> accelerator = Accelerator()
2434
  >>> net = torch.nn.Linear(2, 2)
2435
  >>> net = accelerator.prepare(net)
 
2449
  )
2450
  else:
2451
  from deepspeed.checkpoint.utils import clone_tensors_for_torch_save
 
2452
  state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict())
2453
  elif self.distributed_type == DistributedType.FSDP:
2454
  from torch.distributed.fsdp import FullStateDictConfig, StateDictType
2455
  from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
 
2456
  full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
2457
  with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
2458
  state_dict = model.state_dict()
 
2460
  if unwrap:
2461
  model = self.unwrap_model(model)
2462
  state_dict = model.state_dict()
 
2463
  return state_dict
 
2464
  def register_for_checkpointing(self, *objects):
2465
  """
2466
  Makes note of `objects` and will save or load them in during `save_state` or `load_state`.
 
2467
  These should be utilized when the state is being loaded or saved in the same script; this mechanism is not designed to be
2468
  used in different scripts.
 
2469
  <Tip>
 
2470
  Every `object` must have a `load_state_dict` and `state_dict` function to be stored.
 
2471
  </Tip>
 
2472
  Example:
 
2473
  ```python
2474
  >>> from accelerate import Accelerator
 
2475
  >>> accelerator = Accelerator()
2476
  >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function.
2477
  >>> obj = CustomObject()
 
2489
  err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`"
2490
  raise ValueError(err)
2491
  self._custom_objects.extend(objects)
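# Illustrative sketch (not part of the original source): any object exposing
# `state_dict` and `load_state_dict` can be registered; `StepCounter` is a
# hypothetical example.
class StepCounter:
    def __init__(self):
        self.steps = 0

    def state_dict(self):
        return {"steps": self.steps}

    def load_state_dict(self, state):
        self.steps = state["steps"]

counter = StepCounter()
accelerator.register_for_checkpointing(counter)
accelerator.save_state("checkpoints/latest")  # `counter` is saved alongside the model state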
 
2492
  @contextmanager
2493
  def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None):
2494
  """
2495
  Will apply automatic mixed precision inside the block wrapped by this context manager, if it is enabled. Nothing
2496
  different will happen otherwise.
 
2497
  A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is
2498
  useful in blocks under `autocast` where you want to revert to fp32.
 
2499
  Example:
 
2500
  ```python
2501
  >>> from accelerate import Accelerator
 
2502
  >>> accelerator = Accelerator(mixed_precision="fp16")
2503
  >>> with accelerator.autocast():
2504
  ... train()
 
2520
  autocast_context.__enter__()
2521
  yield
2522
  autocast_context.__exit__(*sys.exc_info())
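# Illustrative sketch (not part of the original source): overriding the default
# handler to run a numerically sensitive block in full precision;
# `sensitive_loss_fn` is a hypothetical user function.
from accelerate.utils import AutocastKwargs

with accelerator.autocast():
    hidden = model(inputs)  # runs in fp16/bf16 when mixed precision is enabled
    with accelerator.autocast(autocast_handler=AutocastKwargs(enabled=False)):
        loss = sensitive_loss_fn(hidden, targets)  # reverts to fp32 inside this block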
 
2523
  @property
2524
  def optimizer_step_was_skipped(self):
2525
  """
 
2530
  if optimizer.step_was_skipped:
2531
  return True
2532
  return False
 
2533
  def skip_first_batches(self, dataloader, num_batches: int = 0):
2534
  """
2535
  Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
 
2536
  Args:
2537
  dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches.
2538
  num_batches (`int`, *optional*, defaults to 0): The number of batches to skip
 
2539
  Example:
 
2540
  ```python
2541
  >>> from accelerate import Accelerator
 
2542
  >>> accelerator = Accelerator()
2543
  >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
2544
  >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2)
 
2549
  ... loss = loss_func(output, target)
2550
  ... accelerator.backward(loss)
2551
  ... optimizer.step()
 
2552
  >>> # subsequent epochs
2553
  >>> for input, target in dataloader:
2554
  ... optimizer.zero_grad()
 
2556
  ```
2557
  """
2558
  return skip_first_batches(dataloader, num_batches=num_batches)
 
2559
  def __deepcopy__(self, memo):
2560
  logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.")
2561
  return self
 
2562
  def verify_device_map(self, model: torch.nn.Module) -> bool:
2563
  """
2564
  Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`.
 
2567
  for m in model.modules():
2568
  if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1:
2569
  return True
 
2570
  return False
src/big_modeling.py CHANGED
@@ -1,86 +1,66 @@
1
  logger = logging.getLogger(__name__)
2
-
3
-
4
  @contextmanager
5
  def init_empty_weights(include_buffers: bool = None):
6
  """
7
  A context manager under which models are initialized with all parameters on the meta device, therefore creating an
8
  empty model. Useful when just initializing the model would blow the available RAM.
9
-
10
  Args:
11
  include_buffers (`bool`, *optional*):
12
  Whether or not to also put all buffers on the meta device while initializing.
13
-
14
  Example:
15
-
16
  ```python
17
  import torch.nn as nn
18
  from accelerate import init_empty_weights
19
-
20
  # Initialize a model with 100 billion parameters in no time and without using any RAM.
21
  with init_empty_weights():
22
  tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
23
  ```
24
-
25
  <Tip warning={true}>
26
-
27
  Any model created under this context manager has no weights. As such you can't do something like
28
  `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
29
-
30
  </Tip>
31
  """
32
  if include_buffers is None:
33
  include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
34
  with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
35
  yield f
36
-
37
-
38
  @contextmanager
39
  def init_on_device(device: torch.device, include_buffers: bool = None):
40
  """
41
  A context manager under which models are initialized with all parameters on the specified device.
42
-
43
  Args:
44
  device (`torch.device`):
45
  Device to initialize all parameters on.
46
  include_buffers (`bool`, *optional*):
47
  Whether or not to also put all buffers on the meta device while initializing.
48
-
49
  Example:
50
-
51
  ```python
52
  import torch.nn as nn
53
  from accelerate import init_on_device
54
-
55
  with init_on_device(device=torch.device("cuda")):
56
  tst = nn.Linear(100, 100)  # on `cuda` device
57
  ```
58
  """
59
  if include_buffers is None:
60
  include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
61
-
62
  # TODO(shingjan): remove the torch version check once older versions are deprecated
63
  if is_torch_version(">=", "2.0") and include_buffers:
64
  with device:
65
  yield
66
  return
67
-
68
  old_register_parameter = nn.Module.register_parameter
69
  if include_buffers:
70
  old_register_buffer = nn.Module.register_buffer
71
-
72
  def register_empty_parameter(module, name, param):
73
  old_register_parameter(module, name, param)
74
  if param is not None:
75
  param_cls = type(module._parameters[name])
76
  kwargs = module._parameters[name].__dict__
77
  module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
78
-
79
  def register_empty_buffer(module, name, buffer, persistent=True):
80
  old_register_buffer(module, name, buffer, persistent=persistent)
81
  if buffer is not None:
82
  module._buffers[name] = module._buffers[name].to(device)
83
-
84
  # Patch tensor creation
85
  if include_buffers:
86
  tensor_constructors_to_patch = {
@@ -89,14 +69,11 @@ def init_on_device(device: torch.device, include_buffers: bool = None):
89
  }
90
  else:
91
  tensor_constructors_to_patch = {}
92
-
93
  def patch_tensor_constructor(fn):
94
  def wrapper(*args, **kwargs):
95
  kwargs["device"] = device
96
  return fn(*args, **kwargs)
97
-
98
  return wrapper
99
-
100
  try:
101
  nn.Module.register_parameter = register_empty_parameter
102
  if include_buffers:
@@ -110,8 +87,6 @@ def init_on_device(device: torch.device, include_buffers: bool = None):
110
  nn.Module.register_buffer = old_register_buffer
111
  for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
112
  setattr(torch, torch_function_name, old_torch_function)
113
-
114
-
115
  def cpu_offload(
116
  model: nn.Module,
117
  execution_device: Optional[torch.device] = None,
@@ -123,7 +98,6 @@ def cpu_offload(
123
  Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
124
  copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
125
  state dict and put on the execution device passed as they are needed, then offloaded again.
126
-
127
  Args:
128
  model (`torch.nn.Module`):
129
  The model to offload.
@@ -144,7 +118,6 @@ def cpu_offload(
144
  execution_device = next(iter(model.parameters())).device
145
  if state_dict is None:
146
  state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
147
-
148
  add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
149
  attach_align_device_hook(
150
  model,
@@ -154,10 +127,7 @@ def cpu_offload(
154
  weights_map=state_dict,
155
  preload_module_classes=preload_module_classes,
156
  )
157
-
158
  return model
159
-
160
-
161
  def cpu_offload_with_hook(
162
  model: torch.nn.Module,
163
  execution_device: Optional[Union[int, str, torch.device]] = None,
@@ -167,7 +137,6 @@ def cpu_offload_with_hook(
167
  Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
168
  [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
169
  the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
170
-
171
  Args:
172
  model (`torch.nn.Module`):
173
  The model to offload.
@@ -177,21 +146,17 @@ def cpu_offload_with_hook(
177
  prev_module_hook (`UserCpuOffloadHook`, *optional*):
178
  The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
179
  offload method will be called just before the forward of the model to which this hook is attached.
180
-
181
  Example:
182
-
183
  ```py
184
  model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
185
  model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
186
  model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
187
-
188
  hid_1 = model_1(input)
189
  for i in range(50):
190
  # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
191
  hid_2 = model_2(hid_1)
192
  # model2 is offloaded to the CPU just before this forward.
193
  hid_3 = model_3(hid_3)
194
-
195
  # For model3, you need to manually call the hook offload method.
196
  hook_3.offload()
197
  ```
@@ -200,8 +165,6 @@ def cpu_offload_with_hook(
200
  add_hook_to_module(model, hook, append=True)
201
  user_hook = UserCpuOffloadHook(model, hook)
202
  return model, user_hook
203
-
204
-
205
  def disk_offload(
206
  model: nn.Module,
207
  offload_dir: Union[str, os.PathLike],
@@ -213,7 +176,6 @@ def disk_offload(
213
  Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
214
  memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
215
  put on the execution device passed as they are needed, then offloaded again.
216
-
217
  Args:
218
  model (`torch.nn.Module`): The model to offload.
219
  offload_dir (`str` or `os.PathLike`):
@@ -234,7 +196,6 @@ def disk_offload(
234
  if execution_device is None:
235
  execution_device = next(iter(model.parameters())).device
236
  weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
237
-
238
  add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
239
  attach_align_device_hook(
240
  model,
@@ -244,10 +205,7 @@ def disk_offload(
244
  weights_map=weights_map,
245
  preload_module_classes=preload_module_classes,
246
  )
247
-
248
  return model
249
-
250
-
251
  def dispatch_model(
252
  model: nn.Module,
253
  device_map: Dict[str, Union[str, int, torch.device]],
@@ -263,7 +221,6 @@ def dispatch_model(
263
  """
264
  Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
265
  the CPU or even the disk.
266
-
267
  Args:
268
  model (`torch.nn.Module`):
269
  The model to dispatch.
@@ -295,12 +252,10 @@ def dispatch_model(
295
  """
296
  # Error early if the device map is incomplete.
297
  check_device_map(model, device_map)
298
-
299
  # for backward compatibility
300
  is_bnb_quantized = (
301
  getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
302
  ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
303
-
304
  # We attach hooks if the device_map has at least 2 different devices or if
305
  # force_hooks is set to `True`. Otherwise, the model is already loaded
306
  # in the unique device and the user can decide where to dispatch the model.
@@ -311,12 +266,10 @@ def dispatch_model(
311
  main_device = "cpu"
312
  else:
313
  main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
314
-
315
  if main_device != "cpu":
316
  cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
317
  if state_dict is None and len(cpu_modules) > 0:
318
  state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
319
-
320
  disk_modules = [name for name, device in device_map.items() if device == "disk"]
321
  if offload_dir is None and offload_index is None and len(disk_modules) > 0:
322
  raise ValueError(
@@ -330,7 +283,6 @@ def dispatch_model(
330
  ):
331
  disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
332
  offload_state_dict(offload_dir, disk_state_dict)
333
-
334
  execution_device = {
335
  name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
336
  }
@@ -345,7 +297,6 @@ def dispatch_model(
345
  )
346
  else:
347
  weights_map = None
348
-
349
  tied_params = find_tied_parameters(model)
350
  attach_align_device_hook_on_blocks(
351
  model,
@@ -356,7 +307,6 @@ def dispatch_model(
356
  skip_keys=skip_keys,
357
  preload_module_classes=preload_module_classes,
358
  )
359
-
360
  # warn if there is any params on the meta device
361
  offloaded_devices_str = " and ".join(
362
  [device for device in set(device_map.values()) if device in ("cpu", "disk")]
@@ -365,10 +315,8 @@ def dispatch_model(
365
  logging.warning(
366
  f"Some parameters are on the meta device device because they were offloaded to the {offloaded_devices_str}."
367
  )
368
-
369
  # Attaching the hook may break tied weights, so we retie them
370
  retie_parameters(model, tied_params)
371
-
372
  # add warning to cuda and to method
373
  def add_warning(fn, model):
374
  @wraps(fn)
@@ -378,15 +326,12 @@ def dispatch_model(
378
  if param.device == torch.device("meta"):
379
  raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
380
  return fn(*args, **kwargs)
381
-
382
  return wrapper
383
-
384
  model.to = add_warning(model.to, model)
385
  if is_npu_available():
386
  model.npu = add_warning(model.npu, model)
387
  else:
388
  model.cuda = add_warning(model.cuda, model)
389
-
390
  else:
391
  device = list(device_map.values())[0]
392
  # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
@@ -400,8 +345,6 @@ def dispatch_model(
400
  )
401
  model.hf_device_map = device_map
402
  return model
403
-
404
-
405
  def load_checkpoint_and_dispatch(
406
  model: nn.Module,
407
  checkpoint: Union[str, os.PathLike],
@@ -419,7 +362,6 @@ def load_checkpoint_and_dispatch(
419
  """
420
  Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
421
  loaded and adds the various hooks that will make this model run properly (even if split across devices).
422
-
423
  Args:
424
  model (`torch.nn.Module`): The model in which we want to load a checkpoint.
425
  checkpoint (`str` or `os.PathLike`):
@@ -430,7 +372,6 @@ def load_checkpoint_and_dispatch(
430
  device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
431
  A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
432
  name, once a given module name is inside, every submodule of it will be sent to the same device.
433
-
434
  To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
435
  information about each option see [here](big_modeling#designing-a-device-map).
436
  max_memory (`Dict`, *optional*):
@@ -460,23 +401,18 @@ def load_checkpoint_and_dispatch(
460
  force_hooks (`bool`, *optional*, defaults to `False`):
461
  Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
462
  single device.
463
-
464
  Example:
465
-
466
  ```python
467
  >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
468
  >>> from huggingface_hub import hf_hub_download
469
  >>> from transformers import AutoConfig, AutoModelForCausalLM
470
-
471
  >>> # Download the Weights
472
  >>> checkpoint = "EleutherAI/gpt-j-6B"
473
  >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
474
-
475
  >>> # Create a model and initialize it with empty weights
476
  >>> config = AutoConfig.from_pretrained(checkpoint)
477
  >>> with init_empty_weights():
478
  ... model = AutoModelForCausalLM.from_config(config)
479
-
480
  >>> # Load the checkpoint and dispatch it to the right devices
481
  >>> model = load_checkpoint_and_dispatch(
482
  ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
 
1
  logger = logging.getLogger(__name__)
 
 
2
  @contextmanager
3
  def init_empty_weights(include_buffers: bool = None):
4
  """
5
  A context manager under which models are initialized with all parameters on the meta device, therefore creating an
6
  empty model. Useful when just initializing the model would blow the available RAM.
 
7
  Args:
8
  include_buffers (`bool`, *optional*):
9
  Whether or not to also put all buffers on the meta device while initializing.
 
10
  Example:
 
11
  ```python
12
  import torch.nn as nn
13
  from accelerate import init_empty_weights
 
14
  # Initialize a model with 100 billion parameters in no time and without using any RAM.
15
  with init_empty_weights():
16
  tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
17
  ```
 
18
  <Tip warning={true}>
 
19
  Any model created under this context manager has no weights. As such you can't do something like
20
  `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
 
21
  </Tip>
22
  """
23
  if include_buffers is None:
24
  include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
25
  with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
26
  yield f
 
 
27
  @contextmanager
28
  def init_on_device(device: torch.device, include_buffers: bool = None):
29
  """
30
  A context manager under which models are initialized with all parameters on the specified device.
 
31
  Args:
32
  device (`torch.device`):
33
  Device to initialize all parameters on.
34
  include_buffers (`bool`, *optional*):
35
  Whether or not to also put all buffers on the meta device while initializing.
 
36
  Example:
 
37
  ```python
38
  import torch.nn as nn
39
  from accelerate import init_on_device
 
40
  with init_on_device(device=torch.device("cuda")):
41
  tst = nn.Linear(100, 100) # on `cuda` device
42
  ```
43
  """
44
  if include_buffers is None:
45
  include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
 
46
  # TODO(shingjan): remove the torch version check once older versions are deprecated
47
  if is_torch_version(">=", "2.0") and include_buffers:
48
  with device:
49
  yield
50
  return
 
51
  old_register_parameter = nn.Module.register_parameter
52
  if include_buffers:
53
  old_register_buffer = nn.Module.register_buffer
 
54
  def register_empty_parameter(module, name, param):
55
  old_register_parameter(module, name, param)
56
  if param is not None:
57
  param_cls = type(module._parameters[name])
58
  kwargs = module._parameters[name].__dict__
59
  module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
 
60
  def register_empty_buffer(module, name, buffer, persistent=True):
61
  old_register_buffer(module, name, buffer, persistent=persistent)
62
  if buffer is not None:
63
  module._buffers[name] = module._buffers[name].to(device)
 
64
  # Patch tensor creation
65
  if include_buffers:
66
  tensor_constructors_to_patch = {
 
69
  }
70
  else:
71
  tensor_constructors_to_patch = {}
 
72
  def patch_tensor_constructor(fn):
73
  def wrapper(*args, **kwargs):
74
  kwargs["device"] = device
75
  return fn(*args, **kwargs)
 
76
  return wrapper
 
77
  try:
78
  nn.Module.register_parameter = register_empty_parameter
79
  if include_buffers:
 
87
  nn.Module.register_buffer = old_register_buffer
88
  for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
89
  setattr(torch, torch_function_name, old_torch_function)
 
 
90
  def cpu_offload(
91
  model: nn.Module,
92
  execution_device: Optional[torch.device] = None,
 
98
  Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
99
  copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
100
  state dict and put on the execution device passed as they are needed, then offloaded again.
 
101
  Args:
102
  model (`torch.nn.Module`):
103
  The model to offload.
 
118
  execution_device = next(iter(model.parameters())).device
119
  if state_dict is None:
120
  state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
 
121
  add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
122
  attach_align_device_hook(
123
  model,
 
127
  weights_map=state_dict,
128
  preload_module_classes=preload_module_classes,
129
  )
 
130
  return model
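
A minimal usage sketch of the full CPU offload above, assuming `cpu_offload` is importable from the top-level `accelerate` package and that a single CUDA device plus a toy model are available (both are placeholders, not part of this file):

```python
# Hedged sketch: the toy model and "cuda:0" are illustrative assumptions.
import torch
import torch.nn as nn
from accelerate import cpu_offload

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
model = cpu_offload(model, execution_device=torch.device("cuda:0"))

# Weights stay in a CPU-side state dict; each submodule is moved to the GPU
# only for its forward pass, then offloaded again.
output = model(torch.randn(2, 16))
```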
 
 
131
  def cpu_offload_with_hook(
132
  model: torch.nn.Module,
133
  execution_device: Optional[Union[int, str, torch.device]] = None,
 
137
  Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
138
  [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
139
  the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
 
140
  Args:
141
  model (`torch.nn.Module`):
142
  The model to offload.
 
146
  prev_module_hook (`UserCpuOffloadHook`, *optional*):
147
  The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
148
  offload method will be called just before the forward of the model to which this hook is attached.
 
149
  Example:
 
150
  ```py
151
  model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
152
  model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
153
  model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
 
154
  hid_1 = model_1(input)
155
  for i in range(50):
156
  # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
157
  hid_2 = model_2(hid_1)
158
  # model2 is offloaded to the CPU just before this forward.
159
  hid_3 = model_3(hid_2)
 
160
  # For model3, you need to manually call the hook offload method.
161
  hook_3.offload()
162
  ```
 
165
  add_hook_to_module(model, hook, append=True)
166
  user_hook = UserCpuOffloadHook(model, hook)
167
  return model, user_hook
 
 
168
  def disk_offload(
169
  model: nn.Module,
170
  offload_dir: Union[str, os.PathLike],
 
176
  Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
177
  a memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
178
  put on the execution device passed as they are needed, then offloaded again.
 
179
  Args:
180
  model (`torch.nn.Module`): The model to offload.
181
  offload_dir (`str` or `os.PathLike`):
 
196
  if execution_device is None:
197
  execution_device = next(iter(model.parameters())).device
198
  weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
 
199
  add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
200
  attach_align_device_hook(
201
  model,
 
205
  weights_map=weights_map,
206
  preload_module_classes=preload_module_classes,
207
  )
 
208
  return model
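
A minimal usage sketch of `disk_offload`, under the same assumptions as above (toy model, one CUDA device) plus a scratch folder name chosen for illustration:

```python
# Hedged sketch: "offloaded_weights" and the toy model are placeholders.
import torch
import torch.nn as nn
from accelerate import disk_offload

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
model = disk_offload(model, offload_dir="offloaded_weights", execution_device=torch.device("cuda:0"))

# Parameters are memory-mapped from `offloaded_weights/` and streamed to the GPU as needed.
output = model(torch.randn(2, 16))
```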
 
 
209
  def dispatch_model(
210
  model: nn.Module,
211
  device_map: Dict[str, Union[str, int, torch.device]],
 
221
  """
222
  Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
223
  the CPU or even the disk.
 
224
  Args:
225
  model (`torch.nn.Module`):
226
  The model to dispatch.
 
252
  """
253
  # Error early if the device map is incomplete.
254
  check_device_map(model, device_map)
 
255
  # for backward compatibility
256
  is_bnb_quantized = (
257
  getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
258
  ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
 
259
  # We attach hooks if the device_map has at least 2 different devices or if
260
  # force_hooks is set to `True`. Otherwise, the model is already loaded
261
  # in the unique device and the user can decide where to dispatch the model.
 
266
  main_device = "cpu"
267
  else:
268
  main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
 
269
  if main_device != "cpu":
270
  cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
271
  if state_dict is None and len(cpu_modules) > 0:
272
  state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
 
273
  disk_modules = [name for name, device in device_map.items() if device == "disk"]
274
  if offload_dir is None and offload_index is None and len(disk_modules) > 0:
275
  raise ValueError(
 
283
  ):
284
  disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
285
  offload_state_dict(offload_dir, disk_state_dict)
 
286
  execution_device = {
287
  name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
288
  }
 
297
  )
298
  else:
299
  weights_map = None
 
300
  tied_params = find_tied_parameters(model)
301
  attach_align_device_hook_on_blocks(
302
  model,
 
307
  skip_keys=skip_keys,
308
  preload_module_classes=preload_module_classes,
309
  )
 
310
  # warn if there is any params on the meta device
311
  offloaded_devices_str = " and ".join(
312
  [device for device in set(device_map.values()) if device in ("cpu", "disk")]
 
315
  logging.warning(
316
  f"Some parameters are on the meta device device because they were offloaded to the {offloaded_devices_str}."
317
  )
 
318
  # Attaching the hook may break tied weights, so we retie them
319
  retie_parameters(model, tied_params)
 
320
  # add warning to cuda and to method
321
  def add_warning(fn, model):
322
  @wraps(fn)
 
326
  if param.device == torch.device("meta"):
327
  raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
328
  return fn(*args, **kwargs)
 
329
  return wrapper
 
330
  model.to = add_warning(model.to, model)
331
  if is_npu_available():
332
  model.npu = add_warning(model.npu, model)
333
  else:
334
  model.cuda = add_warning(model.cuda, model)
 
335
  else:
336
  device = list(device_map.values())[0]
337
  # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
 
345
  )
346
  model.hf_device_map = device_map
347
  return model
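
A small sketch of `dispatch_model` with a hand-written `device_map`, assuming one CUDA GPU (index `0`); the submodule names `"0"`, `"1"`, `"2"` come from the toy `nn.Sequential` used here and are not from this file:

```python
# Hedged sketch: the device map below is hypothetical and tied to this toy model.
import torch
import torch.nn as nn
from accelerate import dispatch_model

model = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 16), nn.Linear(16, 4))
# Keys are submodule names, values are execution devices: a GPU index, "cpu", or "disk".
device_map = {"0": 0, "1": "cpu", "2": "cpu"}

model = dispatch_model(model, device_map=device_map)
print(model.hf_device_map)  # the map is stored back on the model
output = model(torch.randn(2, 16))
```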
 
 
348
  def load_checkpoint_and_dispatch(
349
  model: nn.Module,
350
  checkpoint: Union[str, os.PathLike],
 
362
  """
363
  Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
364
  loaded and adds the various hooks that will make this model run properly (even if split across devices).
 
365
  Args:
366
  model (`torch.nn.Module`): The model in which we want to load a checkpoint.
367
  checkpoint (`str` or `os.PathLike`):
 
372
  device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
373
  A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
374
  name, once a given module name is inside, every submodule of it will be sent to the same device.
 
375
  To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
376
  information about each option see [here](big_modeling#designing-a-device-map).
377
  max_memory (`Dict`, *optional*):
 
401
  force_hooks (`bool`, *optional*, defaults to `False`):
402
  Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
403
  single device.
 
404
  Example:
 
405
  ```python
406
  >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
407
  >>> from huggingface_hub import hf_hub_download
408
  >>> from transformers import AutoConfig, AutoModelForCausalLM
 
409
  >>> # Download the Weights
410
  >>> checkpoint = "EleutherAI/gpt-j-6B"
411
  >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
 
412
  >>> # Create a model and initialize it with empty weights
413
  >>> config = AutoConfig.from_pretrained(checkpoint)
414
  >>> with init_empty_weights():
415
  ... model = AutoModelForCausalLM.from_config(config)
 
416
  >>> # Load the checkpoint and dispatch it to the right devices
417
  >>> model = load_checkpoint_and_dispatch(
418
  ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
src/checkpointing.py CHANGED
@@ -1,6 +1,4 @@
1
  logger = get_logger(__name__)
2
-
3
-
4
  def save_accelerator_state(
5
  output_dir: str,
6
  model_states: List[dict],
@@ -14,14 +12,10 @@ def save_accelerator_state(
14
  ):
15
  """
16
  Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
17
-
18
  <Tip>
19
-
20
  If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
21
  `pickle`.
22
-
23
  </Tip>
24
-
25
  Args:
26
  output_dir (`str` or `os.PathLike`):
27
  The name of the folder to save all relevant weights and states.
@@ -71,14 +65,11 @@ def save_accelerator_state(
71
  output_sampler_file = output_dir.joinpath(sampler_name)
72
  # Only save if we have our custom sampler
73
  from .data_loader import IterableDatasetShard, SeedableRandomSampler
74
-
75
  if isinstance(dataloader.dataset, IterableDatasetShard):
76
  sampler = dataloader.sampler.sampler
77
-
78
  if isinstance(sampler, SeedableRandomSampler):
79
  save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
80
  logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
81
-
82
  # GradScaler state
83
  if scaler is not None:
84
  state = scaler.state_dict()
@@ -101,8 +92,6 @@ def save_accelerator_state(
101
  torch.save(states, output_states_file)
102
  logger.info(f"Random states saved in {output_states_file}")
103
  return output_dir
104
-
105
-
106
  def load_accelerator_state(
107
  input_dir,
108
  models,
@@ -116,7 +105,6 @@ def load_accelerator_state(
116
  ):
117
  """
118
  Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
119
-
120
  Args:
121
  input_dir (`str` or `os.PathLike`):
122
  The name of the folder to load all relevant weights and states.
@@ -143,7 +131,6 @@ def load_accelerator_state(
143
  map_location = "cpu"
144
  elif map_location == "on_device":
145
  map_location = PartialState().device
146
-
147
  input_dir = Path(input_dir)
148
  # Model states
149
  for i, model in enumerate(models):
@@ -157,7 +144,6 @@ def load_accelerator_state(
157
  state_dict = torch.load(input_model_file, map_location=map_location)
158
  models[i].load_state_dict(state_dict, **load_model_func_kwargs)
159
  logger.info("All model weights loaded successfully")
160
-
161
  # Optimizer states
162
  for i, opt in enumerate(optimizers):
163
  optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
@@ -165,33 +151,27 @@ def load_accelerator_state(
165
  optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
166
  optimizers[i].load_state_dict(optimizer_state)
167
  logger.info("All optimizer states loaded successfully")
168
-
169
  # Scheduler states
170
  for i, scheduler in enumerate(schedulers):
171
  scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
172
  input_scheduler_file = input_dir.joinpath(scheduler_name)
173
  scheduler.load_state_dict(torch.load(input_scheduler_file))
174
  logger.info("All scheduler states loaded successfully")
175
-
176
  for i, dataloader in enumerate(dataloaders):
177
  sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
178
  input_sampler_file = input_dir.joinpath(sampler_name)
179
  # Only load if we have our custom sampler
180
  from .data_loader import IterableDatasetShard, SeedableRandomSampler
181
-
182
  if isinstance(dataloader.dataset, IterableDatasetShard):
183
  sampler = dataloader.sampler.sampler
184
-
185
  if isinstance(sampler, SeedableRandomSampler):
186
  dataloader.sampler.sampler = torch.load(input_sampler_file)
187
  logger.info("All dataloader sampler states loaded successfully")
188
-
189
  # GradScaler state
190
  if scaler is not None:
191
  input_scaler_file = input_dir.joinpath(SCALER_NAME)
192
  scaler.load_state_dict(torch.load(input_scaler_file))
193
  logger.info("GradScaler state loaded successfully")
194
-
195
  # Random states
196
  try:
197
  states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
@@ -207,8 +187,6 @@ def load_accelerator_state(
207
  logger.info("All random states loaded successfully")
208
  except Exception:
209
  logger.info("Could not load random states")
210
-
211
-
212
  def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
213
  """
214
  Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
@@ -217,8 +195,6 @@ def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False
217
  save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
218
  logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
219
  save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
220
-
221
-
222
  def load_custom_state(obj, path, index: int = 0):
223
  """
224
  Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
 
1
  logger = get_logger(__name__)
 
 
2
  def save_accelerator_state(
3
  output_dir: str,
4
  model_states: List[dict],
 
12
  ):
13
  """
14
  Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
 
15
  <Tip>
 
16
  If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
17
  `pickle`.
 
18
  </Tip>
 
19
  Args:
20
  output_dir (`str` or `os.PathLike`):
21
  The name of the folder to save all relevant weights and states.
 
65
  output_sampler_file = output_dir.joinpath(sampler_name)
66
  # Only save if we have our custom sampler
67
  from .data_loader import IterableDatasetShard, SeedableRandomSampler
 
68
  if isinstance(dataloader.dataset, IterableDatasetShard):
69
  sampler = dataloader.sampler.sampler
 
70
  if isinstance(sampler, SeedableRandomSampler):
71
  save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
72
  logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
 
73
  # GradScaler state
74
  if scaler is not None:
75
  state = scaler.state_dict()
 
92
  torch.save(states, output_states_file)
93
  logger.info(f"Random states saved in {output_states_file}")
94
  return output_dir
 
 
95
  def load_accelerator_state(
96
  input_dir,
97
  models,
 
105
  ):
106
  """
107
  Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
 
108
  Args:
109
  input_dir (`str` or `os.PathLike`):
110
  The name of the folder to load all relevant weights and states.
 
131
  map_location = "cpu"
132
  elif map_location == "on_device":
133
  map_location = PartialState().device
 
134
  input_dir = Path(input_dir)
135
  # Model states
136
  for i, model in enumerate(models):
 
144
  state_dict = torch.load(input_model_file, map_location=map_location)
145
  models[i].load_state_dict(state_dict, **load_model_func_kwargs)
146
  logger.info("All model weights loaded successfully")
 
147
  # Optimizer states
148
  for i, opt in enumerate(optimizers):
149
  optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
 
151
  optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
152
  optimizers[i].load_state_dict(optimizer_state)
153
  logger.info("All optimizer states loaded successfully")
 
154
  # Scheduler states
155
  for i, scheduler in enumerate(schedulers):
156
  scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
157
  input_scheduler_file = input_dir.joinpath(scheduler_name)
158
  scheduler.load_state_dict(torch.load(input_scheduler_file))
159
  logger.info("All scheduler states loaded successfully")
 
160
  for i, dataloader in enumerate(dataloaders):
161
  sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
162
  input_sampler_file = input_dir.joinpath(sampler_name)
163
  # Only load if we have our custom sampler
164
  from .data_loader import IterableDatasetShard, SeedableRandomSampler
 
165
  if isinstance(dataloader.dataset, IterableDatasetShard):
166
  sampler = dataloader.sampler.sampler
 
167
  if isinstance(sampler, SeedableRandomSampler):
168
  dataloader.sampler.sampler = torch.load(input_sampler_file)
169
  logger.info("All dataloader sampler states loaded successfully")
 
170
  # GradScaler state
171
  if scaler is not None:
172
  input_scaler_file = input_dir.joinpath(SCALER_NAME)
173
  scaler.load_state_dict(torch.load(input_scaler_file))
174
  logger.info("GradScaler state loaded successfully")
 
175
  # Random states
176
  try:
177
  states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
 
187
  logger.info("All random states loaded successfully")
188
  except Exception:
189
  logger.info("Could not load random states")
 
 
190
  def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
191
  """
192
  Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
 
195
  save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
196
  logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
197
  save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
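
A small round-trip sketch for `save_custom_state` and its counterpart `load_custom_state`; the import path and the scheduler object are assumptions for illustration (any object exposing `state_dict`/`load_state_dict` would do):

```python
# Hedged sketch: paths and objects are placeholders.
import os
import torch
from accelerate.checkpointing import save_custom_state, load_custom_state

optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

os.makedirs("checkpoints", exist_ok=True)
save_custom_state(scheduler, "checkpoints", index=0)   # -> checkpoints/custom_checkpoint_0.pkl
load_custom_state(scheduler, "checkpoints", index=0)   # restores via scheduler.load_state_dict
```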
 
 
198
  def load_custom_state(obj, path, index: int = 0):
199
  """
200
  Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
src/commands/accelerate_cli.py CHANGED
@@ -2,7 +2,6 @@
2
  def main():
3
  parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
4
  subparsers = parser.add_subparsers(help="accelerate command helpers")
5
-
6
  # Register commands
7
  get_config_parser(subparsers=subparsers)
8
  estimate_command_parser(subparsers=subparsers)
@@ -10,17 +9,12 @@ def main():
10
  launch_command_parser(subparsers=subparsers)
11
  tpu_command_parser(subparsers=subparsers)
12
  test_command_parser(subparsers=subparsers)
13
-
14
  # Let's go
15
  args = parser.parse_args()
16
-
17
  if not hasattr(args, "func"):
18
  parser.print_help()
19
  exit(1)
20
-
21
  # Run
22
  args.func(args)
23
-
24
-
25
  if __name__ == "__main__":
26
  main()
 
2
  def main():
3
  parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
4
  subparsers = parser.add_subparsers(help="accelerate command helpers")
 
5
  # Register commands
6
  get_config_parser(subparsers=subparsers)
7
  estimate_command_parser(subparsers=subparsers)
 
9
  launch_command_parser(subparsers=subparsers)
10
  tpu_command_parser(subparsers=subparsers)
11
  test_command_parser(subparsers=subparsers)
 
12
  # Let's go
13
  args = parser.parse_args()
 
14
  if not hasattr(args, "func"):
15
  parser.print_help()
16
  exit(1)
 
17
  # Run
18
  args.func(args)
 
 
19
  if __name__ == "__main__":
20
  main()
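
Each subcommand parser wires itself in by calling `parser.set_defaults(func=...)` (as the config and default parsers below do), which is what makes `args.func(args)` dispatch correctly. A self-contained sketch of that pattern, not accelerate code:

```python
# Standalone illustration of the set_defaults(func=...) dispatch pattern.
from argparse import ArgumentParser


def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello", help="Print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser


parser = ArgumentParser("demo CLI", usage="demo <command> [<args>]", allow_abbrev=False)
subparsers = parser.add_subparsers(help="demo command helpers")
hello_command_parser(subparsers)

args = parser.parse_args(["hello", "--name", "accelerate"])
args.func(args)  # prints "hello accelerate"
```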
src/commands/config/cluster.py CHANGED
@@ -5,7 +5,6 @@ def get_cluster_input():
5
  ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
6
  _convert_distributed_mode,
7
  )
8
-
9
  machine_rank = 0
10
  num_machines = 1
11
  num_processes = 1
@@ -15,7 +14,6 @@ def get_cluster_input():
15
  rdzv_backend = "static"
16
  same_network = True
17
  debug = False
18
-
19
  if distributed_type in [
20
  DistributedType.MULTI_GPU,
21
  DistributedType.MULTI_NPU,
@@ -56,7 +54,6 @@ def get_cluster_input():
56
  default=False,
57
  error_message="Please enter yes or no.",
58
  )
59
-
60
  if distributed_type == DistributedType.NO:
61
  use_cpu = _ask_field(
62
  "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
@@ -68,7 +65,6 @@ def get_cluster_input():
68
  use_cpu = True
69
  else:
70
  use_cpu = False
71
-
72
  ipex_config = {}
73
  if use_cpu:
74
  ipex_config["ipex"] = _ask_field(
@@ -88,7 +84,6 @@ def get_cluster_input():
88
  default=False,
89
  error_message="Please enter yes or no.",
90
  )
91
-
92
  dynamo_config = {}
93
  use_dynamo = _ask_field(
94
  "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
@@ -110,7 +105,6 @@ def get_cluster_input():
110
  default=False,
111
  error_message="Please enter yes or no.",
112
  )
113
-
114
  if use_custom_options:
115
  dynamo_config[prefix + "mode"] = _ask_options(
116
  "Which mode do you want to use?",
@@ -130,7 +124,6 @@ def get_cluster_input():
130
  default=False,
131
  error_message="Please enter yes or no.",
132
  )
133
-
134
  use_mps = not use_cpu and is_mps_available()
135
  deepspeed_config = {}
136
  if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.NO] and not use_mps:
@@ -145,7 +138,6 @@ def get_cluster_input():
145
  assert (
146
  is_deepspeed_available()
147
  ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
148
-
149
  if distributed_type == DistributedType.DEEPSPEED:
150
  use_deepspeed_config = _ask_field(
151
  "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
@@ -166,7 +158,6 @@ def get_cluster_input():
166
  int,
167
  default=2,
168
  )
169
-
170
  deepspeed_devices = ["none", "cpu", "nvme"]
171
  if deepspeed_config["zero_stage"] >= 2:
172
  deepspeed_config["offload_optimizer_device"] = _ask_options(
@@ -223,7 +214,6 @@ def get_cluster_input():
223
  "When `zero3_init_flag` is set, it requires Transformers to be installed. "
224
  "Please run `pip3 install transformers`."
225
  )
226
-
227
  if num_machines > 1:
228
  launcher_query = "Which Type of launcher do you want to use?"
229
  deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
@@ -231,7 +221,6 @@ def get_cluster_input():
231
  DEEPSPEED_MULTINODE_LAUNCHERS,
232
  lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
233
  )
234
-
235
  if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
236
  deepspeed_config["deepspeed_hostfile"] = _ask_field(
237
  "DeepSpeed configures multi-node compute resources with hostfile. "
@@ -241,7 +230,6 @@ def get_cluster_input():
241
  "Please specify the location of hostfile: ",
242
  str,
243
  )
244
-
245
  is_exclusion_filter = _ask_field(
246
  "Do you want to specify exclusion filter string? [yes/NO]: ",
247
  _convert_yes_no_to_bool,
@@ -253,7 +241,6 @@ def get_cluster_input():
253
  "DeepSpeed exclusion filter string: ",
254
  str,
255
  )
256
-
257
  is_inclusion_filter = _ask_field(
258
  "Do you want to specify inclusion filter string? [yes/NO]: ",
259
  _convert_yes_no_to_bool,
@@ -265,7 +252,6 @@ def get_cluster_input():
265
  "DeepSpeed inclusion filter string: ",
266
  str,
267
  )
268
-
269
  fsdp_config = {}
270
  if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]:
271
  use_fsdp = _ask_field(
@@ -354,7 +340,6 @@ def get_cluster_input():
354
  default=True,
355
  error_message="Please enter yes or no.",
356
  )
357
-
358
  megatron_lm_config = {}
359
  if distributed_type in [DistributedType.MULTI_GPU]:
360
  use_megatron_lm = _ask_field(
@@ -380,7 +365,6 @@ def get_cluster_input():
380
  default=True,
381
  error_message="Please enter yes or no.",
382
  )
383
-
384
  megatron_lm_config[prefix + "pp_degree"] = _ask_field(
385
  "What is the Pipeline Parallelism degree/size? [1]:",
386
  int,
@@ -394,14 +378,12 @@ def get_cluster_input():
394
  default=1,
395
  error_message="Please enter an integer.",
396
  )
397
-
398
  megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
399
  "Do you want to enable selective activation recomputation? [YES/no]: ",
400
  _convert_yes_no_to_bool,
401
  default=True,
402
  error_message="Please enter yes or no.",
403
  )
404
-
405
  megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
406
  "Do you want to use distributed optimizer "
407
  "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
@@ -409,7 +391,6 @@ def get_cluster_input():
409
  default=True,
410
  error_message="Please enter yes or no.",
411
  )
412
-
413
  megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
414
  "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
415
  float,
@@ -425,7 +406,6 @@ def get_cluster_input():
425
  tpu_zone = None
426
  tpu_use_sudo = False
427
  tpu_use_cluster = False
428
-
429
  if distributed_type in [
430
  DistributedType.MULTI_CPU,
431
  DistributedType.MULTI_XPU,
@@ -453,12 +433,10 @@ def get_cluster_input():
453
  )
454
  else:
455
  num_processes = 1
456
-
457
  if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
458
  raise ValueError(
459
  f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
460
  )
461
-
462
  if (
463
  distributed_type
464
  in [
@@ -478,7 +456,6 @@ def get_cluster_input():
478
  f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
479
  default="all",
480
  )
481
-
482
  if distributed_type == DistributedType.TPU:
483
  mixed_precision = "no"
484
  main_training_function = _ask_field(
@@ -553,7 +530,6 @@ def get_cluster_input():
553
  "What environment variables do you wish to set in each pod, seperated by a comma: ",
554
  default="",
555
  ).split(",")
556
-
557
  else:
558
  main_training_function = "main"
559
  if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
@@ -564,17 +540,14 @@ def get_cluster_input():
564
  ["no", "fp16", "bf16", "fp8"],
565
  _convert_mixed_precision,
566
  )
567
-
568
  if use_dynamo and mixed_precision == "no" and not use_cpu:
569
  print(
570
  "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
571
  )
572
-
573
  if distributed_type == DistributedType.TPU and mixed_precision == "bf16":
574
  tpu_downcast_bf16 = _ask_field(
575
  "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
576
  )
577
-
578
  return ClusterConfig(
579
  compute_environment=ComputeEnvironment.LOCAL_MACHINE,
580
  distributed_type=distributed_type,
 
5
  ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
6
  _convert_distributed_mode,
7
  )
 
8
  machine_rank = 0
9
  num_machines = 1
10
  num_processes = 1
 
14
  rdzv_backend = "static"
15
  same_network = True
16
  debug = False
 
17
  if distributed_type in [
18
  DistributedType.MULTI_GPU,
19
  DistributedType.MULTI_NPU,
 
54
  default=False,
55
  error_message="Please enter yes or no.",
56
  )
 
57
  if distributed_type == DistributedType.NO:
58
  use_cpu = _ask_field(
59
  "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
 
65
  use_cpu = True
66
  else:
67
  use_cpu = False
 
68
  ipex_config = {}
69
  if use_cpu:
70
  ipex_config["ipex"] = _ask_field(
 
84
  default=False,
85
  error_message="Please enter yes or no.",
86
  )
 
87
  dynamo_config = {}
88
  use_dynamo = _ask_field(
89
  "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
 
105
  default=False,
106
  error_message="Please enter yes or no.",
107
  )
 
108
  if use_custom_options:
109
  dynamo_config[prefix + "mode"] = _ask_options(
110
  "Which mode do you want to use?",
 
124
  default=False,
125
  error_message="Please enter yes or no.",
126
  )
 
127
  use_mps = not use_cpu and is_mps_available()
128
  deepspeed_config = {}
129
  if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.NO] and not use_mps:
 
138
  assert (
139
  is_deepspeed_available()
140
  ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
 
141
  if distributed_type == DistributedType.DEEPSPEED:
142
  use_deepspeed_config = _ask_field(
143
  "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
 
158
  int,
159
  default=2,
160
  )
 
161
  deepspeed_devices = ["none", "cpu", "nvme"]
162
  if deepspeed_config["zero_stage"] >= 2:
163
  deepspeed_config["offload_optimizer_device"] = _ask_options(
 
214
  "When `zero3_init_flag` is set, it requires Transformers to be installed. "
215
  "Please run `pip3 install transformers`."
216
  )
 
217
  if num_machines > 1:
218
  launcher_query = "Which Type of launcher do you want to use?"
219
  deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
 
221
  DEEPSPEED_MULTINODE_LAUNCHERS,
222
  lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
223
  )
 
224
  if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
225
  deepspeed_config["deepspeed_hostfile"] = _ask_field(
226
  "DeepSpeed configures multi-node compute resources with hostfile. "
 
230
  "Please specify the location of hostfile: ",
231
  str,
232
  )
 
233
  is_exclusion_filter = _ask_field(
234
  "Do you want to specify exclusion filter string? [yes/NO]: ",
235
  _convert_yes_no_to_bool,
 
241
  "DeepSpeed exclusion filter string: ",
242
  str,
243
  )
 
244
  is_inclusion_filter = _ask_field(
245
  "Do you want to specify inclusion filter string? [yes/NO]: ",
246
  _convert_yes_no_to_bool,
 
252
  "DeepSpeed inclusion filter string: ",
253
  str,
254
  )
 
255
  fsdp_config = {}
256
  if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]:
257
  use_fsdp = _ask_field(
 
340
  default=True,
341
  error_message="Please enter yes or no.",
342
  )
 
343
  megatron_lm_config = {}
344
  if distributed_type in [DistributedType.MULTI_GPU]:
345
  use_megatron_lm = _ask_field(
 
365
  default=True,
366
  error_message="Please enter yes or no.",
367
  )
 
368
  megatron_lm_config[prefix + "pp_degree"] = _ask_field(
369
  "What is the Pipeline Parallelism degree/size? [1]:",
370
  int,
 
378
  default=1,
379
  error_message="Please enter an integer.",
380
  )
 
381
  megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
382
  "Do you want to enable selective activation recomputation? [YES/no]: ",
383
  _convert_yes_no_to_bool,
384
  default=True,
385
  error_message="Please enter yes or no.",
386
  )
 
387
  megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
388
  "Do you want to use distributed optimizer "
389
  "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
 
391
  default=True,
392
  error_message="Please enter yes or no.",
393
  )
 
394
  megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
395
  "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
396
  float,
 
406
  tpu_zone = None
407
  tpu_use_sudo = False
408
  tpu_use_cluster = False
 
409
  if distributed_type in [
410
  DistributedType.MULTI_CPU,
411
  DistributedType.MULTI_XPU,
 
433
  )
434
  else:
435
  num_processes = 1
 
436
  if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
437
  raise ValueError(
438
  f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
439
  )
 
440
  if (
441
  distributed_type
442
  in [
 
456
  f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
457
  default="all",
458
  )
 
459
  if distributed_type == DistributedType.TPU:
460
  mixed_precision = "no"
461
  main_training_function = _ask_field(
 
530
  "What environment variables do you wish to set in each pod, seperated by a comma: ",
531
  default="",
532
  ).split(",")
 
533
  else:
534
  main_training_function = "main"
535
  if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
 
540
  ["no", "fp16", "bf16", "fp8"],
541
  _convert_mixed_precision,
542
  )
 
543
  if use_dynamo and mixed_precision == "no" and not use_cpu:
544
  print(
545
  "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
546
  )
 
547
  if distributed_type == DistributedType.TPU and mixed_precision == "bf16":
548
  tpu_downcast_bf16 = _ask_field(
549
  "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
550
  )
 
551
  return ClusterConfig(
552
  compute_environment=ComputeEnvironment.LOCAL_MACHINE,
553
  distributed_type=distributed_type,
src/commands/config/config.py CHANGED
@@ -1,7 +1,5 @@
1
  #!/usr/bin/env python
2
  description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
3
-
4
-
5
  def get_user_input():
6
  compute_environment = _ask_options(
7
  "In which compute environment are you running?",
@@ -13,14 +11,11 @@ def get_user_input():
13
  else:
14
  config = get_cluster_input()
15
  return config
16
-
17
-
18
  def config_command_parser(subparsers=None):
19
  if subparsers is not None:
20
  parser = subparsers.add_parser("config", description=description)
21
  else:
22
  parser = argparse.ArgumentParser("Accelerate config command", description=description)
23
-
24
  parser.add_argument(
25
  "--config_file",
26
  default=None,
@@ -31,12 +26,9 @@ def config_command_parser(subparsers=None):
31
  "with 'huggingface'."
32
  ),
33
  )
34
-
35
  if subparsers is not None:
36
  parser.set_defaults(func=config_command)
37
  return parser
38
-
39
-
40
  def config_command(args):
41
  config = get_user_input()
42
  if args.config_file is not None:
@@ -45,19 +37,14 @@ def config_command(args):
45
  if not os.path.isdir(cache_dir):
46
  os.makedirs(cache_dir)
47
  config_file = default_yaml_config_file
48
-
49
  if config_file.endswith(".json"):
50
  config.to_json_file(config_file)
51
  else:
52
  config.to_yaml_file(config_file)
53
  print(f"accelerate configuration saved at {config_file}")
54
-
55
-
56
  def main():
57
  parser = config_command_parser()
58
  args = parser.parse_args()
59
  config_command(args)
60
-
61
-
62
  if __name__ == "__main__":
63
  main()
 
1
  #!/usr/bin/env python
2
  description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
 
 
3
  def get_user_input():
4
  compute_environment = _ask_options(
5
  "In which compute environment are you running?",
 
11
  else:
12
  config = get_cluster_input()
13
  return config
 
 
14
  def config_command_parser(subparsers=None):
15
  if subparsers is not None:
16
  parser = subparsers.add_parser("config", description=description)
17
  else:
18
  parser = argparse.ArgumentParser("Accelerate config command", description=description)
 
19
  parser.add_argument(
20
  "--config_file",
21
  default=None,
 
26
  "with 'huggingface'."
27
  ),
28
  )
 
29
  if subparsers is not None:
30
  parser.set_defaults(func=config_command)
31
  return parser
 
 
32
  def config_command(args):
33
  config = get_user_input()
34
  if args.config_file is not None:
 
37
  if not os.path.isdir(cache_dir):
38
  os.makedirs(cache_dir)
39
  config_file = default_yaml_config_file
 
40
  if config_file.endswith(".json"):
41
  config.to_json_file(config_file)
42
  else:
43
  config.to_yaml_file(config_file)
44
  print(f"accelerate configuration saved at {config_file}")
 
 
45
  def main():
46
  parser = config_command_parser()
47
  args = parser.parse_args()
48
  config_command(args)
 
 
49
  if __name__ == "__main__":
50
  main()
src/commands/config/config_args.py CHANGED
@@ -5,14 +5,11 @@ hf_cache_home = os.path.expanduser(
5
  cache_dir = os.path.join(hf_cache_home, "accelerate")
6
  default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
7
  default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
8
-
9
  # For backward compatibility: the default config is the json one if it's the only existing file.
10
  if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
11
  default_config_file = default_yaml_config_file
12
  else:
13
  default_config_file = default_json_config_file
14
-
15
-
16
  def load_config_from_file(config_file):
17
  if config_file is not None:
18
  if not os.path.isfile(config_file):
@@ -43,8 +40,6 @@ def load_config_from_file(config_file):
43
  else:
44
  config_class = SageMakerConfig
45
  return config_class.from_yaml_file(yaml_file=config_file)
46
-
47
-
48
  @dataclass
49
  class BaseConfig:
50
  compute_environment: ComputeEnvironment
@@ -52,7 +47,6 @@ class BaseConfig:
52
  mixed_precision: str
53
  use_cpu: bool
54
  debug: bool
55
-
56
  def to_dict(self):
57
  result = self.__dict__
58
  # For serialization, it's best to convert Enums to strings (or their underlying value type).
@@ -63,7 +57,6 @@ class BaseConfig:
63
  result[key] = None
64
  result = {k: v for k, v in result.items() if v is not None}
65
  return result
66
-
67
  @classmethod
68
  def from_json_file(cls, json_file=None):
69
  json_file = default_json_config_file if json_file is None else json_file
@@ -88,14 +81,11 @@ class BaseConfig:
88
  f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
89
  " version or fix (and potentially remove) these keys from your config file."
90
  )
91
-
92
  return cls(**config_dict)
93
-
94
  def to_json_file(self, json_file):
95
  with open(json_file, "w", encoding="utf-8") as f:
96
  content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
97
  f.write(content)
98
-
99
  @classmethod
100
  def from_yaml_file(cls, yaml_file=None):
101
  yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
@@ -123,11 +113,9 @@ class BaseConfig:
123
  " version or fix (and potentially remove) these keys from your config file."
124
  )
125
  return cls(**config_dict)
126
-
127
  def to_yaml_file(self, yaml_file):
128
  with open(yaml_file, "w", encoding="utf-8") as f:
129
  yaml.safe_dump(self.to_dict(), f)
130
-
131
  def __post_init__(self):
132
  if isinstance(self.compute_environment, str):
133
  self.compute_environment = ComputeEnvironment(self.compute_environment)
@@ -138,8 +126,6 @@ class BaseConfig:
138
  self.distributed_type = DistributedType(self.distributed_type)
139
  if self.dynamo_config is None:
140
  self.dynamo_config = {}
141
-
142
-
143
  @dataclass
144
  class ClusterConfig(BaseConfig):
145
  num_processes: int
@@ -151,7 +137,6 @@ class ClusterConfig(BaseConfig):
151
  rdzv_backend: Optional[str] = "static"
152
  same_network: Optional[bool] = False
153
  main_training_function: str = "main"
154
-
155
  # args for deepspeed_plugin
156
  deepspeed_config: dict = None
157
  # args for fsdp
@@ -162,7 +147,6 @@ class ClusterConfig(BaseConfig):
162
  ipex_config: dict = None
163
  # args for TPU
164
  downcast_bf16: bool = False
165
-
166
  # args for TPU pods
167
  tpu_name: str = None
168
  tpu_zone: str = None
@@ -172,10 +156,8 @@ class ClusterConfig(BaseConfig):
172
  commands: List[str] = None
173
  tpu_vm: List[str] = None
174
  tpu_env: List[str] = None
175
-
176
  # args for dynamo
177
  dynamo_config: dict = None
178
-
179
  def __post_init__(self):
180
  if self.deepspeed_config is None:
181
  self.deepspeed_config = {}
@@ -186,8 +168,6 @@ class ClusterConfig(BaseConfig):
186
  if self.ipex_config is None:
187
  self.ipex_config = {}
188
  return super().__post_init__()
189
-
190
-
191
  @dataclass
192
  class SageMakerConfig(BaseConfig):
193
  ec2_instance_type: str
 
5
  cache_dir = os.path.join(hf_cache_home, "accelerate")
6
  default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
7
  default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
 
8
  # For backward compatibility: the default config is the json one if it's the only existing file.
9
  if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
10
  default_config_file = default_yaml_config_file
11
  else:
12
  default_config_file = default_json_config_file
 
 
13
  def load_config_from_file(config_file):
14
  if config_file is not None:
15
  if not os.path.isfile(config_file):
 
40
  else:
41
  config_class = SageMakerConfig
42
  return config_class.from_yaml_file(yaml_file=config_file)
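
A minimal sketch of reading a saved config back, assuming the module path `accelerate.commands.config.config_args` and that `accelerate config` has already written a default config file to the cache directory:

```python
# Hedged sketch: requires an existing config file (e.g. created by `accelerate config`).
from accelerate.commands.config.config_args import load_config_from_file

config = load_config_from_file(None)  # None falls back to the default cached config file
print(type(config).__name__, config.compute_environment, config.mixed_precision)
```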
 
 
43
  @dataclass
44
  class BaseConfig:
45
  compute_environment: ComputeEnvironment
 
47
  mixed_precision: str
48
  use_cpu: bool
49
  debug: bool
 
50
  def to_dict(self):
51
  result = self.__dict__
52
  # For serialization, it's best to convert Enums to strings (or their underlying value type).
 
57
  result[key] = None
58
  result = {k: v for k, v in result.items() if v is not None}
59
  return result
 
60
  @classmethod
61
  def from_json_file(cls, json_file=None):
62
  json_file = default_json_config_file if json_file is None else json_file
 
81
  f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
82
  " version or fix (and potentially remove) these keys from your config file."
83
  )
 
84
  return cls(**config_dict)
 
85
  def to_json_file(self, json_file):
86
  with open(json_file, "w", encoding="utf-8") as f:
87
  content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
88
  f.write(content)
 
89
  @classmethod
90
  def from_yaml_file(cls, yaml_file=None):
91
  yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
 
113
  " version or fix (and potentially remove) these keys from your config file."
114
  )
115
  return cls(**config_dict)
 
116
  def to_yaml_file(self, yaml_file):
117
  with open(yaml_file, "w", encoding="utf-8") as f:
118
  yaml.safe_dump(self.to_dict(), f)
 
119
  def __post_init__(self):
120
  if isinstance(self.compute_environment, str):
121
  self.compute_environment = ComputeEnvironment(self.compute_environment)
 
126
  self.distributed_type = DistributedType(self.distributed_type)
127
  if self.dynamo_config is None:
128
  self.dynamo_config = {}
 
 
129
  @dataclass
130
  class ClusterConfig(BaseConfig):
131
  num_processes: int
 
137
  rdzv_backend: Optional[str] = "static"
138
  same_network: Optional[bool] = False
139
  main_training_function: str = "main"
 
140
  # args for deepspeed_plugin
141
  deepspeed_config: dict = None
142
  # args for fsdp
 
147
  ipex_config: dict = None
148
  # args for TPU
149
  downcast_bf16: bool = False
 
150
  # args for TPU pods
151
  tpu_name: str = None
152
  tpu_zone: str = None
 
156
  commands: List[str] = None
157
  tpu_vm: List[str] = None
158
  tpu_env: List[str] = None
 
159
  # args for dynamo
160
  dynamo_config: dict = None
 
161
  def __post_init__(self):
162
  if self.deepspeed_config is None:
163
  self.deepspeed_config = {}
 
168
  if self.ipex_config is None:
169
  self.ipex_config = {}
170
  return super().__post_init__()
 
 
171
  @dataclass
172
  class SageMakerConfig(BaseConfig):
173
  ec2_instance_type: str
src/commands/config/config_utils.py CHANGED
@@ -13,8 +13,6 @@ DYNAMO_BACKENDS = [
13
  "IPEX",
14
  "TVM",
15
  ]
16
-
17
-
18
  def _ask_field(input_text, convert_value=None, default=None, error_message=None):
19
  ask_again = True
20
  while ask_again:
@@ -26,43 +24,27 @@ def _ask_field(input_text, convert_value=None, default=None, error_message=None)
26
  except Exception:
27
  if error_message is not None:
28
  print(error_message)
29
-
30
-
31
  def _ask_options(input_text, options=[], convert_value=None, default=0):
32
  menu = BulletMenu(input_text, options)
33
  result = menu.run(default_choice=default)
34
  return convert_value(result) if convert_value is not None else result
35
-
36
-
37
  def _convert_compute_environment(value):
38
  value = int(value)
39
  return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
40
-
41
-
42
  def _convert_distributed_mode(value):
43
  value = int(value)
44
  return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
45
-
46
-
47
  def _convert_dynamo_backend(value):
48
  value = int(value)
49
  return DynamoBackend(DYNAMO_BACKENDS[value]).value
50
-
51
-
52
  def _convert_mixed_precision(value):
53
  value = int(value)
54
  return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
55
-
56
-
57
  def _convert_sagemaker_distributed_mode(value):
58
  value = int(value)
59
  return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
60
-
61
-
62
  def _convert_yes_no_to_bool(value):
63
  return {"yes": True, "no": False}[value.lower()]
64
-
65
-
66
  class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
67
  """
68
  A custom formatter that will remove the usage line from the help message for subcommands.
 
13
  "IPEX",
14
  "TVM",
15
  ]
 
 
16
  def _ask_field(input_text, convert_value=None, default=None, error_message=None):
17
  ask_again = True
18
  while ask_again:
 
24
  except Exception:
25
  if error_message is not None:
26
  print(error_message)
 
 
27
  def _ask_options(input_text, options=[], convert_value=None, default=0):
28
  menu = BulletMenu(input_text, options)
29
  result = menu.run(default_choice=default)
30
  return convert_value(result) if convert_value is not None else result
 
 
31
  def _convert_compute_environment(value):
32
  value = int(value)
33
  return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
 
 
34
  def _convert_distributed_mode(value):
35
  value = int(value)
36
  return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
 
 
37
  def _convert_dynamo_backend(value):
38
  value = int(value)
39
  return DynamoBackend(DYNAMO_BACKENDS[value]).value
 
 
40
  def _convert_mixed_precision(value):
41
  value = int(value)
42
  return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
 
 
43
  def _convert_sagemaker_distributed_mode(value):
44
  value = int(value)
45
  return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
 
 
46
  def _convert_yes_no_to_bool(value):
47
  return {"yes": True, "no": False}[value.lower()]
 
 
48
  class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
49
  """
50
  A custom formatter that will remove the usage line from the help message for subcommands.
src/commands/config/default.py CHANGED
@@ -1,12 +1,9 @@
1
  #!/usr/bin/env python
2
  description = "Create a default config file for Accelerate with only a few flags set."
3
-
4
-
5
  def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
6
  """
7
  Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
8
  set CPU if it is a CPU-only machine.
9
-
10
  Args:
11
  mixed_precision (`str`, *optional*, defaults to "no"):
12
  Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
@@ -66,8 +63,6 @@ def write_basic_config(mixed_precision="no", save_location: str = default_json_c
66
  config = ClusterConfig(**config)
67
  config.to_json_file(path)
68
  return path
69
-
70
-
71
  def default_command_parser(parser, parents):
72
  parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
73
  parser.add_argument(
@@ -81,7 +76,6 @@ def default_command_parser(parser, parents):
81
  ),
82
  dest="save_location",
83
  )
84
-
85
  parser.add_argument(
86
  "--mixed_precision",
87
  choices=["no", "fp16", "bf16"],
@@ -93,8 +87,6 @@ def default_command_parser(parser, parents):
93
  )
94
  parser.set_defaults(func=default_config_command)
95
  return parser
96
-
97
-
98
  def default_config_command(args):
99
  config_file = write_basic_config(args.mixed_precision, args.save_location)
100
  if config_file:
 
1
  #!/usr/bin/env python
2
  description = "Create a default config file for Accelerate with only a few flags set."
 
 
3
  def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
4
  """
5
  Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
6
  set CPU if it is a CPU-only machine.
 
7
  Args:
8
  mixed_precision (`str`, *optional*, defaults to "no"):
9
  Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
 
63
  config = ClusterConfig(**config)
64
  config.to_json_file(path)
65
  return path
 
 
66
  def default_command_parser(parser, parents):
67
  parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
68
  parser.add_argument(
 
76
  ),
77
  dest="save_location",
78
  )
 
79
  parser.add_argument(
80
  "--mixed_precision",
81
  choices=["no", "fp16", "bf16"],
 
87
  )
88
  parser.set_defaults(func=default_config_command)
89
  return parser
 
 
90
  def default_config_command(args):
91
  config_file = write_basic_config(args.mixed_precision, args.save_location)
92
  if config_file:
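
The `default` subcommand above follows the usual argparse pattern: register a subparser, then stash the handler with `set_defaults(func=...)` so the top-level CLI can simply call `args.func(args)`. A small self-contained sketch of that pattern (names here are illustrative, not the real CLI):

```python
import argparse


def default_config_command(args):
    # Stand-in for write_basic_config(); just report what would be written.
    print(f"Would write a basic config with mixed_precision={args.mixed_precision}")


def build_parser():
    parser = argparse.ArgumentParser("accelerate-config-demo")
    subparsers = parser.add_subparsers(dest="command")
    default = subparsers.add_parser("default", help="Create a default config file.")
    default.add_argument("--mixed_precision", choices=["no", "fp16", "bf16"], default="no")
    # The handler travels on the parsed namespace, so dispatch is one line.
    default.set_defaults(func=default_config_command)
    return parser


if __name__ == "__main__":
    args = build_parser().parse_args(["default", "--mixed_precision", "bf16"])
    args.func(args)
```
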
src/commands/config/sagemaker.py CHANGED
@@ -1,7 +1,6 @@
1
  #!/usr/bin/env python
2
  def _create_iam_role_for_sagemaker(role_name):
3
  iam_client = boto3.client("iam")
4
-
5
  sagemaker_trust_policy = {
6
  "Version": "2012-10-17",
7
  "Statement": [
@@ -51,13 +50,9 @@ def _create_iam_role_for_sagemaker(role_name):
51
  )
52
  except iam_client.exceptions.EntityAlreadyExistsException:
53
  print(f"role {role_name} already exists. Using existing one")
54
-
55
-
56
  def _get_iam_role_arn(role_name):
57
  iam_client = boto3.client("iam")
58
  return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
59
-
60
-
61
  def get_sagemaker_input():
62
  credentials_configuration = _ask_options(
63
  "How do you want to authorize?",
@@ -75,13 +70,10 @@ def get_sagemaker_input():
75
  )
76
  aws_access_key_id = _ask_field("AWS Access Key ID: ")
77
  os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
78
-
79
  aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
80
  os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
81
-
82
  aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
83
  os.environ["AWS_DEFAULT_REGION"] = aws_region
84
-
85
  role_management = _ask_options(
86
  "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
87
  ["Provide IAM Role name", "Create new IAM role using credentials"],
@@ -93,7 +85,6 @@ def get_sagemaker_input():
93
  iam_role_name = "accelerate_sagemaker_execution_role"
94
  print(f'Accelerate will create an IAM role "{iam_role_name}" using the provided credentials')
95
  _create_iam_role_for_sagemaker(iam_role_name)
96
-
97
  is_custom_docker_image = _ask_field(
98
  "Do you want to use custom Docker image? [yes/NO]: ",
99
  _convert_yes_no_to_bool,
@@ -103,7 +94,6 @@ def get_sagemaker_input():
103
  docker_image = None
104
  if is_custom_docker_image:
105
  docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
106
-
107
  is_sagemaker_inputs_enabled = _ask_field(
108
  "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
109
  _convert_yes_no_to_bool,
@@ -116,7 +106,6 @@ def get_sagemaker_input():
116
  "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
117
  lambda x: str(x).lower(),
118
  )
119
-
120
  is_sagemaker_metrics_enabled = _ask_field(
121
  "Do you want to enable SageMaker metrics? [yes/NO]: ",
122
  _convert_yes_no_to_bool,
@@ -129,7 +118,6 @@ def get_sagemaker_input():
129
  "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
130
  lambda x: str(x).lower(),
131
  )
132
-
133
  distributed_type = _ask_options(
134
  "What is the distributed mode?",
135
  ["No distributed training", "Data parallelism"],
@@ -156,7 +144,6 @@ def get_sagemaker_input():
156
  default=False,
157
  error_message="Please enter yes or no.",
158
  )
159
-
160
  if use_custom_options:
161
  dynamo_config[prefix + "mode"] = _ask_options(
162
  "Which mode do you want to use?",
@@ -184,7 +171,6 @@ def get_sagemaker_input():
184
  else:
185
  ec2_instance_query += "? [ml.p3.2xlarge]:"
186
  ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
187
-
188
  debug = False
189
  if distributed_type != SageMakerDistributedType.NO:
190
  debug = _ask_field(
@@ -193,7 +179,6 @@ def get_sagemaker_input():
193
  default=False,
194
  error_message="Please enter yes or no.",
195
  )
196
-
197
  num_machines = 1
198
  if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
199
  num_machines = _ask_field(
@@ -201,18 +186,15 @@ def get_sagemaker_input():
201
  int,
202
  default=1,
203
  )
204
-
205
  mixed_precision = _ask_options(
206
  "Do you wish to use FP16 or BF16 (mixed precision)?",
207
  ["no", "fp16", "bf16", "fp8"],
208
  _convert_mixed_precision,
209
  )
210
-
211
  if use_dynamo and mixed_precision == "no":
212
  print(
213
  "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
214
  )
215
-
216
  return SageMakerConfig(
217
  image_uri=docker_image,
218
  compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
 
1
  #!/usr/bin/env python
2
  def _create_iam_role_for_sagemaker(role_name):
3
  iam_client = boto3.client("iam")
 
4
  sagemaker_trust_policy = {
5
  "Version": "2012-10-17",
6
  "Statement": [
 
50
  )
51
  except iam_client.exceptions.EntityAlreadyExistsException:
52
  print(f"role {role_name} already exists. Using existing one")
 
 
53
  def _get_iam_role_arn(role_name):
54
  iam_client = boto3.client("iam")
55
  return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
 
 
56
  def get_sagemaker_input():
57
  credentials_configuration = _ask_options(
58
  "How do you want to authorize?",
 
70
  )
71
  aws_access_key_id = _ask_field("AWS Access Key ID: ")
72
  os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
 
73
  aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
74
  os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
 
75
  aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
76
  os.environ["AWS_DEFAULT_REGION"] = aws_region
 
77
  role_management = _ask_options(
78
  "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
79
  ["Provide IAM Role name", "Create new IAM role using credentials"],
 
85
  iam_role_name = "accelerate_sagemaker_execution_role"
86
  print(f'Accelerate will create an IAM role "{iam_role_name}" using the provided credentials')
87
  _create_iam_role_for_sagemaker(iam_role_name)
 
88
  is_custom_docker_image = _ask_field(
89
  "Do you want to use custom Docker image? [yes/NO]: ",
90
  _convert_yes_no_to_bool,
 
94
  docker_image = None
95
  if is_custom_docker_image:
96
  docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
 
97
  is_sagemaker_inputs_enabled = _ask_field(
98
  "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
99
  _convert_yes_no_to_bool,
 
106
  "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
107
  lambda x: str(x).lower(),
108
  )
 
109
  is_sagemaker_metrics_enabled = _ask_field(
110
  "Do you want to enable SageMaker metrics? [yes/NO]: ",
111
  _convert_yes_no_to_bool,
 
118
  "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
119
  lambda x: str(x).lower(),
120
  )
 
121
  distributed_type = _ask_options(
122
  "What is the distributed mode?",
123
  ["No distributed training", "Data parallelism"],
 
144
  default=False,
145
  error_message="Please enter yes or no.",
146
  )
 
147
  if use_custom_options:
148
  dynamo_config[prefix + "mode"] = _ask_options(
149
  "Which mode do you want to use?",
 
171
  else:
172
  ec2_instance_query += "? [ml.p3.2xlarge]:"
173
  ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
 
174
  debug = False
175
  if distributed_type != SageMakerDistributedType.NO:
176
  debug = _ask_field(
 
179
  default=False,
180
  error_message="Please enter yes or no.",
181
  )
 
182
  num_machines = 1
183
  if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
184
  num_machines = _ask_field(
 
186
  int,
187
  default=1,
188
  )
 
189
  mixed_precision = _ask_options(
190
  "Do you wish to use FP16 or BF16 (mixed precision)?",
191
  ["no", "fp16", "bf16", "fp8"],
192
  _convert_mixed_precision,
193
  )
 
194
  if use_dynamo and mixed_precision == "no":
195
  print(
196
  "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
197
  )
 
198
  return SageMakerConfig(
199
  image_uri=docker_image,
200
  compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
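
One detail worth noting in `get_sagemaker_input` above: the prompted AWS credentials are not written into the config, they are exported as environment variables that boto3 and the SageMaker SDK pick up later. A minimal sketch of that hand-off (dummy values, illustrative only):

```python
import os


def export_aws_credentials(access_key_id, secret_access_key, region="us-east-1"):
    # boto3 reads these standard AWS variables automatically; nothing is persisted to disk.
    os.environ["AWS_ACCESS_KEY_ID"] = access_key_id
    os.environ["AWS_SECRET_ACCESS_KEY"] = secret_access_key
    os.environ["AWS_DEFAULT_REGION"] = region


if __name__ == "__main__":
    export_aws_credentials("AKIA-EXAMPLE", "example-secret")
    print(os.environ["AWS_DEFAULT_REGION"])
```
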
src/commands/config/update.py CHANGED
@@ -1,7 +1,5 @@
1
  #!/usr/bin/env python
2
  description = "Update an existing config file with the latest defaults while maintaining the old configuration."
3
-
4
-
5
  def update_config(args):
6
  """
7
  Update an existing config file with the latest defaults while maintaining the old configuration.
@@ -12,14 +10,11 @@ def update_config(args):
12
  elif not Path(config_file).exists():
13
  raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
14
  config = load_config_from_file(config_file)
15
-
16
  if config_file.endswith(".json"):
17
  config.to_json_file(config_file)
18
  else:
19
  config.to_yaml_file(config_file)
20
  return config_file
21
-
22
-
23
  def update_command_parser(parser, parents):
24
  parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
25
  parser.add_argument(
@@ -32,11 +27,8 @@ def update_command_parser(parser, parents):
32
  "with 'huggingface'."
33
  ),
34
  )
35
-
36
  parser.set_defaults(func=update_config_command)
37
  return parser
38
-
39
-
40
  def update_config_command(args):
41
  config_file = update_config(args)
42
  print(f"Sucessfully updated the configuration file at {config_file}.")
 
1
  #!/usr/bin/env python
2
  description = "Update an existing config file with the latest defaults while maintaining the old configuration."
 
 
3
  def update_config(args):
4
  """
5
  Update an existing config file with the latest defaults while maintaining the old configuration.
 
10
  elif not Path(config_file).exists():
11
  raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
12
  config = load_config_from_file(config_file)
 
13
  if config_file.endswith(".json"):
14
  config.to_json_file(config_file)
15
  else:
16
  config.to_yaml_file(config_file)
17
  return config_file
 
 
18
  def update_command_parser(parser, parents):
19
  parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
20
  parser.add_argument(
 
27
  "with 'huggingface'."
28
  ),
29
  )
 
30
  parser.set_defaults(func=update_config_command)
31
  return parser
 
 
32
  def update_config_command(args):
33
  config_file = update_config(args)
34
  print(f"Sucessfully updated the configuration file at {config_file}.")
src/commands/env.py CHANGED
@@ -4,27 +4,21 @@ def env_command_parser(subparsers=None):
4
  parser = subparsers.add_parser("env")
5
  else:
6
  parser = argparse.ArgumentParser("Accelerate env command")
7
-
8
  parser.add_argument(
9
  "--config_file", default=None, help="The config file to use for the default values in the launching script."
10
  )
11
-
12
  if subparsers is not None:
13
  parser.set_defaults(func=env_command)
14
  return parser
15
-
16
-
17
  def env_command(args):
18
  pt_version = torch.__version__
19
  pt_cuda_available = torch.cuda.is_available()
20
  pt_xpu_available = is_xpu_available()
21
  pt_npu_available = is_npu_available()
22
-
23
  accelerate_config = "Not found"
24
  # Get the default from the config file.
25
  if args.config_file is not None or os.path.isfile(default_config_file):
26
  accelerate_config = load_config_from_file(args.config_file).to_dict()
27
-
28
  info = {
29
  "`Accelerate` version": version,
30
  "Platform": platform.platform(),
@@ -37,10 +31,8 @@ def env_command(args):
37
  }
38
  if pt_cuda_available:
39
  info["GPU type"] = torch.cuda.get_device_name()
40
-
41
  print("\nCopy-and-paste the text below in your GitHub issue\n")
42
  print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
43
-
44
  print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
45
  accelerate_config_str = (
46
  "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
@@ -48,18 +40,12 @@ def env_command(args):
48
  else f"\t{accelerate_config}"
49
  )
50
  print(accelerate_config_str)
51
-
52
  info["`Accelerate` configs"] = accelerate_config
53
-
54
  return info
55
-
56
-
57
  def main() -> int:
58
  parser = env_command_parser()
59
  args = parser.parse_args()
60
  env_command(args)
61
  return 0
62
-
63
-
64
  if __name__ == "__main__":
65
  raise SystemExit(main())
 
4
  parser = subparsers.add_parser("env")
5
  else:
6
  parser = argparse.ArgumentParser("Accelerate env command")
 
7
  parser.add_argument(
8
  "--config_file", default=None, help="The config file to use for the default values in the launching script."
9
  )
 
10
  if subparsers is not None:
11
  parser.set_defaults(func=env_command)
12
  return parser
 
 
13
  def env_command(args):
14
  pt_version = torch.__version__
15
  pt_cuda_available = torch.cuda.is_available()
16
  pt_xpu_available = is_xpu_available()
17
  pt_npu_available = is_npu_available()
 
18
  accelerate_config = "Not found"
19
  # Get the default from the config file.
20
  if args.config_file is not None or os.path.isfile(default_config_file):
21
  accelerate_config = load_config_from_file(args.config_file).to_dict()
 
22
  info = {
23
  "`Accelerate` version": version,
24
  "Platform": platform.platform(),
 
31
  }
32
  if pt_cuda_available:
33
  info["GPU type"] = torch.cuda.get_device_name()
 
34
  print("\nCopy-and-paste the text below in your GitHub issue\n")
35
  print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
 
36
  print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
37
  accelerate_config_str = (
38
  "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
 
40
  else f"\t{accelerate_config}"
41
  )
42
  print(accelerate_config_str)
 
43
  info["`Accelerate` configs"] = accelerate_config
 
44
  return info
 
 
45
  def main() -> int:
46
  parser = env_command_parser()
47
  args = parser.parse_args()
48
  env_command(args)
49
  return 0
 
 
50
  if __name__ == "__main__":
51
  raise SystemExit(main())
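
`env_command` above is essentially "collect name/value pairs, print them as copy-pasteable bullets". A stripped-down sketch of the same idea without the torch/accelerate-specific fields:

```python
import platform
import sys


def gather_env_info():
    # Only stdlib fields here; the real command also reports torch, accelerate and config details.
    return {
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Python executable": sys.executable,
    }


if __name__ == "__main__":
    info = gather_env_info()
    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
```
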
src/commands/estimate.py CHANGED
@@ -7,8 +7,6 @@ def verify_on_hub(repo: str, token: str = None):
7
  return "gated"
8
  except RepositoryNotFoundError:
9
  return "repo"
10
-
11
-
12
  def check_has_model(error):
13
  """
14
  Checks what library spawned `error` when a model is not found
@@ -23,12 +21,9 @@ def check_has_model(error):
23
  return "transformers"
24
  else:
25
  return "unknown"
26
-
27
-
28
  def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
29
  """
30
  Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
31
-
32
  Args:
33
  model_name (`str`):
34
  The model name on the Hub
@@ -41,10 +36,8 @@ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bo
41
  execute code present on the Hub on your local machine.
42
  access_token (`str`, `optional`, defaults to `None`):
43
  The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
44
-
45
  Returns:
46
  `torch.nn.Module`: The torch model that has been initialized on the `meta` device.
47
-
48
  """
49
  model_info = verify_on_hub(model_name, access_token)
50
  # Simplified errors
@@ -69,10 +62,8 @@ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bo
69
  f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
70
  )
71
  print(f"Loading pretrained config for `{model_name}` from `transformers`...")
72
-
73
  auto_map = model_info.config.get("auto_map", False)
74
  config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
75
-
76
  with init_empty_weights():
77
  # remote code could specify a specific `AutoModel` class in the `auto_map`
78
  constructor = AutoModel
@@ -98,8 +89,6 @@ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bo
98
  f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
99
  )
100
  return model
101
-
102
-
103
  def create_ascii_table(headers: list, rows: list, title: str):
104
  "Creates a pretty table from a list of rows, minimal version of `tabulate`."
105
  sep_char, in_between = "│", "─"
@@ -108,20 +97,15 @@ def create_ascii_table(headers: list, rows: list, title: str):
108
  column_values = [row[i] for row in rows] + [headers[i]]
109
  max_column_width = max(len(value) for value in column_values)
110
  column_widths.append(max_column_width)
111
-
112
  formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
113
-
114
  pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
115
  diff = 0
116
-
117
  def make_row(left_char, middle_char, right_char):
118
  return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
119
-
120
  separator = make_row("├", "┼", "┤")
121
  if len(title) > sum(column_widths):
122
  diff = abs(len(title) - len(separator))
123
  column_widths[-1] += diff
124
-
125
  # Update with diff
126
  separator = make_row("├", "┼", "┤")
127
  initial_rows = [
@@ -137,16 +121,12 @@ def create_ascii_table(headers: list, rows: list, title: str):
137
  centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
138
  table += f"{pattern % tuple(centered_line)}\n"
139
  table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
140
-
141
  return table
142
-
143
-
144
  def estimate_command_parser(subparsers=None):
145
  if subparsers is not None:
146
  parser = subparsers.add_parser("estimate-memory")
147
  else:
148
  parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
149
-
150
  parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
151
  parser.add_argument(
152
  "--library_name",
@@ -169,12 +149,9 @@ def estimate_command_parser(subparsers=None):
169
  should only be used for repositories you trust and in which you have read the code, as it will execute
170
  code present on the Hub on your local machine.""",
171
  )
172
-
173
  if subparsers is not None:
174
  parser.set_defaults(func=estimate_command)
175
  return parser
176
-
177
-
178
  def gather_data(args):
179
  "Creates an empty model and gathers the data for the sizes"
180
  try:
@@ -188,11 +165,8 @@ def gather_data(args):
188
  f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
189
  )
190
  raise e
191
-
192
  total_size, largest_layer = calculate_maximum_sizes(model)
193
-
194
  data = []
195
-
196
  for dtype in args.dtypes:
197
  dtype_total_size = total_size
198
  dtype_largest_layer = largest_layer[0]
@@ -208,27 +182,19 @@ def gather_data(args):
208
  dtype_training_size = dtype_total_size * 4
209
  data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
210
  return data
211
-
212
-
213
  def estimate_command(args):
214
  data = gather_data(args)
215
  for row in data:
216
  for i, item in enumerate(row):
217
  if isinstance(item, (int, float)):
218
  row[i] = convert_bytes(item)
219
-
220
  headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
221
-
222
  title = f"Memory Usage for loading `{args.model_name}`"
223
  table = create_ascii_table(headers, data, title)
224
  print(table)
225
-
226
-
227
  def main():
228
  parser = estimate_command_parser()
229
  args = parser.parse_args()
230
  estimate_command(args)
231
-
232
-
233
  if __name__ == "__main__":
234
  main()
 
7
  return "gated"
8
  except RepositoryNotFoundError:
9
  return "repo"
 
 
10
  def check_has_model(error):
11
  """
12
  Checks what library spawned `error` when a model is not found
 
21
  return "transformers"
22
  else:
23
  return "unknown"
 
 
24
  def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
25
  """
26
  Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
 
27
  Args:
28
  model_name (`str`):
29
  The model name on the Hub
 
36
  execute code present on the Hub on your local machine.
37
  access_token (`str`, `optional`, defaults to `None`):
38
  The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
 
39
  Returns:
40
  `torch.nn.Module`: The torch model that has been initialized on the `meta` device.
 
41
  """
42
  model_info = verify_on_hub(model_name, access_token)
43
  # Simplified errors
 
62
  f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
63
  )
64
  print(f"Loading pretrained config for `{model_name}` from `transformers`...")
 
65
  auto_map = model_info.config.get("auto_map", False)
66
  config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
 
67
  with init_empty_weights():
68
  # remote code could specify a specific `AutoModel` class in the `auto_map`
69
  constructor = AutoModel
 
89
  f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
90
  )
91
  return model
 
 
92
  def create_ascii_table(headers: list, rows: list, title: str):
93
  "Creates a pretty table from a list of rows, minimal version of `tabulate`."
94
  sep_char, in_between = "│", "─"
 
97
  column_values = [row[i] for row in rows] + [headers[i]]
98
  max_column_width = max(len(value) for value in column_values)
99
  column_widths.append(max_column_width)
 
100
  formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
 
101
  pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
102
  diff = 0
 
103
  def make_row(left_char, middle_char, right_char):
104
  return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
 
105
  separator = make_row("├", "┼", "┤")
106
  if len(title) > sum(column_widths):
107
  diff = abs(len(title) - len(separator))
108
  column_widths[-1] += diff
 
109
  # Update with diff
110
  separator = make_row("├", "┼", "┤")
111
  initial_rows = [
 
121
  centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
122
  table += f"{pattern % tuple(centered_line)}\n"
123
  table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
 
124
  return table
 
 
125
  def estimate_command_parser(subparsers=None):
126
  if subparsers is not None:
127
  parser = subparsers.add_parser("estimate-memory")
128
  else:
129
  parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
 
130
  parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
131
  parser.add_argument(
132
  "--library_name",
 
149
  should only be used for repositories you trust and in which you have read the code, as it will execute
150
  code present on the Hub on your local machine.""",
151
  )
 
152
  if subparsers is not None:
153
  parser.set_defaults(func=estimate_command)
154
  return parser
 
 
155
  def gather_data(args):
156
  "Creates an empty model and gathers the data for the sizes"
157
  try:
 
165
  f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
166
  )
167
  raise e
 
168
  total_size, largest_layer = calculate_maximum_sizes(model)
 
169
  data = []
 
170
  for dtype in args.dtypes:
171
  dtype_total_size = total_size
172
  dtype_largest_layer = largest_layer[0]
 
182
  dtype_training_size = dtype_total_size * 4
183
  data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
184
  return data
 
 
185
  def estimate_command(args):
186
  data = gather_data(args)
187
  for row in data:
188
  for i, item in enumerate(row):
189
  if isinstance(item, (int, float)):
190
  row[i] = convert_bytes(item)
 
191
  headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
 
192
  title = f"Memory Usage for loading `{args.model_name}`"
193
  table = create_ascii_table(headers, data, title)
194
  print(table)
 
 
195
  def main():
196
  parser = estimate_command_parser()
197
  args = parser.parse_args()
198
  estimate_command(args)
 
 
199
  if __name__ == "__main__":
200
  main()
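
`create_ascii_table` above sizes each column to its widest cell and then formats every row against those widths. A simplified, self-contained version of that idea (title handling is reduced to the essentials, and the numbers below are made up for illustration):

```python
def ascii_table(headers, rows, title):
    # Column width = widest cell in that column, header included.
    widths = [max(len(str(row[i])) for row in rows + [headers]) for i in range(len(headers))]

    def horizontal(left, middle, right):
        return left + middle.join("─" * w for w in widths) + right

    fmt = "│" + "│".join(f"{{:^{w}}}" for w in widths) + "│"
    lines = [title, horizontal("┌", "┬", "┐"), fmt.format(*headers), horizontal("├", "┼", "┤")]
    lines += [fmt.format(*map(str, row)) for row in rows]
    lines.append(horizontal("└", "┴", "┘"))
    return "\n".join(lines)


if __name__ == "__main__":
    print(ascii_table(
        ["dtype", "Largest Layer", "Total Size"],
        [["float32", "787.25 MB", "14.03 GB"], ["float16", "393.62 MB", "7.01 GB"]],
        "Memory Usage for loading `example/model`",
    ))
```
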
src/commands/launch.py CHANGED
@@ -1,6 +1,5 @@
1
  #!/usr/bin/env python
2
  logger = logging.getLogger(__name__)
3
-
4
  options_to_group = {
5
  "--multi-gpu": "Distributed GPUs",
6
  "--tpu": "TPU",
@@ -8,14 +7,10 @@ options_to_group = {
8
  "--use_fsdp": "FSDP Arguments",
9
  "--use_megatron_lm": "Megatron-LM Arguments",
10
  }
11
-
12
-
13
  def clean_option(option):
14
  "Finds all cases of - after the first two characters and changes them to _"
15
  if option.startswith("--"):
16
  return option[:3] + option[3:].replace("-", "_")
17
-
18
-
19
  class _CustomHelpAction(argparse._HelpAction):
20
  """
21
  This is a custom help action that will hide all arguments that are not used in the command line when the help is
@@ -59,19 +54,14 @@ class _CustomHelpAction(argparse._HelpAction):
59
  # If all arguments in the group are hidden, hide the group
60
  if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):
61
  parser._action_groups.remove(group)
62
-
63
  super().__call__(parser, namespace, values, option_string)
64
-
65
-
66
  def launch_command_parser(subparsers=None):
67
  if subparsers is not None:
68
  parser = subparsers.add_parser("launch", add_help=False, allow_abbrev=False)
69
  else:
70
  parser = argparse.ArgumentParser("Accelerate launch command", add_help=False, allow_abbrev=False)
71
-
72
  parser.register("action", "help", _CustomHelpAction)
73
  parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
74
-
75
  parser.add_argument(
76
  "--config_file", default=None, help="The config file to use for the default values in the launching script."
77
  )
@@ -103,7 +93,6 @@ def launch_command_parser(subparsers=None):
103
  action="store_true",
104
  help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.",
105
  )
106
-
107
  # Resource selection arguments
108
  resource_args = parser.add_argument_group(
109
  "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
@@ -128,7 +117,6 @@ def launch_command_parser(subparsers=None):
128
  default=None,
129
  help="The number of CPU threads per process. Can be tuned for optimal performance.",
130
  )
131
-
132
  # Dynamo arguments
133
  resource_args.add_argument(
134
  "--dynamo_backend",
@@ -156,7 +144,6 @@ def launch_command_parser(subparsers=None):
156
  action="store_true",
157
  help="Whether to enable dynamic shape tracing.",
158
  )
159
-
160
  # Training Paradigm arguments
161
  paradigm_args = parser.add_argument_group(
162
  "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
@@ -185,7 +172,6 @@ def launch_command_parser(subparsers=None):
185
  action="store_true",
186
  help="Whether to use IPEX plugin to speed up training on XPU specifically.",
187
  )
188
-
189
  # distributed GPU training arguments
190
  distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
191
  distributed_args.add_argument(
@@ -260,7 +246,6 @@ def launch_command_parser(subparsers=None):
260
  action="store_true",
261
  help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
262
  )
263
-
264
  # TPU arguments
265
  tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
266
  tpu_args.add_argument(
@@ -306,7 +291,6 @@ def launch_command_parser(subparsers=None):
306
  action="store_true",
307
  help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.",
308
  )
309
-
310
  # DeepSpeed arguments
311
  deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
312
  deepspeed_args.add_argument(
@@ -402,7 +386,6 @@ def launch_command_parser(subparsers=None):
402
  type=str,
403
  help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
404
  )
405
-
406
  # fsdp arguments
407
  fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
408
  fsdp_args.add_argument(
@@ -483,7 +466,6 @@ def launch_command_parser(subparsers=None):
483
  help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
484
  " (useful only when `use_fsdp` flag is passed).",
485
  )
486
-
487
  # megatron_lm args
488
  megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
489
  megatron_lm_args.add_argument(
@@ -533,7 +515,6 @@ def launch_command_parser(subparsers=None):
533
  help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
534
  "(useful only when `use_megatron_lm` flag is passed).",
535
  )
536
-
537
  # AWS arguments
538
  aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
539
  aws_args.add_argument(
@@ -561,18 +542,13 @@ def launch_command_parser(subparsers=None):
561
  "script."
562
  ),
563
  )
564
-
565
  # Other arguments of the training scripts
566
  parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
567
-
568
  if subparsers is not None:
569
  parser.set_defaults(func=launch_command)
570
  return parser
571
-
572
-
573
  def simple_launcher(args):
574
  cmd, current_env = prepare_simple_launcher_cmd_env(args)
575
-
576
  process = subprocess.Popen(cmd, env=current_env)
577
  process.wait()
578
  if process.returncode != 0:
@@ -580,11 +556,8 @@ def simple_launcher(args):
580
  raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
581
  else:
582
  sys.exit(1)
583
-
584
-
585
  def multi_gpu_launcher(args):
586
  import torch.distributed.run as distrib_run
587
-
588
  current_env = prepare_multi_gpu_env(args)
589
  if not check_cuda_p2p_ib_support():
590
  message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
@@ -597,7 +570,6 @@ def multi_gpu_launcher(args):
597
  warn = True
598
  if warn:
599
  logger.warning(message)
600
-
601
  debug = getattr(args, "debug", False)
602
  args = _filter_args(
603
  args,
@@ -614,14 +586,10 @@ def multi_gpu_launcher(args):
614
  console.print_exception(suppress=[__file__], show_locals=False)
615
  else:
616
  raise
617
-
618
-
619
  def deepspeed_launcher(args):
620
  import torch.distributed.run as distrib_run
621
-
622
  if not is_deepspeed_available():
623
  raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
624
-
625
  cmd, current_env = prepare_deepspeed_cmd_env(args)
626
  if not check_cuda_p2p_ib_support():
627
  message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
@@ -634,14 +602,12 @@ def deepspeed_launcher(args):
634
  warn = True
635
  if warn:
636
  logger.warning(message)
637
-
638
  if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
639
  with open(".deepspeed_env", "a") as f:
640
  for key, value in current_env.items():
641
  if ";" in value or " " in value:
642
  continue
643
  f.write(f"{key}={value}\n")
644
-
645
  process = subprocess.Popen(cmd, env=current_env)
646
  process.wait()
647
  if process.returncode != 0:
@@ -666,16 +632,11 @@ def deepspeed_launcher(args):
666
  console.print_exception(suppress=[__file__], show_locals=False)
667
  else:
668
  raise
669
-
670
-
671
  def tpu_launcher(args):
672
  import torch_xla.distributed.xla_multiprocessing as xmp
673
-
674
  if args.no_python:
675
  raise ValueError("--no_python cannot be used with TPU launcher")
676
-
677
  args, current_env = prepare_tpu(args, {})
678
-
679
  if args.module:
680
  mod_name = args.training_script
681
  else:
@@ -683,40 +644,31 @@ def tpu_launcher(args):
683
  script_path = Path(args.training_script)
684
  sys.path.append(str(script_path.parent.resolve()))
685
  mod_name = script_path.stem
686
-
687
  mod = importlib.import_module(mod_name)
688
  if not hasattr(mod, args.main_training_function):
689
  raise ValueError(
690
  f"Your training script should have a function named {args.main_training_function}, or you should pass a "
691
  "different value to `--main_training_function`."
692
  )
693
-
694
  # Patch sys.argv
695
  sys.argv = [mod.__file__] + args.training_script_args
696
-
697
  main_function = getattr(mod, args.main_training_function)
698
  with patch_environment(**current_env):
699
  xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
700
-
701
-
702
  def tpu_pod_launcher(args):
703
  from torch_xla.distributed import xla_dist
704
-
705
  current_env = {}
706
  args, current_env = prepare_tpu(args, current_env, True)
707
  debug = getattr(args, "debug", False)
708
-
709
  training_script = args.training_script
710
  training_script_args = args.training_script_args
711
  new_args = _filter_args(
712
  args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
713
  )
714
-
715
  if args.tpu_use_sudo:
716
  new_cmd = ["sudo"]
717
  else:
718
  new_cmd = []
719
-
720
  new_cmd += [
721
  "accelerate-launch",
722
  "--tpu",
@@ -733,7 +685,6 @@ def tpu_pod_launcher(args):
733
  str(args.main_training_function),
734
  training_script,
735
  ] + training_script_args
736
-
737
  new_args.positional = new_cmd
738
  bad_flags = ""
739
  for arg in vars(new_args):
@@ -756,8 +707,6 @@ def tpu_pod_launcher(args):
756
  console.print_exception(suppress=[__file__], show_locals=False)
757
  else:
758
  raise
759
-
760
-
761
  def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
762
  if not is_sagemaker_available():
763
  raise ImportError(
@@ -767,17 +716,11 @@ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
767
  raise ValueError(
768
  "SageMaker requires a python training script file and cannot be used with --module or --no_python"
769
  )
770
-
771
  from sagemaker.huggingface import HuggingFace
772
-
773
  args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
774
-
775
  huggingface_estimator = HuggingFace(**args)
776
-
777
  huggingface_estimator.fit(inputs=sagemaker_inputs)
778
  print(f"You can find your model data at: {huggingface_estimator.model_data}")
779
-
780
-
781
  def _validate_launch_command(args):
782
  # Sanity checks
783
  if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
@@ -786,7 +729,6 @@ def _validate_launch_command(args):
786
  )
787
  if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
788
  raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
789
-
790
  defaults = None
791
  warned = []
792
  mp_from_config_flag = False
@@ -817,10 +759,8 @@ def _validate_launch_command(args):
817
  args.gpu_ids = defaults.gpu_ids
818
  else:
819
  args.gpu_ids = "all"
820
-
821
  if args.multi_gpu and args.num_machines is None:
822
  args.num_machines = defaults.num_machines
823
-
824
  if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
825
  raise ValueError(
826
  "Less than two GPU ids were configured and tried to run on on multiple GPUs. "
@@ -844,7 +784,6 @@ def _validate_launch_command(args):
844
  for k in defaults.ipex_config:
845
  setattr(args, k, defaults.ipex_config[k])
846
  continue
847
-
848
  # Those args are handled separately
849
  if (
850
  name not in ["compute_environment", "mixed_precision", "distributed_type"]
@@ -853,7 +792,6 @@ def _validate_launch_command(args):
853
  setattr(args, name, attr)
854
  if not args.debug:
855
  args.debug = defaults.debug
856
-
857
  if not args.mixed_precision:
858
  if defaults.mixed_precision is None:
859
  args.mixed_precision = "no"
@@ -869,7 +807,6 @@ def _validate_launch_command(args):
869
  native_amp = is_bf16_available(True)
870
  if args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_tpu_available()):
871
  raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
872
-
873
  # Silently set the default here
874
  if args.dynamo_backend is None:
875
  args.dynamo_backend = "no"
@@ -907,7 +844,6 @@ def _validate_launch_command(args):
907
  args.dynamo_backend = "no"
908
  if args.debug:
909
  logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")
910
-
911
  is_aws_env_disabled = defaults is None or (
912
  defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
913
  )
@@ -923,7 +859,6 @@ def _validate_launch_command(args):
923
  warned.append(
924
  f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
925
  )
926
-
927
  if any(warned):
928
  message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
929
  message += "\n".join(warned)
@@ -932,8 +867,6 @@ def _validate_launch_command(args):
932
  )
933
  logger.warning(message)
934
  return args, defaults, mp_from_config_flag
935
-
936
-
937
  def launch_command(args):
938
  args, defaults, mp_from_config_flag = _validate_launch_command(args)
939
  # Use the proper launcher
@@ -958,13 +891,9 @@ def launch_command(args):
958
  sagemaker_launcher(defaults, args)
959
  else:
960
  simple_launcher(args)
961
-
962
-
963
  def main():
964
  parser = launch_command_parser()
965
  args = parser.parse_args()
966
  launch_command(args)
967
-
968
-
969
  if __name__ == "__main__":
970
  main()
 
1
  #!/usr/bin/env python
2
  logger = logging.getLogger(__name__)
 
3
  options_to_group = {
4
  "--multi-gpu": "Distributed GPUs",
5
  "--tpu": "TPU",
 
7
  "--use_fsdp": "FSDP Arguments",
8
  "--use_megatron_lm": "Megatron-LM Arguments",
9
  }
 
 
10
  def clean_option(option):
11
  "Finds all cases of - after the first two characters and changes them to _"
12
  if option.startswith("--"):
13
  return option[:3] + option[3:].replace("-", "_")
 
 
14
  class _CustomHelpAction(argparse._HelpAction):
15
  """
16
  This is a custom help action that will hide all arguments that are not used in the command line when the help is
 
54
  # If all arguments in the group are hidden, hide the group
55
  if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):
56
  parser._action_groups.remove(group)
 
57
  super().__call__(parser, namespace, values, option_string)
 
 
58
  def launch_command_parser(subparsers=None):
59
  if subparsers is not None:
60
  parser = subparsers.add_parser("launch", add_help=False, allow_abbrev=False)
61
  else:
62
  parser = argparse.ArgumentParser("Accelerate launch command", add_help=False, allow_abbrev=False)
 
63
  parser.register("action", "help", _CustomHelpAction)
64
  parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
 
65
  parser.add_argument(
66
  "--config_file", default=None, help="The config file to use for the default values in the launching script."
67
  )
 
93
  action="store_true",
94
  help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.",
95
  )
 
96
  # Resource selection arguments
97
  resource_args = parser.add_argument_group(
98
  "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
 
117
  default=None,
118
  help="The number of CPU threads per process. Can be tuned for optimal performance.",
119
  )
 
120
  # Dynamo arguments
121
  resource_args.add_argument(
122
  "--dynamo_backend",
 
144
  action="store_true",
145
  help="Whether to enable dynamic shape tracing.",
146
  )
 
147
  # Training Paradigm arguments
148
  paradigm_args = parser.add_argument_group(
149
  "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
 
172
  action="store_true",
173
  help="Whether to use IPEX plugin to speed up training on XPU specifically.",
174
  )
 
175
  # distributed GPU training arguments
176
  distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
177
  distributed_args.add_argument(
 
246
  action="store_true",
247
  help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
248
  )
 
249
  # TPU arguments
250
  tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
251
  tpu_args.add_argument(
 
291
  action="store_true",
292
  help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.",
293
  )
 
294
  # DeepSpeed arguments
295
  deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
296
  deepspeed_args.add_argument(
 
386
  type=str,
387
  help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
388
  )
 
389
  # fsdp arguments
390
  fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
391
  fsdp_args.add_argument(
 
466
  help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
467
  " (useful only when `use_fsdp` flag is passed).",
468
  )
 
469
  # megatron_lm args
470
  megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
471
  megatron_lm_args.add_argument(
 
515
  help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
516
  "(useful only when `use_megatron_lm` flag is passed).",
517
  )
 
518
  # AWS arguments
519
  aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
520
  aws_args.add_argument(
 
542
  "script."
543
  ),
544
  )
 
545
  # Other arguments of the training scripts
546
  parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
 
547
  if subparsers is not None:
548
  parser.set_defaults(func=launch_command)
549
  return parser
 
 
550
  def simple_launcher(args):
551
  cmd, current_env = prepare_simple_launcher_cmd_env(args)
 
552
  process = subprocess.Popen(cmd, env=current_env)
553
  process.wait()
554
  if process.returncode != 0:
 
556
  raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
557
  else:
558
  sys.exit(1)
 
 
559
  def multi_gpu_launcher(args):
560
  import torch.distributed.run as distrib_run
 
561
  current_env = prepare_multi_gpu_env(args)
562
  if not check_cuda_p2p_ib_support():
563
  message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
 
570
  warn = True
571
  if warn:
572
  logger.warning(message)
 
573
  debug = getattr(args, "debug", False)
574
  args = _filter_args(
575
  args,
 
586
  console.print_exception(suppress=[__file__], show_locals=False)
587
  else:
588
  raise
 
 
589
  def deepspeed_launcher(args):
590
  import torch.distributed.run as distrib_run
 
591
  if not is_deepspeed_available():
592
  raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
 
593
  cmd, current_env = prepare_deepspeed_cmd_env(args)
594
  if not check_cuda_p2p_ib_support():
595
  message = "Using RTX 3090 or 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
 
602
  warn = True
603
  if warn:
604
  logger.warning(message)
 
605
  if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
606
  with open(".deepspeed_env", "a") as f:
607
  for key, value in current_env.items():
608
  if ";" in value or " " in value:
609
  continue
610
  f.write(f"{key}={value}\n")
 
611
  process = subprocess.Popen(cmd, env=current_env)
612
  process.wait()
613
  if process.returncode != 0:
 
632
  console.print_exception(suppress=[__file__], show_locals=False)
633
  else:
634
  raise
 
 
635
  def tpu_launcher(args):
636
  import torch_xla.distributed.xla_multiprocessing as xmp
 
637
  if args.no_python:
638
  raise ValueError("--no_python cannot be used with TPU launcher")
 
639
  args, current_env = prepare_tpu(args, {})
 
640
  if args.module:
641
  mod_name = args.training_script
642
  else:
 
644
  script_path = Path(args.training_script)
645
  sys.path.append(str(script_path.parent.resolve()))
646
  mod_name = script_path.stem
 
647
  mod = importlib.import_module(mod_name)
648
  if not hasattr(mod, args.main_training_function):
649
  raise ValueError(
650
  f"Your training script should have a function named {args.main_training_function}, or you should pass a "
651
  "different value to `--main_training_function`."
652
  )
 
653
  # Patch sys.argv
654
  sys.argv = [mod.__file__] + args.training_script_args
 
655
  main_function = getattr(mod, args.main_training_function)
656
  with patch_environment(**current_env):
657
  xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
 
 
658
  def tpu_pod_launcher(args):
659
  from torch_xla.distributed import xla_dist
 
660
  current_env = {}
661
  args, current_env = prepare_tpu(args, current_env, True)
662
  debug = getattr(args, "debug", False)
 
663
  training_script = args.training_script
664
  training_script_args = args.training_script_args
665
  new_args = _filter_args(
666
  args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
667
  )
 
668
  if args.tpu_use_sudo:
669
  new_cmd = ["sudo"]
670
  else:
671
  new_cmd = []
 
672
  new_cmd += [
673
  "accelerate-launch",
674
  "--tpu",
 
685
  str(args.main_training_function),
686
  training_script,
687
  ] + training_script_args
 
688
  new_args.positional = new_cmd
689
  bad_flags = ""
690
  for arg in vars(new_args):
 
707
  console.print_exception(suppress=[__file__], show_locals=False)
708
  else:
709
  raise
 
 
710
  def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
711
  if not is_sagemaker_available():
712
  raise ImportError(
 
716
  raise ValueError(
717
  "SageMaker requires a python training script file and cannot be used with --module or --no_python"
718
  )
 
719
  from sagemaker.huggingface import HuggingFace
 
720
  args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
 
721
  huggingface_estimator = HuggingFace(**args)
 
722
  huggingface_estimator.fit(inputs=sagemaker_inputs)
723
  print(f"You can find your model data at: {huggingface_estimator.model_data}")
 
 
724
  def _validate_launch_command(args):
725
  # Sanity checks
726
  if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
 
729
  )
730
  if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
731
  raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
 
732
  defaults = None
733
  warned = []
734
  mp_from_config_flag = False
 
759
  args.gpu_ids = defaults.gpu_ids
760
  else:
761
  args.gpu_ids = "all"
 
762
  if args.multi_gpu and args.num_machines is None:
763
  args.num_machines = defaults.num_machines
 
764
  if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
765
  raise ValueError(
766
  "Less than two GPU ids were configured and tried to run on on multiple GPUs. "
 
784
  for k in defaults.ipex_config:
785
  setattr(args, k, defaults.ipex_config[k])
786
  continue
 
787
  # Those args are handled separately
788
  if (
789
  name not in ["compute_environment", "mixed_precision", "distributed_type"]
 
792
  setattr(args, name, attr)
793
  if not args.debug:
794
  args.debug = defaults.debug
 
795
  if not args.mixed_precision:
796
  if defaults.mixed_precision is None:
797
  args.mixed_precision = "no"
 
807
  native_amp = is_bf16_available(True)
808
  if args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_tpu_available()):
809
  raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))
 
810
  # Silently set the default here
811
  if args.dynamo_backend is None:
812
  args.dynamo_backend = "no"
 
844
  args.dynamo_backend = "no"
845
  if args.debug:
846
  logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")
 
847
  is_aws_env_disabled = defaults is None or (
848
  defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
849
  )
 
859
  warned.append(
860
  f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
861
  )
 
862
  if any(warned):
863
  message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
864
  message += "\n".join(warned)
 
867
  )
868
  logger.warning(message)
869
  return args, defaults, mp_from_config_flag
 
 
870
  def launch_command(args):
871
  args, defaults, mp_from_config_flag = _validate_launch_command(args)
872
  # Use the proper launcher
 
891
  sagemaker_launcher(defaults, args)
892
  else:
893
  simple_launcher(args)
 
 
894
  def main():
895
  parser = launch_command_parser()
896
  args = parser.parse_args()
897
  launch_command(args)
 
 
898
  if __name__ == "__main__":
899
  main()
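
At its core, `simple_launcher` above runs the training command in a subprocess with a prepared environment and propagates a non-zero exit code. A self-contained sketch of that pattern (the command below is a harmless stand-in for a training script):

```python
import os
import subprocess
import sys


def simple_launch(cmd, extra_env=None):
    # Copy the current environment and layer the launcher-specific variables on top.
    current_env = os.environ.copy()
    current_env.update(extra_env or {})
    process = subprocess.Popen(cmd, env=current_env)
    process.wait()
    if process.returncode != 0:
        raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)


if __name__ == "__main__":
    simple_launch(
        [sys.executable, "-c", "import os; print('LOCAL_RANK =', os.environ['LOCAL_RANK'])"],
        extra_env={"LOCAL_RANK": "0"},
    )
```
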
src/commands/test.py CHANGED
@@ -4,7 +4,6 @@ def test_command_parser(subparsers=None):
4
  parser = subparsers.add_parser("test")
5
  else:
6
  parser = argparse.ArgumentParser("Accelerate test command")
7
-
8
  parser.add_argument(
9
  "--config_file",
10
  default=None,
@@ -15,31 +14,22 @@ def test_command_parser(subparsers=None):
15
  "with 'huggingface'."
16
  ),
17
  )
18
-
19
  if subparsers is not None:
20
  parser.set_defaults(func=test_command)
21
  return parser
22
-
23
-
24
  def test_command(args):
25
  script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
26
-
27
  if args.config_file is None:
28
  test_args = script_name
29
  else:
30
  test_args = f"--config_file={args.config_file} {script_name}"
31
-
32
  cmd = ["accelerate-launch"] + test_args.split()
33
  result = execute_subprocess_async(cmd, env=os.environ.copy())
34
  if result.returncode == 0:
35
  print("Test is a success! You are ready for your distributed training!")
36
-
37
-
38
  def main():
39
  parser = test_command_parser()
40
  args = parser.parse_args()
41
  test_command(args)
42
-
43
-
44
  if __name__ == "__main__":
45
  main()
 
4
  parser = subparsers.add_parser("test")
5
  else:
6
  parser = argparse.ArgumentParser("Accelerate test command")
 
7
  parser.add_argument(
8
  "--config_file",
9
  default=None,
 
14
  "with 'huggingface'."
15
  ),
16
  )
 
17
  if subparsers is not None:
18
  parser.set_defaults(func=test_command)
19
  return parser
 
 
20
  def test_command(args):
21
  script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
 
22
  if args.config_file is None:
23
  test_args = script_name
24
  else:
25
  test_args = f"--config_file={args.config_file} {script_name}"
 
26
  cmd = ["accelerate-launch"] + test_args.split()
27
  result = execute_subprocess_async(cmd, env=os.environ.copy())
28
  if result.returncode == 0:
29
  print("Test is a success! You are ready for your distributed training!")
 
 
30
  def main():
31
  parser = test_command_parser()
32
  args = parser.parse_args()
33
  test_command(args)
 
 
34
  if __name__ == "__main__":
35
  main()
src/commands/tpu.py CHANGED
@@ -1,7 +1,5 @@
1
  #!/usr/bin/env python
2
  _description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
3
-
4
-
5
  def tpu_command_parser(subparsers=None):
6
  if subparsers is not None:
7
  parser = subparsers.add_parser("tpu-config", description=_description)
@@ -57,15 +55,11 @@ def tpu_command_parser(subparsers=None):
57
  pod_args.add_argument(
58
  "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
59
  )
60
-
61
  if subparsers is not None:
62
  parser.set_defaults(func=tpu_command_launcher)
63
  return parser
64
-
65
-
66
  def tpu_command_launcher(args):
67
  defaults = None
68
-
69
  # Get the default from the config file if it exists.
70
  if args.config_file is not None or os.path.isfile(default_config_file):
71
  defaults = load_config_from_file(args.config_file)
@@ -83,14 +77,11 @@ def tpu_command_launcher(args):
83
  args.accelerate_version = "accelerate -U"
84
  elif isinstance(parse(args.accelerate_version), Version):
85
  args.accelerate_version = f"accelerate=={args.accelerate_version}"
86
-
87
  if not args.command_file and not args.command:
88
  raise ValueError("You must specify either a command file or a command to run on the pod.")
89
-
90
  if args.command_file:
91
  with open(args.command_file, "r") as f:
92
  args.command = [f.read().splitlines()]
93
-
94
  # To turn list of lists into list of strings
95
  if isinstance(args.command[0], list):
96
  args.command = [line for cmd in args.command for line in cmd]
@@ -100,7 +91,6 @@ def tpu_command_launcher(args):
100
  new_cmd += [f"pip install {args.accelerate_version}"]
101
  new_cmd += args.command
102
  args.command = "; ".join(new_cmd)
103
-
104
  # Then send it to gcloud
105
  # Eventually try to use google-api-core to do this instead of subprocess
106
  cmd = ["gcloud"]
@@ -124,10 +114,7 @@ def tpu_command_launcher(args):
124
  return
125
  subprocess.run(cmd)
126
  print("Successfully setup pod.")
127
-
128
-
129
  def main():
130
  parser = tpu_command_parser()
131
  args = parser.parse_args()
132
-
133
  tpu_command_launcher(args)
 
1
  #!/usr/bin/env python
2
  _description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
 
 
3
  def tpu_command_parser(subparsers=None):
4
  if subparsers is not None:
5
  parser = subparsers.add_parser("tpu-config", description=_description)
 
55
  pod_args.add_argument(
56
  "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
57
  )
 
58
  if subparsers is not None:
59
  parser.set_defaults(func=tpu_command_launcher)
60
  return parser
 
 
61
  def tpu_command_launcher(args):
62
  defaults = None
 
63
  # Get the default from the config file if it exists.
64
  if args.config_file is not None or os.path.isfile(default_config_file):
65
  defaults = load_config_from_file(args.config_file)
 
77
  args.accelerate_version = "accelerate -U"
78
  elif isinstance(parse(args.accelerate_version), Version):
79
  args.accelerate_version = f"accelerate=={args.accelerate_version}"
 
80
  if not args.command_file and not args.command:
81
  raise ValueError("You must specify either a command file or a command to run on the pod.")
 
82
  if args.command_file:
83
  with open(args.command_file, "r") as f:
84
  args.command = [f.read().splitlines()]
 
85
  # To turn list of lists into list of strings
86
  if isinstance(args.command[0], list):
87
  args.command = [line for cmd in args.command for line in cmd]
 
91
  new_cmd += [f"pip install {args.accelerate_version}"]
92
  new_cmd += args.command
93
  args.command = "; ".join(new_cmd)
 
94
  # Then send it to gcloud
95
  # Eventually try to use google-api-core to do this instead of subprocess
96
  cmd = ["gcloud"]
 
114
  return
115
  subprocess.run(cmd)
116
  print("Successfully setup pod.")
 
 
117
  def main():
118
  parser = tpu_command_parser()
119
  args = parser.parse_args()
 
120
  tpu_command_launcher(args)
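
Finally, `tpu_command_launcher` above collapses the per-line setup commands into a single `; `-joined string before handing it to `gcloud ... ssh --command=...`. A small sketch of just that flattening step (the pip-install line stands in for the version handling in the real command):

```python
def build_pod_command(commands, accelerate_version="accelerate -U"):
    # A command file is read as a list of lists (one list of lines per file); flatten it first.
    if commands and isinstance(commands[0], list):
        commands = [line for cmd in commands for line in cmd]
    new_cmd = [f"pip install {accelerate_version}"]
    new_cmd += commands
    # gcloud's --command flag takes one string, so everything is joined with "; ".
    return "; ".join(new_cmd)


if __name__ == "__main__":
    print(build_pod_command([["echo 'setting up pod'", "python train.py"]]))
```
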
src/data_loader.py CHANGED
@@ -1,5 +1,4 @@
1
  logger = get_logger(__name__)
2
-
3
  # kwargs of the DataLoader in min version 1.4.0.
4
  _PYTORCH_DATALOADER_KWARGS = {
5
  "batch_size": 1,
@@ -17,22 +16,16 @@ _PYTORCH_DATALOADER_KWARGS = {
17
  "prefetch_factor": 2,
18
  "persistent_workers": False,
19
  }
20
-
21
  # kwargs added after by version
22
  _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}
23
-
24
  for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
25
  if is_torch_version(">=", v):
26
  _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
27
-
28
-
29
  class SeedableRandomSampler(RandomSampler):
30
  """
31
  Same as a random sampler, except that in `__iter__` a seed can be used.
32
-
33
  Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
34
  and be fully reproducible on multiple iterations.
35
-
36
  If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
37
  (stored in `self.epoch`).
38
  """
@@ -40,7 +33,6 @@ class SeedableRandomSampler(RandomSampler):
40
  super().__init__(*args, **kwargs)
41
  self.epoch = 0
42
  self.seed = torch.random.initial_seed()
43
-
44
  def __iter__(self):
45
  if self.generator is None:
46
  self.generator = torch.Generator()
@@ -51,19 +43,15 @@ class SeedableRandomSampler(RandomSampler):
51
  self.generator.manual_seed(seed)
52
  yield from super().__iter__()
53
  self.set_epoch(self.epoch + 1)
54
-
55
  def set_epoch(self, epoch: int):
56
  "Sets the current iteration of the sampler."
57
  self.epoch = epoch
58
-
59
-
60
  class BatchSamplerShard(BatchSampler):
61
  """
62
  Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
63
  always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
64
  Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
65
  at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
66
-
67
  Args:
68
  batch_sampler (`torch.utils.data.sampler.BatchSampler`):
69
  The batch sampler to split in several shards.
@@ -74,9 +62,7 @@ class BatchSamplerShard(BatchSampler):
74
  split_batches (`bool`, *optional*, defaults to `False`):
75
  Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
76
  yielding different full batches on each process.
77
-
78
  On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:
79
-
80
  - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if
81
  this argument is set to `False`.
82
  - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`
@@ -84,14 +70,10 @@ class BatchSamplerShard(BatchSampler):
84
  even_batches (`bool`, *optional*, defaults to `True`):
85
  Whether or not to loop back at the beginning of the sampler when the number of samples is not a round
86
  multiple of (original batch size / number of processes).
87
-
88
  <Tip warning={true}>
89
-
90
  `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
91
  equal to `False`
92
-
93
  </Tip>"""
94
-
95
  def __init__(
96
  self,
97
  batch_sampler: BatchSampler,
@@ -117,11 +99,9 @@ class BatchSamplerShard(BatchSampler):
117
  "You need to use `even_batches=False` when the batch sampler has no batch size. If you "
118
  "are not calling this method directly, set `accelerator.even_batches=False` instead."
119
  )
120
-
121
  @property
122
  def total_length(self):
123
  return len(self.batch_sampler)
124
-
125
  def __len__(self):
126
  if self.split_batches:
127
  # Split batches does not change the length of the batch sampler
@@ -139,10 +119,8 @@ class BatchSamplerShard(BatchSampler):
139
  else:
140
  # Otherwise it depends on the process index.
141
  return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
142
-
143
  def __iter__(self):
144
  return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
145
-
146
  def _iter_with_split(self):
147
  initial_data = []
148
  batch_length = self.batch_sampler.batch_size // self.num_processes
@@ -152,7 +130,6 @@ class BatchSamplerShard(BatchSampler):
152
  if len(batch) == self.batch_size:
153
  # If the batch is full, we yield the part of it this process is responsible of.
154
  yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
155
-
156
  # If drop_last is True or the last batch was full, iteration is over, otherwise...
157
  if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
158
  if not self.even_batches:
@@ -164,7 +141,6 @@ class BatchSamplerShard(BatchSampler):
164
  initial_data += initial_data
165
  batch = batch + initial_data
166
  yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
167
-
168
  def _iter_with_no_split(self):
169
  initial_data = []
170
  batch_to_yield = []
@@ -181,7 +157,6 @@ class BatchSamplerShard(BatchSampler):
181
  ):
182
  yield batch_to_yield
183
  batch_to_yield = []
184
-
185
  # If drop_last is True, iteration is over, otherwise...
186
  if not self.drop_last and len(initial_data) > 0:
187
  if not self.even_batches:
@@ -191,16 +166,13 @@ class BatchSamplerShard(BatchSampler):
191
  # ... we yield the complete batch we had saved before if it has the proper length
192
  if len(batch_to_yield) == self.batch_size:
193
  yield batch_to_yield
194
-
195
  # For degenerate cases where the dataset has less than num_process * batch_size samples
196
  while len(initial_data) < self.num_processes * self.batch_size:
197
  initial_data += initial_data
198
-
199
  # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
200
  if len(batch) == self.batch_size:
201
  batch = []
202
  idx += 1
203
-
204
  # Make sure we yield a multiple of self.num_processes batches
205
  cycle_index = 0
206
  while idx % self.num_processes != 0 or len(batch) > 0:
@@ -211,8 +183,6 @@ class BatchSamplerShard(BatchSampler):
211
  cycle_index = end_index
212
  batch = []
213
  idx += 1
214
-
215
-
216
  class IterableDatasetShard(IterableDataset):
217
  """
218
  Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
@@ -220,7 +190,6 @@ class IterableDatasetShard(IterableDataset):
220
  `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
221
  `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
222
  be too small or loop with indices from the beginning.
223
-
224
  Args:
225
  dataset (`torch.utils.data.dataset.IterableDataset`):
226
  The dataset to split in several shards.
@@ -237,9 +206,7 @@ class IterableDatasetShard(IterableDataset):
237
  split_batches (`bool`, *optional*, defaults to `False`):
238
  Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
239
  yielding different full batches on each process.
240
-
241
  On two processes with an iterable dataset yielding `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
242
-
243
  - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
244
  argument is set to `False`.
245
  - the shard on process 0 to yield `[0, 1, 4, 5]` and the shard on process 1 to yield `[2, 3, 6, 7]` if
@@ -265,19 +232,16 @@ class IterableDatasetShard(IterableDataset):
265
  self.num_processes = num_processes
266
  self.process_index = process_index
267
  self.split_batches = split_batches
268
-
269
  def set_epoch(self, epoch):
270
  self.epoch = epoch
271
  if hasattr(self.dataset, "set_epoch"):
272
  self.dataset.set_epoch(epoch)
273
-
274
  def __len__(self):
275
  # We will just raise the downstream error if the underlying dataset is not sized
276
  if self.drop_last:
277
  return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
278
  else:
279
  return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
280
-
281
  def __iter__(self):
282
  if (
283
  not hasattr(self.dataset, "set_epoch")
@@ -288,7 +252,6 @@ class IterableDatasetShard(IterableDataset):
288
  real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
289
  process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
290
  process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
291
-
292
  first_batch = None
293
  current_batch = []
294
  for element in self.dataset:
@@ -300,7 +263,6 @@ class IterableDatasetShard(IterableDataset):
300
  if first_batch is None:
301
  first_batch = current_batch.copy()
302
  current_batch = []
303
-
304
  # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
305
  if not self.drop_last and len(current_batch) > 0:
306
  if first_batch is None:
@@ -309,29 +271,22 @@ class IterableDatasetShard(IterableDataset):
309
  current_batch += first_batch
310
  for i in process_slice:
311
  yield current_batch[i]
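A minimal sketch of the sharding behaviour documented above for an iterable dataset, assuming the class is importable from this module and that the constructor accepts the `batch_size`/`num_processes`/`process_index` arguments mirrored by the attributes used in `__iter__`:

```python
from torch.utils.data import IterableDataset

class RangeDataset(IterableDataset):
    def __iter__(self):
        yield from range(8)

shard_0 = IterableDatasetShard(RangeDataset(), batch_size=4, num_processes=2, process_index=0)
shard_1 = IterableDatasetShard(RangeDataset(), batch_size=4, num_processes=2, process_index=1)
print(list(shard_0))  # [0, 1, 2, 3]
print(list(shard_1))  # [4, 5, 6, 7]
```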
312
-
313
-
314
  class DataLoaderStateMixin:
315
  """
316
  Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the
317
  end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other
318
  useful information that might be needed.
319
-
320
  **Available attributes:**
321
-
322
  - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch
323
  - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
324
  batch size
325
-
326
  """
327
  def __init_subclass__(cls, **kwargs):
328
  cls.end_of_dataloader = False
329
  cls.remainder = -1
330
-
331
  def reset(self):
332
  self.end_of_dataloader = False
333
  self.remainder = -1
334
-
335
  def begin(self):
336
  "Prepares the gradient state for the current dataloader"
337
  self.reset()
@@ -340,16 +295,12 @@ class DataLoaderStateMixin:
340
  length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
341
  self.remainder = length % self.total_batch_size
342
  self.gradient_state._add_dataloader(self)
343
-
344
  def end(self):
345
  "Cleans up the gradient state after exiting the dataloader"
346
  self.gradient_state._remove_dataloader(self)
347
-
348
-
349
  class DataLoaderShard(DataLoader, DataLoaderStateMixin):
350
  """
351
  Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
352
-
353
  Args:
354
  dataset (`torch.utils.data.dataset.Dataset`):
355
  The dataset to use to build this dataloader.
@@ -358,7 +309,6 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
358
  rng_types (list of `str` or [`~utils.RNGType`]):
359
  The list of random number generators to synchronize at the beginning of each iteration. Should be one or
360
  several of:
361
-
362
  - `"torch"`: the base torch random number generator
363
  - `"cuda"`: the CUDA random number generator (GPU only)
364
  - `"xla"`: the XLA random number generator (TPU only)
@@ -369,13 +319,10 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
369
  The number of batches to skip at the beginning.
370
  kwargs:
371
  All other keyword arguments to pass to the regular `DataLoader` initialization.
372
-
373
  **Available attributes:**
374
-
375
  - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
376
  Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
377
  number of processes
378
-
379
  - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
380
  """
381
  def __init__(
@@ -396,12 +343,10 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
396
  self.gradient_state = GradientState()
397
  self._drop_last = _drop_last
398
  self.iteration = 0
399
-
400
  def __iter__(self):
401
  if self.rng_types is not None:
402
  synchronize_rng_states(self.rng_types, self.synchronized_generator)
403
  self.begin()
404
-
405
  self.set_epoch(self.iteration)
406
  dataloader_iter = super().__iter__()
407
  # We iterate one batch ahead to check when we are at the end
@@ -409,7 +354,6 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
409
  current_batch = next(dataloader_iter)
410
  except StopIteration:
411
  yield
412
-
413
  batch_index = 0
414
  while True:
415
  try:
@@ -426,10 +370,8 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
426
  if batch_index >= self.skip_batches:
427
  yield current_batch
428
  break
429
-
430
  self.iteration += 1
431
  self.end()
432
-
433
  def set_epoch(self, epoch: int):
434
  # In case it is manually passed in, the user can set it to what they like
435
  if self.iteration != epoch:
@@ -440,7 +382,6 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
440
  # or in general HF datasets `Datasets`
441
  elif hasattr(self.dataset, "set_epoch"):
442
  self.dataset.set_epoch(epoch)
443
-
444
  @property
445
  def total_batch_size(self):
446
  batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
@@ -449,63 +390,47 @@ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
449
  if getattr(batch_sampler, "split_batches", False)
450
  else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
451
  )
452
-
453
  @property
454
  def total_dataset_length(self):
455
  if hasattr(self.dataset, "total_length"):
456
  return self.dataset.total_length
457
  else:
458
  return len(self.dataset)
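As a quick illustration of the two attributes above (plain arithmetic, not an instantiation; in practice these objects come out of `prepare_data_loader` below):

```python
# With a per-process batch size of 16 on 4 processes and split_batches=False:
batch_size, num_processes = 16, 4
total_batch_size = batch_size * num_processes  # 64
# With split_batches=True the dataloader's batch size already is the total:
total_batch_size_split = batch_size  # 16
```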
459
-
460
-
461
  if is_tpu_available(check_device=False):
462
  import torch_xla.distributed.parallel_loader as xpl
463
-
464
  class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
465
  """
466
  Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.
467
-
468
  XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to
469
  prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main
470
  thread only.
471
-
472
  **Available attributes:**
473
-
474
  - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
475
  Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
476
  number of processes
477
-
478
  - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
479
  """
480
  def __init__(self, dataloader: DataLoaderShard, device: torch.device):
481
  super().__init__(dataloader, device)
482
  self._rng_types = self._loader.rng_types
483
  self._loader.rng_types = None
484
-
485
  def __iter__(self):
486
  if self._rng_types is not None:
487
  synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
488
-
489
  return super().__iter__()
490
-
491
  @property
492
  def total_batch_size(self):
493
  return self._loader.total_batch_size
494
-
495
  @property
496
  def total_dataset_length(self):
497
  return self._loader.total_dataset_length
498
-
499
  @property
500
  def batch_sampler(self):
501
  return self._loader.batch_sampler
502
-
503
-
504
  class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
505
  """
506
  Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
507
  process their part of the batch.
508
-
509
  Args:
510
  split_batches (`bool`, *optional*, defaults to `False`):
511
  Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
@@ -516,13 +441,10 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
516
  size of the `dataloader` is a round multiple of `batch_size`.
517
  skip_batches (`int`, *optional*, defaults to 0):
518
  The number of batches to skip at the beginning of an iteration.
519
-
520
  **Available attributes:**
521
-
522
  - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
523
  Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
524
  number of processes
525
-
526
  - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
527
  """
528
  def __init__(
@@ -531,7 +453,6 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
531
  shuffle = False
532
  if is_torch_version(">=", "1.11.0"):
533
  from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
534
-
535
  # We need to save the shuffling state of the DataPipe
536
  if isinstance(dataset, ShufflerIterDataPipe):
537
  shuffle = dataset._shuffle_enabled
@@ -539,15 +460,12 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
539
  self.split_batches = split_batches
540
  if shuffle:
541
  torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
542
-
543
  self.gradient_state = GradientState()
544
  self.state = AcceleratorState()
545
  self._drop_last = _drop_last
546
  self.skip_batches = skip_batches
547
-
548
  self.slice_fn = slice_tensors if slice_fn is None else slice_fn
549
  self.iteration = 0
550
-
551
  def _fetch_batches(self, iterator):
552
  batches, batch = None, None
553
  # On process 0, we gather the batch to dispatch.
@@ -584,7 +502,6 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
584
  batch_info = [None, True]
585
  broadcast_object_list(batch_info)
586
  return batch, batch_info
587
-
588
  def __iter__(self):
589
  self.begin()
590
  self.set_epoch(self.iteration)
@@ -603,14 +520,12 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
603
  batch_index = 0
604
  while not stop_iteration:
605
  batch, batch_info = next_batch, next_batch_info
606
-
607
  if self.state.process_index != 0:
608
  # Initialize tensors on other processes than process 0.
609
  batch = initialize_tensors(batch_info[0])
610
  batch = send_to_device(batch, self.state.device)
611
  # Broadcast the batch before splitting it.
612
  batch = broadcast(batch, from_process=0)
613
-
614
  if not self._drop_last and first_batch is None:
615
  # We keep at least num processes elements of the first batch to be able to complete the last batch
616
  first_batch = self.slice_fn(
@@ -619,15 +534,12 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
619
  process_index=self.state.process_index,
620
  num_processes=self.state.num_processes,
621
  )
622
-
623
  if batch is None:
624
  raise ValueError(
625
  f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration."
626
  )
627
-
628
  observed_batch_size = find_batch_size(batch)
629
  batch_size = observed_batch_size // self.state.num_processes
630
-
631
  stop_iteration = self._stop_iteration
632
  if not stop_iteration:
633
  # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in
@@ -636,13 +548,11 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
636
  # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.
637
  if self._stop_iteration and next_batch_info[0] is None:
638
  stop_iteration = True
639
-
640
  if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:
641
  # If the last batch is not complete, let's add the first batch to it.
642
  batch = concatenate([batch, first_batch], dim=0)
643
  # Batch size computation above is wrong, it's off by 1 so we fix it.
644
  batch_size += 1
645
-
646
  data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)
647
  batch = self.slice_fn(
648
  batch,
@@ -650,7 +560,6 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
650
  process_index=self.state.process_index,
651
  num_processes=self.state.num_processes,
652
  )
653
-
654
  if stop_iteration:
655
  self.end_of_dataloader = True
656
  self.remainder = observed_batch_size
@@ -659,7 +568,6 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
659
  batch_index += 1
660
  self.iteration += 1
661
  self.end()
662
-
663
  def set_epoch(self, epoch: int):
664
  # In case it is manually passed in, the user can set it to what they like
665
  if self.iteration != epoch:
@@ -668,7 +576,6 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
668
  self.batch_sampler.sampler.set_epoch(epoch)
669
  elif hasattr(self.dataset, "set_epoch"):
670
  self.dataset.set_epoch(epoch)
671
-
672
  def __len__(self):
673
  whole_length = super().__len__()
674
  if self.split_batches:
@@ -677,18 +584,14 @@ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
677
  return whole_length // self.state.num_processes
678
  else:
679
  return math.ceil(whole_length / self.state.num_processes)
680
-
681
  @property
682
  def total_batch_size(self):
683
  return (
684
  self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)
685
  )
686
-
687
  @property
688
  def total_dataset_length(self):
689
  return len(self.dataset)
690
-
691
-
692
  def prepare_data_loader(
693
  dataloader: DataLoader,
694
  device: Optional[torch.device] = None,
@@ -703,10 +606,8 @@ def prepare_data_loader(
703
  ) -> DataLoader:
704
  """
705
  Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
706
-
707
  Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
708
  at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
709
-
710
  Args:
711
  dataloader (`torch.utils.data.dataloader.DataLoader`):
712
  The data loader to split across several devices.
@@ -721,11 +622,9 @@ def prepare_data_loader(
721
  Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
722
  yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of
723
  `num_processes` batches at each iteration).
724
-
725
  Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
726
  this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
727
  otherwise.
728
-
729
  Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
730
  `batch_size`.
731
  put_on_device (`bool`, *optional*, defaults to `False`):
@@ -734,13 +633,11 @@ def prepare_data_loader(
734
  rng_types (list of `str` or [`~utils.RNGType`]):
735
  The list of random number generators to synchronize at the beginning of each iteration. Should be one or
736
  several of:
737
-
738
  - `"torch"`: the base torch random number generator
739
  - `"cuda"`: the CUDA random number generator (GPU only)
740
  - `"xla"`: the XLA random number generator (TPU only)
741
  - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
742
  dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
743
-
744
  dispatch_batches (`bool`, *optional*):
745
  If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches
746
  are split and broadcast to each process. Will default to `True` when the underlying dataset is an
@@ -753,15 +650,11 @@ def prepare_data_loader(
753
  If passed, this function will be used to slice tensors across `num_processes`. Will default to
754
  [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be
755
  ignored otherwise.
756
-
757
  Returns:
758
  `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches assigned to the current process.
759
-
760
  <Tip warning={true}>
761
-
762
  `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
763
  equal to `False`
764
-
765
  </Tip>
766
  """
767
  if dispatch_batches is None:
@@ -769,7 +662,6 @@ def prepare_data_loader(
769
  dispatch_batches = False
770
  else:
771
  dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
772
-
773
  if dispatch_batches and not put_on_device:
774
  raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
775
  # Grab defaults from AcceleratorState
@@ -778,14 +670,12 @@ def prepare_data_loader(
778
  num_processes = state.num_processes
779
  if process_index is None:
780
  process_index = state.process_index
781
-
782
  # Sanity check
783
  if split_batches and dataloader.batch_size > 1 and dataloader.batch_size % num_processes != 0:
784
  raise ValueError(
785
  f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
786
  f"needs to be a round multiple of the number of processes ({num_processes})."
787
  )
788
-
789
  new_dataset = dataloader.dataset
790
  # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
791
  new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
@@ -807,7 +697,6 @@ def prepare_data_loader(
807
  num_samples=sampler._num_samples,
808
  generator=getattr(sampler, "generator", torch.Generator()),
809
  )
810
-
811
  # No change if no multiprocess
812
  if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
813
  if isinstance(new_dataset, IterableDataset):
@@ -830,7 +719,6 @@ def prepare_data_loader(
830
  split_batches=split_batches,
831
  even_batches=even_batches,
832
  )
833
-
834
  # We ignore all of those since they are all dealt with by our new_batch_sampler
835
  ignore_kwargs = [
836
  "batch_size",
@@ -839,16 +727,13 @@ def prepare_data_loader(
839
  "batch_sampler",
840
  "drop_last",
841
  ]
842
-
843
  if rng_types is not None and synchronized_generator is None and "generator" in rng_types:
844
  rng_types.remove("generator")
845
-
846
  kwargs = {
847
  k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
848
  for k in _PYTORCH_DATALOADER_KWARGS
849
  if k not in ignore_kwargs
850
  }
851
-
852
  # Need to provide batch_size as batch_sampler is None for Iterable dataset
853
  if new_batch_sampler is None:
854
  kwargs["drop_last"] = dataloader.drop_last
@@ -891,12 +776,9 @@ def prepare_data_loader(
891
  _drop_last=dataloader.drop_last,
892
  **kwargs,
893
  )
894
-
895
  if state.distributed_type == DistributedType.TPU:
896
  return MpDeviceLoaderWrapper(dataloader, device)
897
  return dataloader
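A minimal sketch of calling `prepare_data_loader` directly (normally `Accelerator.prepare` does this for you); the two-process layout below is hypothetical and would usually come from the launcher rather than being passed by hand:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(64).float())
dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

prepared = prepare_data_loader(
    dataloader,
    device=torch.device("cpu"),
    num_processes=2,      # hypothetical world size
    process_index=0,      # rank of the current process
    split_batches=False,  # each process still sees batches of 8
)
print(len(prepared))  # 4: the 8 original batches are sharded across 2 processes
```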
898
-
899
-
900
  class SkipBatchSampler(BatchSampler):
901
  """
902
  A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
@@ -904,24 +786,18 @@ class SkipBatchSampler(BatchSampler):
904
  def __init__(self, batch_sampler, skip_batches=0):
905
  self.batch_sampler = batch_sampler
906
  self.skip_batches = skip_batches
907
-
908
  def __iter__(self):
909
  for index, samples in enumerate(self.batch_sampler):
910
  if index >= self.skip_batches:
911
  yield samples
912
-
913
  @property
914
  def total_length(self):
915
  return len(self.batch_sampler)
916
-
917
  def __len__(self):
918
  return len(self.batch_sampler) - self.skip_batches
919
-
920
-
921
  class SkipDataLoader(DataLoader):
922
  """
923
  Subclass of a PyTorch `DataLoader` that will skip the first batches.
924
-
925
  Args:
926
  dataset (`torch.utils.data.dataset.Dataset`):
927
  The dataset to use to build this dataloader.
@@ -933,13 +809,10 @@ class SkipDataLoader(DataLoader):
933
  def __init__(self, dataset, skip_batches=0, **kwargs):
934
  super().__init__(dataset, **kwargs)
935
  self.skip_batches = skip_batches
936
-
937
  def __iter__(self):
938
  for index, batch in enumerate(super().__iter__()):
939
  if index >= self.skip_batches:
940
  yield batch
941
-
942
-
943
  def skip_first_batches(dataloader, num_batches=0):
944
  """
945
  Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
@@ -952,7 +825,6 @@ def skip_first_batches(dataloader, num_batches=0):
952
  sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
953
  batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
954
  new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
955
-
956
  # We ignore all of those since they are all dealt with by our new_batch_sampler
957
  ignore_kwargs = [
958
  "batch_size",
@@ -961,18 +833,15 @@ def skip_first_batches(dataloader, num_batches=0):
961
  "batch_sampler",
962
  "drop_last",
963
  ]
964
-
965
  kwargs = {
966
  k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
967
  for k in _PYTORCH_DATALOADER_KWARGS
968
  if k not in ignore_kwargs
969
  }
970
-
971
  # Need to provide batch_size as batch_sampler is None for Iterable dataset
972
  if new_batch_sampler is None:
973
  kwargs["drop_last"] = dataloader.drop_last
974
  kwargs["batch_size"] = dataloader.batch_size
975
-
976
  if isinstance(dataloader, DataLoaderDispatcher):
977
  if new_batch_sampler is None:
978
  # Need to manually skip batches in the dataloader
@@ -1006,5 +875,4 @@ def skip_first_batches(dataloader, num_batches=0):
1006
  dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
1007
  else:
1008
  dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
1009
-
1010
  return dataloader
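A minimal sketch of resuming an epoch part-way through with the helper above; the number of already-consumed batches is hypothetical (it would normally come from a checkpoint):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataloader = DataLoader(TensorDataset(torch.arange(100).float()), batch_size=10)
resumed = skip_first_batches(dataloader, num_batches=3)

print(len(dataloader), len(resumed))  # 10 7
(first,) = next(iter(resumed))
print(first)  # tensor([30., 31., ..., 39.]) -- the 4th batch of the original loader
```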
 
src/hooks.py CHANGED
@@ -2,99 +2,76 @@ class ModelHook:
2
  """
3
  A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
4
  with existing PyTorch hooks is that they get passed along the kwargs.
5
-
6
  Class attribute:
7
  - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
8
  the `torch.no_grad()` context manager.
9
  """
10
  no_grad = False
11
-
12
  def init_hook(self, module):
13
  """
14
  To be executed when the hook is attached to the module.
15
-
16
  Args:
17
  module (`torch.nn.Module`): The module attached to this hook.
18
  """
19
  return module
20
-
21
  def pre_forward(self, module, *args, **kwargs):
22
  """
23
  To be executed just before the forward method of the model.
24
-
25
  Args:
26
  module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
27
  args (`Tuple[Any]`): The positional arguments passed to the module.
28
  kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
29
-
30
  Returns:
31
  `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
32
  """
33
  return args, kwargs
34
-
35
  def post_forward(self, module, output):
36
  """
37
  To be executed just after the forward method of the model.
38
-
39
  Args:
40
  module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.
41
  output (`Any`): The output of the module.
42
-
43
  Returns:
44
  `Any`: The processed `output`.
45
  """
46
  return output
47
-
48
  def detach_hook(self, module):
49
  """
50
  To be executed when the hook is detached from a module.
51
-
52
  Args:
53
  module (`torch.nn.Module`): The module detached from this hook.
54
  """
55
  return module
56
-
57
-
58
  class SequentialHook(ModelHook):
59
  """
60
  A hook that can contain several hooks and iterates through them at each event.
61
  """
62
  def __init__(self, *hooks):
63
  self.hooks = hooks
64
-
65
  def init_hook(self, module):
66
  for hook in self.hooks:
67
  module = hook.init_hook(module)
68
  return module
69
-
70
  def pre_forward(self, module, *args, **kwargs):
71
  for hook in self.hooks:
72
  args, kwargs = hook.pre_forward(module, *args, **kwargs)
73
  return args, kwargs
74
-
75
  def post_forward(self, module, output):
76
  for hook in self.hooks:
77
  output = hook.post_forward(module, output)
78
  return output
79
-
80
  def detach_hook(self, module):
81
  for hook in self.hooks:
82
  module = hook.detach_hook(module)
83
  return module
84
-
85
-
86
  def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
87
  """
88
  Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove
89
  this behavior and restore the original `forward` method, use `remove_hook_from_module`.
90
-
91
  <Tip warning={true}>
92
-
93
  If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
94
  together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
95
-
96
  </Tip>
97
-
98
  Args:
99
  module (`torch.nn.Module`):
100
  The module to attach a hook to.
@@ -102,7 +79,6 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
102
  The hook to attach.
103
  append (`bool`, *optional*, defaults to `False`):
104
  Whether the hook should be chained with an existing one (if module already contains a hook) or not.
105
-
106
  Returns:
107
  `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
108
  be discarded).
@@ -111,17 +87,14 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
111
  old_hook = module._hf_hook
112
  remove_hook_from_module(module)
113
  hook = SequentialHook(old_hook, hook)
114
-
115
  if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
116
  # If we already put some hook on this module, we replace it with the new one.
117
  old_forward = module._old_forward
118
  else:
119
  old_forward = module.forward
120
  module._old_forward = old_forward
121
-
122
  module = hook.init_hook(module)
123
  module._hf_hook = hook
124
-
125
  def new_forward(module, *args, **kwargs):
126
  args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
127
  if module._hf_hook.no_grad:
@@ -130,20 +103,14 @@ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False)
130
  else:
131
  output = module._old_forward(*args, **kwargs)
132
  return module._hf_hook.post_forward(module, output)
133
-
134
  module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
135
-
136
  return module
137
-
138
-
139
  def remove_hook_from_module(module: nn.Module, recurse=False):
140
  """
141
  Removes any hook attached to a module via `add_hook_to_module`.
142
-
143
  Args:
144
  module (`torch.nn.Module`): The module to attach a hook to.
145
  recurse (`bool`, *optional*): Whether to remove the hooks recursively.
146
-
147
  Returns:
148
  `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
149
  be discarded).
@@ -151,23 +118,17 @@ def remove_hook_from_module(module: nn.Module, recurse=False):
151
  if hasattr(module, "_hf_hook"):
152
  module._hf_hook.detach_hook(module)
153
  delattr(module, "_hf_hook")
154
-
155
  if hasattr(module, "_old_forward"):
156
  module.forward = module._old_forward
157
  delattr(module, "_old_forward")
158
-
159
  if recurse:
160
  for child in module.children():
161
  remove_hook_from_module(child, recurse)
162
-
163
  return module
164
-
165
-
166
  class AlignDevicesHook(ModelHook):
167
  """
168
  A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
169
  associated module, potentially offloading the weights after the forward pass.
170
-
171
  Args:
172
  execution_device (`torch.device`, *optional*):
173
  The device on which inputs and model weights should be placed before the forward pass.
@@ -199,19 +160,16 @@ class AlignDevicesHook(ModelHook):
199
  self.offload_buffers = offload_buffers
200
  self.place_submodules = place_submodules
201
  self.skip_keys = skip_keys
202
-
203
  # Will contain the input device when `io_same_device=True`.
204
  self.input_device = None
205
  self.param_original_devices = {}
206
  self.buffer_original_devices = {}
207
-
208
  def __repr__(self):
209
  return (
210
  f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
211
  f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
212
  f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
213
  )
214
-
215
  def init_hook(self, module):
216
  if not self.offload and self.execution_device is not None:
217
  for name, _ in named_module_tensors(module, recurse=self.place_submodules):
@@ -237,9 +195,7 @@ class AlignDevicesHook(ModelHook):
237
  elif self.offload_buffers and self.execution_device is not None:
238
  for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
239
  set_module_tensor_to_device(module, name, self.execution_device)
240
-
241
  return module
242
-
243
  def pre_forward(self, module, *args, **kwargs):
244
  if self.io_same_device:
245
  self.input_device = find_device([args, kwargs])
@@ -257,11 +213,9 @@ class AlignDevicesHook(ModelHook):
257
  set_module_tensor_to_device(
258
  module, name, self.execution_device, value=self.weights_map[name], fp16_statistics=fp16_statistics
259
  )
260
-
261
  return send_to_device(args, self.execution_device), send_to_device(
262
  kwargs, self.execution_device, skip_keys=self.skip_keys
263
  )
264
-
265
  def post_forward(self, module, output):
266
  if self.offload:
267
  for name, _ in named_module_tensors(
@@ -274,20 +228,15 @@ class AlignDevicesHook(ModelHook):
274
  if type(module).__name__ == "Linear8bitLt":
275
  module.state.SCB = None
276
  module.state.CxB = None
277
-
278
  if self.io_same_device and self.input_device is not None:
279
  output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
280
-
281
  return output
282
-
283
  def detach_hook(self, module):
284
  if self.offload:
285
  for name, device in self.original_devices.items():
286
  if device != torch.device("meta"):
287
  set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
288
  return module
289
-
290
-
291
  def attach_execution_device_hook(
292
  module: torch.nn.Module,
293
  execution_device: Union[int, str, torch.device],
@@ -297,7 +246,6 @@ def attach_execution_device_hook(
297
  """
298
  Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
299
  execution device
300
-
301
  Args:
302
  module (`torch.nn.Module`):
303
  The module where we want to attach the hooks.
@@ -313,15 +261,11 @@ def attach_execution_device_hook(
313
  """
314
  if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
315
  add_hook_to_module(module, AlignDevicesHook(execution_device, skip_keys=skip_keys))
316
-
317
  # Break the recursion if we get to a preload module.
318
  if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
319
  return
320
-
321
  for child in module.children():
322
  attach_execution_device_hook(child, execution_device)
323
-
324
-
325
  def attach_align_device_hook(
326
  module: torch.nn.Module,
327
  execution_device: Optional[torch.device] = None,
@@ -335,7 +279,6 @@ def attach_align_device_hook(
335
  """
336
  Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
337
  buffers.
338
-
339
  Args:
340
  module (`torch.nn.Module`):
341
  The module where we want to attach the hooks.
@@ -362,7 +305,6 @@ def attach_align_device_hook(
362
  full_offload = (
363
  offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
364
  )
365
-
366
  if len(list(directs)) > 0 or full_offload:
367
  if weights_map is not None:
368
  prefix = f"{module_name}." if len(module_name) > 0 else ""
@@ -378,11 +320,9 @@ def attach_align_device_hook(
378
  skip_keys=skip_keys,
379
  )
380
  add_hook_to_module(module, hook, append=True)
381
-
382
  # We stop the recursion in case we hit the full offload.
383
  if full_offload:
384
  return
385
-
386
  # Recurse on all children of the module.
387
  for child_name, child in module.named_children():
388
  child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
@@ -396,20 +336,15 @@ def attach_align_device_hook(
396
  preload_module_classes=preload_module_classes,
397
  skip_keys=skip_keys,
398
  )
399
-
400
-
401
  def remove_hook_from_submodules(module: nn.Module):
402
  """
403
  Recursively removes all hooks attached on the submodules of a given model.
404
-
405
  Args:
406
  module (`torch.nn.Module`): The module on which to remove all hooks.
407
  """
408
  remove_hook_from_module(module)
409
  for child in module.children():
410
  remove_hook_from_submodules(child)
411
-
412
-
413
  def attach_align_device_hook_on_blocks(
414
  module: nn.Module,
415
  execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
@@ -422,7 +357,6 @@ def attach_align_device_hook_on_blocks(
422
  ):
423
  """
424
  Attaches `AlignDevicesHook` to all blocks of a given model as needed.
425
-
426
  Args:
427
  module (`torch.nn.Module`):
428
  The module where we want to attach the hooks.
@@ -464,12 +398,10 @@ def attach_align_device_hook_on_blocks(
464
  skip_keys=skip_keys,
465
  )
466
  return
467
-
468
  if not isinstance(execution_device, Mapping):
469
  execution_device = {key: execution_device for key in offload.keys()}
470
  if not isinstance(offload, Mapping):
471
  offload = {key: offload for key in execution_device.keys()}
472
-
473
  if module_name in execution_device and module_name in offload and not offload[module_name]:
474
  hook = AlignDevicesHook(
475
  execution_device=execution_device[module_name],
@@ -505,7 +437,6 @@ def attach_align_device_hook_on_blocks(
505
  elif module_name == "":
506
  hook = AlignDevicesHook(execution_device=execution_device.get(""), io_same_device=True, skip_keys=skip_keys)
507
  add_hook_to_module(module, hook)
508
-
509
  for child_name, child in module.named_children():
510
  child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
511
  attach_align_device_hook_on_blocks(
@@ -518,13 +449,10 @@ def attach_align_device_hook_on_blocks(
518
  preload_module_classes=preload_module_classes,
519
  skip_keys=skip_keys,
520
  )
521
-
522
-
523
  class CpuOffload(ModelHook):
524
  """
525
  Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after
526
  the forward, the user needs to call the `init_hook` method again for this.
527
-
528
  Args:
529
  execution_device(`str`, `int` or `torch.device`, *optional*):
530
  The device on which the model should be executed. Will default to the MPS device if it's available, then
@@ -540,19 +468,14 @@ class CpuOffload(ModelHook):
540
  prev_module_hook: Optional["UserCpuOffloadHook"] = None,
541
  ):
542
  self.prev_module_hook = prev_module_hook
543
-
544
  self.execution_device = execution_device if execution_device is not None else PartialState().default_device
545
-
546
  def init_hook(self, module):
547
  return module.to("cpu")
548
-
549
  def pre_forward(self, module, *args, **kwargs):
550
  if self.prev_module_hook is not None:
551
  self.prev_module_hook.offload()
552
  module.to(self.execution_device)
553
  return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
554
-
555
-
556
  class UserCpuOffloadHook:
557
  """
558
  A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook
@@ -561,9 +484,7 @@ class UserCpuOffloadHook:
561
  def __init__(self, model, hook):
562
  self.model = model
563
  self.hook = hook
564
-
565
  def offload(self):
566
  self.hook.init_hook(self.model)
567
-
568
  def remove(self):
569
  remove_hook_from_module(self.model)
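
To make the hook API above concrete, here is a small, hypothetical hook that moves tensor inputs to a chosen device before the forward pass, a toy stand-in for what `AlignDevicesHook` automates. The module and device are placeholders:

```python
import torch
from torch import nn

from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class MoveInputsHook(ModelHook):
    """Toy hook: send tensor args to a target device before the wrapped forward runs."""

    def __init__(self, device):
        self.device = device

    def pre_forward(self, module, *args, **kwargs):
        args = tuple(a.to(self.device) if isinstance(a, torch.Tensor) else a for a in args)
        return args, kwargs


linear = nn.Linear(4, 2)
add_hook_to_module(linear, MoveInputsHook("cpu"))  # rewrites linear.forward to run the hook
out = linear(torch.randn(1, 4))
remove_hook_from_module(linear)  # restores the original forward
```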
 
src/launchers.py CHANGED
@@ -1,8 +1,6 @@
1
  def test_launch():
2
  "Verify a `PartialState` can be initialized."
3
  _ = PartialState()
4
-
5
-
6
  def notebook_launcher(
7
  function,
8
  args=(),
@@ -16,17 +14,12 @@ def notebook_launcher(
16
  """
17
  Launches a training function, using several processes or multiple nodes if it's possible in the current environment
18
  (TPU with multiple cores for instance).
19
-
20
  <Tip warning={true}>
21
-
22
  To use this function, absolutely zero calls to a CUDA device must be made in the notebook session before calling. If
23
  any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability.
24
-
25
  Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none
26
  of those calls have been made.
27
-
28
  </Tip>
29
-
30
  Args:
31
  function (`Callable`):
32
  The training function to execute. If it accepts arguments, the first argument should be the index of the
@@ -46,19 +39,13 @@ def notebook_launcher(
46
  The rank of the current node.
47
  num_nodes (`int`, *optional*, defaults to 1):
48
  The number of nodes to use for training.
49
-
50
  Example:
51
-
52
  ```python
53
  # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
54
  from accelerate import notebook_launcher
55
-
56
-
57
  def train(*args):
58
  # Your training function here
59
  ...
60
-
61
-
62
  notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
63
  ```
64
  """
@@ -69,18 +56,15 @@ def notebook_launcher(
69
  in_kaggle = True
70
  elif "IPython" in sys.modules:
71
  in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
72
-
73
  try:
74
  mixed_precision = PrecisionType(mixed_precision.lower())
75
  except ValueError:
76
  raise ValueError(
77
  f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
78
  )
79
-
80
  if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
81
  # TPU launch
82
  import torch_xla.distributed.xla_multiprocessing as xmp
83
-
84
  if len(AcceleratorState._shared_state) > 0:
85
  raise ValueError(
86
  "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
@@ -89,7 +73,6 @@ def notebook_launcher(
89
  )
90
  if num_processes is None:
91
  num_processes = 8
92
-
93
  launcher = PrepareForLaunch(function, distributed_type="TPU")
94
  print(f"Launching a training on {num_processes} TPU cores.")
95
  xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
@@ -111,7 +94,6 @@ def notebook_launcher(
111
  # Multi-GPU launch
112
  from torch.multiprocessing import start_processes
113
  from torch.multiprocessing.spawn import ProcessRaisedException
114
-
115
  if len(AcceleratorState._shared_state) > 0:
116
  raise ValueError(
117
  "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
@@ -128,7 +110,6 @@ def notebook_launcher(
128
  for lib_name in problematic_imports:
129
  err += f"\n\t* `{lib_name}`"
130
  raise RuntimeError(err)
131
-
132
  patched_env = dict(
133
  nproc=num_processes,
134
  node_rank=node_rank,
@@ -137,12 +118,10 @@ def notebook_launcher(
137
  master_port=use_port,
138
  mixed_precision=mixed_precision,
139
  )
140
-
141
  # Check for CUDA P2P and IB issues
142
  if not check_cuda_p2p_ib_support():
143
  patched_env["nccl_p2p_disable"] = "1"
144
  patched_env["nccl_ib_disable"] = "1"
145
-
146
  # torch.distributed will expect a few environment variables to be here. We set the ones common to each
147
  # process here (the other ones will be set by the launcher).
148
  with patch_environment(**patched_env):
@@ -177,7 +156,6 @@ def notebook_launcher(
177
  ) from e
178
  else:
179
  raise RuntimeError(f"An issue was found when launching the training: {e}") from e
180
-
181
  else:
182
  # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
183
  if is_mps_available():
@@ -188,19 +166,13 @@ def notebook_launcher(
188
  else:
189
  print("Launching training on CPU.")
190
  function(*args)
191
-
192
-
193
  def debug_launcher(function, args=(), num_processes=2):
194
  """
195
  Launches a training function using several processes on CPU for debugging purposes.
196
-
197
  <Tip warning={true}>
198
-
199
  This function is provided for internal testing and debugging, but it's not intended for real trainings. It will
200
  only use the CPU.
201
-
202
  </Tip>
203
-
204
  Args:
205
  function (`Callable`):
206
  The training function to execute.
@@ -210,7 +182,6 @@ def debug_launcher(function, args=(), num_processes=2):
210
  The number of processes to use for training.
211
  """
212
  from torch.multiprocessing import start_processes
213
-
214
  with tempfile.NamedTemporaryFile() as tmp_file:
215
  # torch.distributed will expect a few environment variables to be here. We set the ones common to each
216
  # process here (the other ones will be set by the launcher).
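
As a quick illustration of the debugging path above, a sketch of `debug_launcher` spawning CPU processes; the training function is a made-up placeholder:

```python
from accelerate import Accelerator, debug_launcher


def toy_training_function():
    accelerator = Accelerator(cpu=True)
    accelerator.print(f"Hello from {accelerator.num_processes} CPU processes")


# Spawns 2 CPU-only processes, purely to exercise distributed code paths.
debug_launcher(toy_training_function, num_processes=2)
```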
 
src/local_sgd.py CHANGED
@@ -2,40 +2,29 @@ class LocalSGD:
2
  """
3
  A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently
4
  on each device, and averages model weights every K synchronization steps.
5
-
6
  It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular,
7
  this is a simple implementation that cannot support scenarios such as model parallelism.
8
-
9
-
10
  Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
11
  back to at least:
12
-
13
  Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
14
  arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)
15
-
16
  We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).
17
-
18
  Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
19
  Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
20
-
21
  """
22
  def __enter__(self):
23
  if self.enabled:
24
  self.model_sync_obj = self.model.no_sync()
25
  self.model_sync_obj.__enter__()
26
-
27
  return self
28
-
29
  def __exit__(self, type, value, tb):
30
  if self.enabled:
31
  # Average all models on exit
32
  self._sync_and_avg_model_params()
33
  self.model_sync_obj.__exit__(type, value, tb)
34
-
35
  def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
36
  """
37
  Constructor.
38
-
39
  Args:
40
  model (`torch.nn.Module`):
41
  The model whose parameters we need to average.
@@ -58,7 +47,6 @@ class LocalSGD:
58
  self.accelerator = accelerator
59
  self.model = model
60
  self.local_sgd_steps = local_sgd_steps
61
-
62
  def step(self):
63
  """
64
  This function makes a "step" and synchronizes model parameters if necessary.
@@ -66,10 +54,8 @@ class LocalSGD:
66
  self.num_steps += 1
67
  if not self.enabled:
68
  return
69
-
70
  if self.num_steps % self.local_sgd_steps == 0:
71
  self._sync_and_avg_model_params()
72
-
73
  def _sync_and_avg_model_params(self):
74
  """
75
  Synchronize + Average model parameters across all GPUs
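
For reference, a condensed training-loop sketch using `LocalSGD` as a context manager, mirroring the API shown above; the model, optimizer and data are placeholders:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data = DataLoader(TensorDataset(torch.randn(64, 8), torch.randn(64, 1)), batch_size=8)
model, optimizer, data = accelerator.prepare(model, optimizer, data)

# Average parameters across workers only every 8 local steps (a no-op on a single process).
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=accelerator.num_processes > 1) as local_sgd:
    for x, y in data:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        local_sgd.step()  # counts local updates and triggers the periodic parameter averaging
```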
 
src/logging.py CHANGED
@@ -1,10 +1,8 @@
1
  class MultiProcessAdapter(logging.LoggerAdapter):
2
  """
3
  An adapter to assist with logging in multiprocess.
4
-
5
  `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
6
  or only the main executed one. Default is `main_process_only=True`.
7
-
8
  Does not require an `Accelerator` object to be created first.
9
  """
10
  @staticmethod
@@ -12,18 +10,14 @@ class MultiProcessAdapter(logging.LoggerAdapter):
12
  "Check if log should be performed"
13
  state = PartialState()
14
  return not main_process_only or (main_process_only and state.is_main_process)
15
-
16
  def log(self, level, msg, *args, **kwargs):
17
  """
18
  Delegates logger call after checking if we should log.
19
-
20
  Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
21
  or only the main executed one. Default is `True` if not passed
22
-
23
  Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
24
  read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
25
  break with the previous behavior.
26
-
27
  `in_order` is ignored if `main_process_only` is passed.
28
  """
29
  if PartialState._shared_state == {}:
@@ -32,12 +26,10 @@ class MultiProcessAdapter(logging.LoggerAdapter):
32
  )
33
  main_process_only = kwargs.pop("main_process_only", True)
34
  in_order = kwargs.pop("in_order", False)
35
-
36
  if self.isEnabledFor(level):
37
  if self._should_log(main_process_only):
38
  msg, kwargs = self.process(msg, kwargs)
39
  self.logger.log(level, msg, *args, **kwargs)
40
-
41
  elif in_order:
42
  state = PartialState()
43
  for i in range(state.num_processes):
@@ -45,48 +37,36 @@ class MultiProcessAdapter(logging.LoggerAdapter):
45
  msg, kwargs = self.process(msg, kwargs)
46
  self.logger.log(level, msg, *args, **kwargs)
47
  state.wait_for_everyone()
48
-
49
  @functools.lru_cache(None)
50
  def warning_once(self, *args, **kwargs):
51
  """
52
  This method is identical to `logger.warning()`, but will emit the warning with the same message only once
53
-
54
  Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
55
  cache. The assumption here is that all warning messages are unique across the code. If they aren't, then we need to
56
  switch to another type of cache that includes the caller frame information in the hashing function.
57
  """
58
  self.warning(*args, **kwargs)
59
-
60
-
61
  def get_logger(name: str, log_level: str = None):
62
  """
63
  Returns a `logging.Logger` for `name` that can handle multiprocessing.
64
-
65
  If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all
66
  processes and in order, also pass `in_order=True`.
67
-
68
  Args:
69
  name (`str`):
70
  The name for the logger, such as `__file__`
71
  log_level (`str`, *optional*):
72
  The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not set
73
-
74
  Example:
75
-
76
  ```python
77
  >>> from accelerate.logging import get_logger
78
  >>> from accelerate import Accelerator
79
-
80
  >>> logger = get_logger(__name__)
81
-
82
  >>> accelerator = Accelerator()
83
  >>> logger.info("My log", main_process_only=False)
84
  >>> logger.debug("My log", main_process_only=True)
85
-
86
  >>> logger = get_logger(__name__, log_level="DEBUG")
87
  >>> logger.info("My log")
88
  >>> logger.debug("My second log")
89
-
90
  >>> array = ["a", "b", "c", "d"]
91
  >>> letter_at_rank = array[accelerator.process_index]
92
  >>> logger.info(letter_at_rank, in_order=True)
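
A brief runnable complement to the docstring example above; the log messages are arbitrary:

```python
import logging

from accelerate import Accelerator
from accelerate.logging import get_logger

logging.basicConfig(level=logging.INFO)  # make INFO records visible
logger = get_logger(__name__, log_level="INFO")
accelerator = Accelerator()

logger.info("Emitted by the main process only")
logger.info("Emitted by every process", main_process_only=False)
logger.warning_once("Deduplicated: shown a single time even if this line runs in a loop")
```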
 
src/optimizer.py CHANGED
@@ -6,15 +6,11 @@ def move_to_device(state, device):
6
  elif isinstance(state, torch.Tensor):
7
  return state.to(device)
8
  return state
9
-
10
-
11
  class AcceleratedOptimizer(torch.optim.Optimizer):
12
  """
13
  Internal wrapper around a torch optimizer.
14
-
15
  Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
16
  accumulation.
17
-
18
  Args:
19
  optimizer (`torch.optim.optimizer.Optimizer`):
20
  The optimizer to wrap.
@@ -31,12 +27,10 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
31
  self.gradient_state = GradientState()
32
  self.device_placement = device_placement
33
  self._is_overflow = False
34
-
35
  if self.scaler is not None:
36
  self._accelerate_step_called = False
37
  self._optimizer_original_step_method = self.optimizer.step
38
  self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
39
-
40
  # Handle device placement
41
  if device_placement:
42
  state_dict = self.optimizer.state_dict()
@@ -45,42 +39,32 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
45
  else:
46
  state_dict = move_to_device(state_dict, self.accelerator_state.device)
47
  self.optimizer.load_state_dict(state_dict)
48
-
49
  @property
50
  def state(self):
51
  return self.optimizer.state
52
-
53
  @state.setter
54
  def state(self, state):
55
  self.optimizer.state = state
56
-
57
  @property
58
  def param_groups(self):
59
  return self.optimizer.param_groups
60
-
61
  @param_groups.setter
62
  def param_groups(self, param_groups):
63
  self.optimizer.param_groups = param_groups
64
-
65
  @property
66
  def defaults(self):
67
  return self.optimizer.defaults
68
-
69
  @defaults.setter
70
  def defaults(self, defaults):
71
  self.optimizer.defaults = defaults
72
-
73
  def add_param_group(self, param_group):
74
  self.optimizer.add_param_group(param_group)
75
-
76
  def load_state_dict(self, state_dict):
77
  if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:
78
  xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
79
  self.optimizer.load_state_dict(state_dict)
80
-
81
  def state_dict(self):
82
  return self.optimizer.state_dict()
83
-
84
  def zero_grad(self, set_to_none=None):
85
  if self.gradient_state.sync_gradients:
86
  accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
@@ -92,7 +76,6 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
92
  if set_to_none is not None:
93
  raise ValueError("`set_to_none` for `Optimizer.zero_grad` is not supported by this optimizer.")
94
  self.optimizer.zero_grad()
95
-
96
  def step(self, closure=None):
97
  if self.gradient_state.sync_gradients:
98
  if self.accelerator_state.distributed_type == DistributedType.TPU:
@@ -100,10 +83,8 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
100
  xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
101
  elif self.scaler is not None:
102
  self.optimizer.step = self._optimizer_patched_step_method
103
-
104
  self.scaler.step(self.optimizer, closure)
105
  self.scaler.update()
106
-
107
  if not self._accelerate_step_called:
108
  # If the optimizer step was skipped, gradient overflow was detected.
109
  self._is_overflow = True
@@ -115,11 +96,9 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
115
  self._accelerate_step_called = False
116
  else:
117
  self.optimizer.step(closure)
118
-
119
  def _switch_parameters(self, parameters_map):
120
  for param_group in self.optimizer.param_groups:
121
  param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
122
-
123
  @property
124
  def is_overflow(self):
125
  """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
@@ -129,12 +108,10 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
129
  FutureWarning,
130
  )
131
  return self._is_overflow
132
-
133
  @property
134
  def step_was_skipped(self):
135
  """Whether or not the optimizer step was skipped."""
136
  return self._is_overflow
137
-
138
  def __getstate__(self):
139
  _ignored_keys = [
140
  "_accelerate_step_called",
@@ -142,18 +119,14 @@ class AcceleratedOptimizer(torch.optim.Optimizer):
142
  "_optimizer_patched_step_method",
143
  ]
144
  return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
145
-
146
  def __setstate__(self, state):
147
  self.__dict__.update(state)
148
  if self.scaler is not None:
149
  self._accelerate_step_called = False
150
  self._optimizer_original_step_method = self.optimizer.step
151
  self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
152
-
153
-
154
  def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
155
  def patched_step(*args, **kwargs):
156
  accelerated_optimizer._accelerate_step_called = True
157
  return method(*args, **kwargs)
158
-
159
  return patched_step
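
To show how the wrapper surfaces skipped steps, a minimal sketch; with `mixed_precision="fp16"` on a GPU a `GradScaler` is attached and an overflowing gradient would make `step_was_skipped` turn `True`:

```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # e.g. Accelerator(mixed_precision="fp16") on a CUDA machine
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

x = torch.randn(8, 4, device=accelerator.device)
y = torch.randn(8, 1, device=accelerator.device)

optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(x), y)
accelerator.backward(loss)
optimizer.step()

# The AcceleratedOptimizer records whether the scaler skipped the update.
print("step skipped:", optimizer.step_was_skipped)
```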
 
6
  elif isinstance(state, torch.Tensor):
7
  return state.to(device)
8
  return state
 
 
9
  class AcceleratedOptimizer(torch.optim.Optimizer):
10
  """
11
  Internal wrapper around a torch optimizer.
 
12
  Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
13
  accumulation.
 
14
  Args:
15
  optimizer (`torch.optim.optimizer.Optimizer`):
16
  The optimizer to wrap.
 
27
  self.gradient_state = GradientState()
28
  self.device_placement = device_placement
29
  self._is_overflow = False
 
30
  if self.scaler is not None:
31
  self._accelerate_step_called = False
32
  self._optimizer_original_step_method = self.optimizer.step
33
  self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
 
34
  # Handle device placement
35
  if device_placement:
36
  state_dict = self.optimizer.state_dict()
 
39
  else:
40
  state_dict = move_to_device(state_dict, self.accelerator_state.device)
41
  self.optimizer.load_state_dict(state_dict)
 
42
  @property
43
  def state(self):
44
  return self.optimizer.state
 
45
  @state.setter
46
  def state(self, state):
47
  self.optimizer.state = state
 
48
  @property
49
  def param_groups(self):
50
  return self.optimizer.param_groups
 
51
  @param_groups.setter
52
  def param_groups(self, param_groups):
53
  self.optimizer.param_groups = param_groups
 
54
  @property
55
  def defaults(self):
56
  return self.optimizer.defaults
 
57
  @defaults.setter
58
  def defaults(self, defaults):
59
  self.optimizer.defaults = defaults
 
60
  def add_param_group(self, param_group):
61
  self.optimizer.add_param_group(param_group)
 
62
  def load_state_dict(self, state_dict):
63
  if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:
64
  xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
65
  self.optimizer.load_state_dict(state_dict)
 
66
  def state_dict(self):
67
  return self.optimizer.state_dict()
 
68
  def zero_grad(self, set_to_none=None):
69
  if self.gradient_state.sync_gradients:
70
  accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
 
76
  if set_to_none is not None:
77
  raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.")
78
  self.optimizer.zero_grad()
 
79
  def step(self, closure=None):
80
  if self.gradient_state.sync_gradients:
81
  if self.accelerator_state.distributed_type == DistributedType.TPU:
 
83
  xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
84
  elif self.scaler is not None:
85
  self.optimizer.step = self._optimizer_patched_step_method
 
86
  self.scaler.step(self.optimizer, closure)
87
  self.scaler.update()
 
88
  if not self._accelerate_step_called:
89
  # If the optimizer step was skipped, gradient overflow was detected.
90
  self._is_overflow = True
 
96
  self._accelerate_step_called = False
97
  else:
98
  self.optimizer.step(closure)
 
99
  def _switch_parameters(self, parameters_map):
100
  for param_group in self.optimizer.param_groups:
101
  param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
 
102
  @property
103
  def is_overflow(self):
104
  """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
 
108
  FutureWarning,
109
  )
110
  return self._is_overflow
 
111
  @property
112
  def step_was_skipped(self):
113
  """Whether or not the optimizer step was skipped."""
114
  return self._is_overflow
 
115
  def __getstate__(self):
116
  _ignored_keys = [
117
  "_accelerate_step_called",
 
119
  "_optimizer_patched_step_method",
120
  ]
121
  return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
 
122
  def __setstate__(self, state):
123
  self.__dict__.update(state)
124
  if self.scaler is not None:
125
  self._accelerate_step_called = False
126
  self._optimizer_original_step_method = self.optimizer.step
127
  self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
 
 
128
  def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
129
  def patched_step(*args, **kwargs):
130
  accelerated_optimizer._accelerate_step_called = True
131
  return method(*args, **kwargs)
 
132
  return patched_step
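
The wrapper above only lets `step` and `zero_grad` do real work when gradients are being synchronized, and uses the patched step method to detect when a `GradScaler` skipped an update because of overflow. A minimal sketch of how that behaviour surfaces in a training loop, with a placeholder model, data, and hyperparameters that are not taken from this repo:

```python
# Illustrative sketch only: toy model/data, single process assumed.
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

for _ in range(100):
    x = torch.randn(8, 10, device=accelerator.device)
    y = torch.randint(0, 2, (8,), device=accelerator.device)
    with accelerator.accumulate(model):
        loss = torch.nn.functional.cross_entropy(model(x), y)
        accelerator.backward(loss)
        # On micro-steps where gradients are not synchronized, the wrapped
        # optimizer turns these two calls into no-ops.
        optimizer.step()
        optimizer.zero_grad()
    if optimizer.step_was_skipped:
        # Only happens with a GradScaler (mixed precision) when overflow
        # was detected and the update was skipped.
        pass
```
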
src/scheduler.py CHANGED
@@ -1,16 +1,11 @@
1
-
2
-
3
  # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
4
-
5
  class AcceleratedScheduler:
6
  """
7
  A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
8
  to avoid making a scheduler step too fast when gradients overflowed and there was no training step (in mixed
9
  precision training).
10
-
11
  When performing gradient accumulation, scheduler lengths should not be changed accordingly; Accelerate will always
12
  step the scheduler to account for it.
13
-
14
  Args:
15
  scheduler (`torch.optim.lr_scheduler._LRScheduler`):
16
  The scheduler to wrap.
@@ -29,19 +24,16 @@ class AcceleratedScheduler:
29
  self.split_batches = split_batches
30
  self.step_with_optimizer = step_with_optimizer
31
  self.gradient_state = GradientState()
32
-
33
  def step(self, *args, **kwargs):
34
  if not self.step_with_optimizer:
35
  # No link between scheduler and optimizer -> just step
36
  self.scheduler.step(*args, **kwargs)
37
  return
38
-
39
  # Otherwise, first make sure the optimizer was stepped.
40
  if not self.gradient_state.sync_gradients:
41
  if self.gradient_state.adjust_scheduler:
42
  self.scheduler._step_count += 1
43
  return
44
-
45
  for opt in self.optimizers:
46
  if opt.step_was_skipped:
47
  return
@@ -59,19 +51,14 @@ class AcceleratedScheduler:
59
  self.scheduler.step(*args, **kwargs)
60
  else:
61
  self.scheduler.step(*args, **kwargs)
62
-
63
  # Passthroughs
64
  def get_last_lr(self):
65
  return self.scheduler.get_last_lr()
66
-
67
  def state_dict(self):
68
  return self.scheduler.state_dict()
69
-
70
  def load_state_dict(self, state_dict):
71
  self.scheduler.load_state_dict(state_dict)
72
-
73
  def get_lr(self):
74
  return self.scheduler.get_lr()
75
-
76
  def print_lr(self, *args, **kwargs):
77
  return self.scheduler.print_lr(*args, **kwargs)
 
 
 
1
  # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
 
2
  class AcceleratedScheduler:
3
  """
4
  A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
5
  to avoid making a scheduler step too fast when gradients overflowed and there was no training step (in mixed
6
  precision training).
 
7
  When performing gradient accumulation, scheduler lengths should not be changed accordingly; Accelerate will always
8
  step the scheduler to account for it.
 
9
  Args:
10
  scheduler (`torch.optim.lr_scheduler._LRScheduler`):
11
  The scheduler to wrap.
 
24
  self.split_batches = split_batches
25
  self.step_with_optimizer = step_with_optimizer
26
  self.gradient_state = GradientState()
 
27
  def step(self, *args, **kwargs):
28
  if not self.step_with_optimizer:
29
  # No link between scheduler and optimizer -> just step
30
  self.scheduler.step(*args, **kwargs)
31
  return
 
32
  # Otherwise, first make sure the optimizer was stepped.
33
  if not self.gradient_state.sync_gradients:
34
  if self.gradient_state.adjust_scheduler:
35
  self.scheduler._step_count += 1
36
  return
 
37
  for opt in self.optimizers:
38
  if opt.step_was_skipped:
39
  return
 
51
  self.scheduler.step(*args, **kwargs)
52
  else:
53
  self.scheduler.step(*args, **kwargs)
 
54
  # Passthroughs
55
  def get_last_lr(self):
56
  return self.scheduler.get_last_lr()
 
57
  def state_dict(self):
58
  return self.scheduler.state_dict()
 
59
  def load_state_dict(self, state_dict):
60
  self.scheduler.load_state_dict(state_dict)
 
61
  def get_lr(self):
62
  return self.scheduler.get_lr()
 
63
  def print_lr(self, *args, **kwargs):
64
  return self.scheduler.print_lr(*args, **kwargs)
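
Because the scheduler wrapper checks whether its optimizers actually stepped, the usual pattern is to call `scheduler.step()` on every iteration and let the wrapper decide whether the underlying scheduler should advance. A hedged sketch with toy objects on a single process; it is not taken from this repo's docs:

```python
# Illustrative sketch only: placeholder model and data.
import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)

for _ in range(20):
    x = torch.randn(8, 4, device=accelerator.device)
    with accelerator.accumulate(model):
        loss = model(x).mean()
        accelerator.backward(loss)
        optimizer.step()
        # Called every iteration; the wrapper skips or adjusts the real
        # scheduler step when the optimizer did not actually step.
        lr_scheduler.step()
        optimizer.zero_grad()
```
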
src/state.py CHANGED
@@ -1,58 +1,40 @@
1
  logger = logging.getLogger(__name__)
2
-
3
-
4
  def is_initialized() -> bool:
5
  """
6
  Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
7
  but works as a module method.
8
  """
9
  return AcceleratorState._shared_state != {}
10
-
11
-
12
  # Lambda function that does nothing
13
  def do_nothing(*args, **kwargs):
14
  return None
15
-
16
-
17
  class ThreadLocalSharedDict(threading.local):
18
  """
19
  Descriptor that holds a dict shared between instances of a class in the same thread.
20
-
21
  Note: Descriptors have slightly different semantics than just a dict field on its own.
22
  `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
23
  underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
24
  the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
25
  object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
26
-
27
  See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
28
-
29
  This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
30
-
31
  See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
32
  """
33
  def __init__(self, thread_local: bool = False):
34
  self._storage = {}
35
-
36
  def __get__(self, obj, objtype=None):
37
  return self._storage
38
-
39
  def __set__(self, obj, value):
40
  self._storage = value
41
-
42
-
43
  # Prefer global shared dictionary, except when using TPU.
44
  SharedDict = dict if not is_tpu_available(check_device=False) else ThreadLocalSharedDict
45
-
46
-
47
  # Inspired by Alex Martelli's 'Borg'.
48
  class PartialState:
49
  """
50
  Singleton class that has information about the current training environment and functions to help with process
51
  control. Designed to be used when only process control and device execution states are needed. Does *not* need to
52
  be initialized from `Accelerator`.
53
-
54
  **Available attributes:**
55
-
56
  - **device** (`torch.device`) -- The device to use.
57
  - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
58
  in use.
@@ -67,7 +49,6 @@ class PartialState:
67
  - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
68
  """
69
  _shared_state = SharedDict()
70
-
71
  def __init__(self, cpu: bool = False, **kwargs):
72
  self.__dict__ = self._shared_state
73
  if not self.initialized:
@@ -82,14 +63,12 @@ class PartialState:
82
  os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
83
  and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
84
  )
85
-
86
  if use_sagemaker_dp and not cpu:
87
  if (
88
  os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL
89
  ) or use_sagemaker_dp:
90
  self.distributed_type = DistributedType.MULTI_GPU
91
  import smdistributed.dataparallel.torch.torch_smddp # noqa
92
-
93
  if not torch.distributed.is_initialized():
94
  torch.distributed.init_process_group(backend="smddp")
95
  self.backend = "smddp"
@@ -116,7 +95,6 @@ class PartialState:
116
  self.distributed_type = DistributedType.DEEPSPEED
117
  if not torch.distributed.is_initialized():
118
  from deepspeed import comm as dist
119
-
120
  # DeepSpeed always uses nccl
121
  kwargs.pop("backend", None)
122
  if is_xpu_available() and is_ccl_available():
@@ -127,7 +105,6 @@ class PartialState:
127
  else:
128
  self.backend = "nccl"
129
  dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
130
-
131
  self.num_processes = torch.distributed.get_world_size()
132
  self.process_index = torch.distributed.get_rank()
133
  self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
@@ -230,7 +207,6 @@ class PartialState:
230
  and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
231
  ):
232
  import psutil
233
-
234
  num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
235
  if num_cpu_threads_per_process == 0:
236
  num_cpu_threads_per_process = 1
@@ -261,12 +237,9 @@ class PartialState:
261
  )
262
  self.num_processes = 1
263
  self.process_index = self.local_process_index = 0
264
-
265
  if self.device is None:
266
  self.device = torch.device("cpu") if cpu else self.default_device
267
-
268
  self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
269
-
270
  def __repr__(self) -> str:
271
  return (
272
  f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
@@ -275,36 +248,30 @@ class PartialState:
275
  f"Local process index: {self.local_process_index}\n"
276
  f"Device: {self.device}\n"
277
  )
278
-
279
  @staticmethod
280
  def _reset_state():
281
  "Resets `_shared_state`, is used internally and should not be called"
282
  PartialState._shared_state.clear()
283
-
284
  @property
285
  def initialized(self) -> bool:
286
  "Returns whether the `PartialState` has been initialized"
287
  return self._shared_state != {}
288
-
289
  @property
290
  def use_distributed(self):
291
  """
292
  Whether the Accelerator is configured for distributed training
293
  """
294
  return self.distributed_type != DistributedType.NO and self.num_processes > 1
295
-
296
  @property
297
  def is_last_process(self) -> bool:
298
  "Returns whether the current process is the last one"
299
  return self.process_index == self.num_processes - 1
300
-
301
  @property
302
  def is_main_process(self) -> bool:
303
  "Returns whether the current process is the main process"
304
  return (
305
  self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
306
  )
307
-
308
  @property
309
  def is_local_main_process(self) -> bool:
310
  "Returns whether the current process is the main process on the local node"
@@ -313,19 +280,15 @@ class PartialState:
313
  if self.distributed_type != DistributedType.MEGATRON_LM
314
  else self.is_last_process
315
  )
316
-
317
  def wait_for_everyone(self):
318
  """
319
  Will stop the execution of the current process until every other process has reached that point (so this does
320
  nothing when the script is only run in one process). Useful to do before saving a model.
321
-
322
  Example:
323
-
324
  ```python
325
  >>> # Assuming two GPU processes
326
  >>> import time
327
  >>> from accelerate.state import PartialState
328
-
329
  >>> state = PartialState()
330
  >>> if state.is_main_process:
331
  ... time.sleep(2)
@@ -347,24 +310,18 @@ class PartialState:
347
  torch.distributed.barrier()
348
  elif self.distributed_type == DistributedType.TPU:
349
  xm.rendezvous("accelerate.utils.wait_for_everyone")
350
-
351
  def _goes_first(self, is_main: bool):
352
  if not is_main:
353
  self.wait_for_everyone()
354
-
355
  yield
356
-
357
  if is_main:
358
  self.wait_for_everyone()
359
-
360
  @contextmanager
361
  def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
362
  """
363
  Splits `inputs` between `self.num_processes` quickly and can then be used on that process. Useful when doing
364
  distributed inference, such as with different prompts.
365
-
366
  Note that when using a `dict`, all keys need to have the same number of elements.
367
-
368
  Args:
369
  inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
370
  The input to split between processes.
@@ -372,14 +329,10 @@ class PartialState:
372
  Whether to apply padding by repeating the last element of the input so that all processes have the same
373
  number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
374
  in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
375
-
376
-
377
  Example:
378
-
379
  ```python
380
  # Assume there are two processes
381
  from accelerate import PartialState
382
-
383
  state = PartialState()
384
  with state.split_between_processes(["A", "B", "C"]) as inputs:
385
  print(inputs)
@@ -387,7 +340,6 @@ class PartialState:
387
  ["A", "B"]
388
  # Process 1
389
  ["C"]
390
-
391
  with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
392
  print(inputs)
393
  # Process 0
@@ -410,7 +362,6 @@ class PartialState:
410
  end_index = start_index + num_samples_per_process
411
  if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
412
  end_index = length
413
-
414
  def _split_values(inputs, start_index, end_index):
415
  if isinstance(inputs, (list, tuple, torch.Tensor)):
416
  if start_index >= len(inputs):
@@ -420,7 +371,6 @@ class PartialState:
420
  if apply_padding:
421
  if isinstance(result, torch.Tensor):
422
  from accelerate.utils import pad_across_processes, send_to_device
423
-
424
  # The tensor needs to be on the device before we can pad it
425
  tensorized_result = send_to_device(result, self.device)
426
  result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
@@ -433,21 +383,15 @@ class PartialState:
433
  return inputs
434
  else:
435
  return inputs
436
-
437
  yield _split_values(inputs, start_index, end_index)
438
-
439
  @contextmanager
440
  def main_process_first(self):
441
  """
442
  Lets the main process go first inside a with block.
443
-
444
  The other processes will enter the with block after the main process exits.
445
-
446
  Example:
447
-
448
  ```python
449
  >>> from accelerate import Accelerator
450
-
451
  >>> accelerator = Accelerator()
452
  >>> with accelerator.main_process_first():
453
  ... # This will be printed first by process 0 then in a seemingly
@@ -456,19 +400,14 @@ class PartialState:
456
  ```
457
  """
458
  yield from self._goes_first(self.is_main_process)
459
-
460
  @contextmanager
461
  def local_main_process_first(self):
462
  """
463
  Lets the local main process go first inside a with block.
464
-
465
  The other processes will enter the with block after the main process exits.
466
-
467
  Example:
468
-
469
  ```python
470
  >>> from accelerate.state import PartialState
471
-
472
  >>> state = PartialState()
473
  >>> with state.local_main_process_first():
474
  ... # This will be printed first by local process 0 then in a seemingly
@@ -477,27 +416,18 @@ class PartialState:
477
  ```
478
  """
479
  yield from self._goes_first(self.is_local_main_process)
480
-
481
  def on_main_process(self, function: Callable[..., Any] = None):
482
  """
483
  Decorator that only runs the decorated function on the main process.
484
-
485
  Args:
486
  function (`Callable`): The function to decorate.
487
-
488
  Example:
489
-
490
  ```python
491
  >>> from accelerate.state import PartialState
492
-
493
  >>> state = PartialState()
494
-
495
-
496
  >>> @state.on_main_process
497
  ... def print_something():
498
  ... print("This will be printed by process 0 only.")
499
-
500
-
501
  >>> print_something()
502
  "This will be printed by process 0 only"
503
  ```
@@ -507,27 +437,19 @@ class PartialState:
507
  if self.is_main_process or not self.use_distributed:
508
  return function
509
  return do_nothing
510
-
511
  def on_local_main_process(self, function: Callable[..., Any] = None):
512
  """
513
  Decorator that only runs the decorated function on the local main process.
514
-
515
  Args:
516
  function (`Callable`): The function to decorate.
517
-
518
  Example:
519
  ```python
520
  # Assume we have 2 servers with 4 processes each.
521
  from accelerate.state import PartialState
522
-
523
  state = PartialState()
524
-
525
-
526
  @state.on_local_main_process
527
  def print_something():
528
  print("This will be printed by process 0 only on each server.")
529
-
530
-
531
  print_something()
532
  # On server 1:
533
  "This will be printed by process 0 only"
@@ -538,27 +460,19 @@ class PartialState:
538
  if self.is_local_main_process or not self.use_distributed:
539
  return function
540
  return do_nothing
541
-
542
  def on_last_process(self, function: Callable[..., Any]):
543
  """
544
  Decorator that only runs the decorated function on the last process.
545
-
546
  Args:
547
  function (`Callable`): The function to decorate.
548
-
549
  Example:
550
  ```python
551
  # Assume we have 4 processes.
552
  from accelerate.state import PartialState
553
-
554
  state = PartialState()
555
-
556
-
557
  @state.on_last_process
558
  def print_something():
559
  print(f"Printed on process {state.process_index}")
560
-
561
-
562
  print_something()
563
  "Printed on process 3"
564
  ```
@@ -566,30 +480,22 @@ class PartialState:
566
  if self.is_last_process or not self.use_distributed:
567
  return function
568
  return do_nothing
569
-
570
  def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
571
  """
572
  Decorator that only runs the decorated function on the process with the given index.
573
-
574
  Args:
575
  function (`Callable`, `optional`):
576
  The function to decorate.
577
  process_index (`int`, `optional`):
578
  The index of the process on which to run the function.
579
-
580
  Example:
581
  ```python
582
  # Assume we have 4 processes.
583
  from accelerate.state import PartialState
584
-
585
  state = PartialState()
586
-
587
-
588
  @state.on_process(process_index=2)
589
  def print_something():
590
  print(f"Printed on process {state.process_index}")
591
-
592
-
593
  print_something()
594
  "Printed on process 2"
595
  ```
@@ -599,30 +505,22 @@ class PartialState:
599
  if (self.process_index == process_index) or (not self.use_distributed):
600
  return function
601
  return do_nothing
602
-
603
  def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
604
  """
605
  Decorator that only runs the decorated function on the process with the given index on the current node.
606
-
607
  Args:
608
  function (`Callable`, *optional*):
609
  The function to decorate.
610
  local_process_index (`int`, *optional*):
611
  The index of the local process on which to run the function.
612
-
613
  Example:
614
  ```python
615
  # Assume we have 2 servers with 4 processes each.
616
  from accelerate import Accelerator
617
-
618
  accelerator = Accelerator()
619
-
620
-
621
  @accelerator.on_local_process(local_process_index=2)
622
  def print_something():
623
  print(f"Printed on process {accelerator.local_process_index}")
624
-
625
-
626
  print_something()
627
  # On server 1:
628
  "Printed on process 2"
@@ -635,11 +533,9 @@ class PartialState:
635
  if (self.local_process_index == local_process_index) or (not self.use_distributed):
636
  return function
637
  return do_nothing
638
-
639
  def print(self, *args, **kwargs):
640
  if self.is_local_main_process:
641
  print(*args, **kwargs)
642
-
643
  @property
644
  def default_device(self) -> torch.device:
645
  """
@@ -660,14 +556,10 @@ class PartialState:
660
  return torch.device("npu")
661
  else:
662
  return torch.device("cpu")
663
-
664
-
665
  class AcceleratorState:
666
  """
667
  Singleton class that has information about the current training environment.
668
-
669
  **Available attributes:**
670
-
671
  - **device** (`torch.device`) -- The device to use.
672
  - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
673
  in use.
@@ -683,7 +575,6 @@ class AcceleratorState:
683
  - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
684
  """
685
  _shared_state = SharedDict()
686
-
687
  def __init__(
688
  self,
689
  mixed_precision: str = None,
@@ -722,7 +613,6 @@ class AcceleratorState:
722
  "or higher, compute capability of 8.9 or higher). Will use FP16 instead."
723
  )
724
  mixed_precision = "fp16"
725
-
726
  self.dynamo_plugin = dynamo_plugin
727
  if not _from_accelerator:
728
  raise ValueError(
@@ -771,7 +661,6 @@ class AcceleratorState:
771
  if self._mixed_precision != "no":
772
  fsdp_plugin.set_mixed_precision(self._mixed_precision)
773
  self.fsdp_plugin = fsdp_plugin
774
-
775
  if (
776
  self.dynamo_plugin.backend != DynamoBackend.NO
777
  and self._mixed_precision == "no"
@@ -779,17 +668,14 @@ class AcceleratorState:
779
  ):
780
  torch.backends.cuda.matmul.allow_tf32 = True
781
  PartialState._shared_state["distributed_type"] = self.distributed_type
782
-
783
  @property
784
  def initialized(self) -> bool:
785
  return self._shared_state != PartialState._shared_state
786
-
787
  def __repr__(self):
788
  repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
789
  if self.distributed_type == DistributedType.DEEPSPEED:
790
  repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
791
  return repr
792
-
793
  def _check_initialized(self, mixed_precision=None, cpu=None):
794
  "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
795
  if self.initialized:
@@ -802,7 +688,6 @@ class AcceleratorState:
802
  and self.distributed_type != DistributedType.DEEPSPEED
803
  ):
804
  raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
805
-
806
  # For backward compatibility
807
  @property
808
  def use_fp16(self):
@@ -812,7 +697,6 @@ class AcceleratorState:
812
  FutureWarning,
813
  )
814
  return self._mixed_precision != "no"
815
-
816
  @property
817
  def mixed_precision(self):
818
  if self.distributed_type == DistributedType.DEEPSPEED:
@@ -826,47 +710,38 @@ class AcceleratorState:
826
  else:
827
  mixed_precision = self._mixed_precision
828
  return mixed_precision
829
-
830
  @staticmethod
831
  def _reset_state(reset_partial_state: bool = False):
832
  "Resets `_shared_state`, is used internally and should not be called"
833
  AcceleratorState._shared_state.clear()
834
  if reset_partial_state:
835
  PartialState._reset_state()
836
-
837
  @property
838
  def use_distributed(self):
839
  """
840
  Whether the Accelerator is configured for distributed training
841
  """
842
  return PartialState().use_distributed
843
-
844
  @property
845
  def is_last_process(self) -> bool:
846
  "Returns whether the current process is the last one"
847
  return PartialState().is_last_process
848
-
849
  @property
850
  def is_main_process(self) -> bool:
851
  "Returns whether the current process is the main process"
852
  return PartialState().is_main_process
853
-
854
  @property
855
  def is_local_main_process(self) -> bool:
856
  "Returns whether the current process is the main process on the local node"
857
  return PartialState().is_local_main_process
858
-
859
  def wait_for_everyone(self):
860
  PartialState().wait_for_everyone()
861
-
862
  @contextmanager
863
  def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
864
  """
865
  Splits `inputs` between `self.num_processes` quickly and can then be used on that process. Useful when doing
866
  distributed inference, such as with different prompts.
867
-
868
  Note that when using a `dict`, all keys need to have the same number of elements.
869
-
870
  Args:
871
  inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
872
  The input to split between processes.
@@ -874,14 +749,10 @@ class AcceleratorState:
874
  Whether to apply padding by repeating the last element of the input so that all processes have the same
875
  number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
876
  in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
877
-
878
-
879
  Example:
880
-
881
  ```python
882
  # Assume there are two processes
883
  from accelerate.state import AcceleratorState
884
-
885
  state = AcceleratorState()
886
  with state.split_between_processes(["A", "B", "C"]) as inputs:
887
  print(inputs)
@@ -889,7 +760,6 @@ class AcceleratorState:
889
  ["A", "B"]
890
  # Process 1
891
  ["C"]
892
-
893
  with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
894
  print(inputs)
895
  # Process 0
@@ -900,37 +770,28 @@ class AcceleratorState:
900
  """
901
  with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
902
  yield inputs
903
-
904
  @contextmanager
905
  def main_process_first(self):
906
  """
907
  Lets the main process go first inside a with block.
908
-
909
  The other processes will enter the with block after the main process exits.
910
  """
911
  with PartialState().main_process_first():
912
  yield
913
-
914
  @contextmanager
915
  def local_main_process_first(self):
916
  """
917
  Lets the local main process go first inside a with block.
918
-
919
  The other processes will enter the with block after the main process exits.
920
  """
921
  with PartialState().local_main_process_first():
922
  yield
923
-
924
  def print(self, *args, **kwargs):
925
  PartialState().print(*args, **kwargs)
926
-
927
-
928
  class GradientState:
929
  """
930
  Singleton class that has information related to gradient synchronization for gradient accumulation
931
-
932
  **Available attributes:**
933
-
934
  - **end_of_dataloader** (`bool`) -- Whether we have reached the end of the current dataloader
935
  - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
936
  - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
@@ -944,7 +805,6 @@ class GradientState:
944
  iteration and the number of total steps reset
945
  """
946
  _shared_state = SharedDict()
947
-
948
  def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
949
  self.__dict__ = self._shared_state
950
  if not self.initialized:
@@ -954,45 +814,37 @@ class GradientState:
954
  self.plugin_kwargs = (
955
  gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
956
  )
957
-
958
  # Plugin args are different and can be updated
959
  if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
960
  self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
961
-
962
  @property
963
  def num_steps(self) -> int:
964
  "Returns the number of steps to accumulate over"
965
  return self.plugin_kwargs.get("num_steps", 1)
966
-
967
  @property
968
  def adjust_scheduler(self) -> bool:
969
  "Returns whether the scheduler should be adjusted"
970
  return self.plugin_kwargs.get("adjust_scheduler", False)
971
-
972
  @property
973
  def sync_with_dataloader(self) -> bool:
974
  "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
975
  return self.plugin_kwargs.get("sync_with_dataloader", True)
976
-
977
  @property
978
  def initialized(self) -> bool:
979
  "Returns whether the `GradientState` has been initialized"
980
  return GradientState._shared_state != {}
981
-
982
  @property
983
  def end_of_dataloader(self) -> bool:
984
  "Returns whether we have reached the end of the current dataloader"
985
  if not self.in_dataloader:
986
  return False
987
  return self.active_dataloader.end_of_dataloader
988
-
989
  @property
990
  def remainder(self) -> int:
991
  "Returns the number of extra samples that were added from padding the dataloader"
992
  if not self.in_dataloader:
993
  return -1
994
  return self.active_dataloader.remainder
995
-
996
  def __repr__(self):
997
  return (
998
  f"Sync Gradients: {self.sync_gradients}\n"
@@ -1000,26 +852,21 @@ class GradientState:
1000
  f"Extra samples added: {self.remainder}\n"
1001
  f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
1002
  )
1003
-
1004
  def _set_sync_gradients(self, sync_gradients):
1005
  "Private function that sets whether gradients should be synchronized. Users should not have to call this."
1006
  self.sync_gradients = sync_gradients
1007
-
1008
  def _add_dataloader(self, dataloader):
1009
  "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
1010
  self.active_dataloader = dataloader
1011
  self.dataloader_references.append(self.active_dataloader)
1012
-
1013
  def _remove_dataloader(self, dataloader):
1014
  "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
1015
  self.dataloader_references.remove(dataloader)
1016
  self.active_dataloader = self.dataloader_references[-1]
1017
-
1018
  @property
1019
  def in_dataloader(self) -> bool:
1020
  "Returns whether the current process is in a dataloader"
1021
  return self.active_dataloader is not None
1022
-
1023
  @staticmethod
1024
  def _reset_state():
1025
  "Resets `_shared_state`, is used internally and should not be called"
 
1
  logger = logging.getLogger(__name__)
 
 
2
  def is_initialized() -> bool:
3
  """
4
  Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
5
  but works as a module method.
6
  """
7
  return AcceleratorState._shared_state != {}
 
 
8
  # Lambda function that does nothing
9
  def do_nothing(*args, **kwargs):
10
  return None
 
 
11
  class ThreadLocalSharedDict(threading.local):
12
  """
13
  Descriptor that holds a dict shared between instances of a class in the same thread.
 
14
  Note: Descriptors have slightly different semantics than just a dict field on its own.
15
  `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
16
  underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
17
  the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
18
  object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
 
19
  See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
 
20
  This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
 
21
  See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
22
  """
23
  def __init__(self, thread_local: bool = False):
24
  self._storage = {}
 
25
  def __get__(self, obj, objtype=None):
26
  return self._storage
 
27
  def __set__(self, obj, value):
28
  self._storage = value
 
 
29
  # Prefer global shared dictionary, except when using TPU.
30
  SharedDict = dict if not is_tpu_available(check_device=False) else ThreadLocalSharedDict
 
 
31
  # Inspired by Alex Martelli's 'Borg'.
32
  class PartialState:
33
  """
34
  Singleton class that has information about the current training environment and functions to help with process
35
  control. Designed to be used when only process control and device execution states are needed. Does *not* need to
36
  be initialized from `Accelerator`.
 
37
  **Available attributes:**
 
38
  - **device** (`torch.device`) -- The device to use.
39
  - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
40
  in use.
 
49
  - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
50
  """
51
  _shared_state = SharedDict()
 
52
  def __init__(self, cpu: bool = False, **kwargs):
53
  self.__dict__ = self._shared_state
54
  if not self.initialized:
 
63
  os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
64
  and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
65
  )
 
66
  if use_sagemaker_dp and not cpu:
67
  if (
68
  os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL
69
  ) or use_sagemaker_dp:
70
  self.distributed_type = DistributedType.MULTI_GPU
71
  import smdistributed.dataparallel.torch.torch_smddp # noqa
 
72
  if not torch.distributed.is_initialized():
73
  torch.distributed.init_process_group(backend="smddp")
74
  self.backend = "smddp"
 
95
  self.distributed_type = DistributedType.DEEPSPEED
96
  if not torch.distributed.is_initialized():
97
  from deepspeed import comm as dist
 
98
  # DeepSpeed always uses nccl
99
  kwargs.pop("backend", None)
100
  if is_xpu_available() and is_ccl_available():
 
105
  else:
106
  self.backend = "nccl"
107
  dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
 
108
  self.num_processes = torch.distributed.get_world_size()
109
  self.process_index = torch.distributed.get_rank()
110
  self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
 
207
  and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
208
  ):
209
  import psutil
 
210
  num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
211
  if num_cpu_threads_per_process == 0:
212
  num_cpu_threads_per_process = 1
 
237
  )
238
  self.num_processes = 1
239
  self.process_index = self.local_process_index = 0
 
240
  if self.device is None:
241
  self.device = torch.device("cpu") if cpu else self.default_device
 
242
  self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
 
243
  def __repr__(self) -> str:
244
  return (
245
  f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
 
248
  f"Local process index: {self.local_process_index}\n"
249
  f"Device: {self.device}\n"
250
  )
 
251
  @staticmethod
252
  def _reset_state():
253
  "Resets `_shared_state`, is used internally and should not be called"
254
  PartialState._shared_state.clear()
 
255
  @property
256
  def initialized(self) -> bool:
257
  "Returns whether the `PartialState` has been initialized"
258
  return self._shared_state != {}
 
259
  @property
260
  def use_distributed(self):
261
  """
262
  Whether the Accelerator is configured for distributed training
263
  """
264
  return self.distributed_type != DistributedType.NO and self.num_processes > 1
 
265
  @property
266
  def is_last_process(self) -> bool:
267
  "Returns whether the current process is the last one"
268
  return self.process_index == self.num_processes - 1
 
269
  @property
270
  def is_main_process(self) -> bool:
271
  "Returns whether the current process is the main process"
272
  return (
273
  self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
274
  )
 
275
  @property
276
  def is_local_main_process(self) -> bool:
277
  "Returns whether the current process is the main process on the local node"
 
280
  if self.distributed_type != DistributedType.MEGATRON_LM
281
  else self.is_last_process
282
  )
 
283
  def wait_for_everyone(self):
284
  """
285
  Will stop the execution of the current process until every other process has reached that point (so this does
286
  nothing when the script is only run in one process). Useful to do before saving a model.
 
287
  Example:
 
288
  ```python
289
  >>> # Assuming two GPU processes
290
  >>> import time
291
  >>> from accelerate.state import PartialState
 
292
  >>> state = PartialState()
293
  >>> if state.is_main_process:
294
  ... time.sleep(2)
 
310
  torch.distributed.barrier()
311
  elif self.distributed_type == DistributedType.TPU:
312
  xm.rendezvous("accelerate.utils.wait_for_everyone")
 
313
  def _goes_first(self, is_main: bool):
314
  if not is_main:
315
  self.wait_for_everyone()
 
316
  yield
 
317
  if is_main:
318
  self.wait_for_everyone()
 
319
  @contextmanager
320
  def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
321
  """
322
  Splits `inputs` between `self.num_processes` quickly and can then be used on that process. Useful when doing
323
  distributed inference, such as with different prompts.
 
324
  Note that when using a `dict`, all keys need to have the same number of elements.
 
325
  Args:
326
  inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
327
  The input to split between processes.
 
329
  Whether to apply padding by repeating the last element of the input so that all processes have the same
330
  number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
331
  in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
 
 
332
  Example:
 
333
  ```python
334
  # Assume there are two processes
335
  from accelerate import PartialState
 
336
  state = PartialState()
337
  with state.split_between_processes(["A", "B", "C"]) as inputs:
338
  print(inputs)
 
340
  ["A", "B"]
341
  # Process 1
342
  ["C"]
 
343
  with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
344
  print(inputs)
345
  # Process 0
 
362
  end_index = start_index + num_samples_per_process
363
  if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
364
  end_index = length
 
365
  def _split_values(inputs, start_index, end_index):
366
  if isinstance(inputs, (list, tuple, torch.Tensor)):
367
  if start_index >= len(inputs):
 
371
  if apply_padding:
372
  if isinstance(result, torch.Tensor):
373
  from accelerate.utils import pad_across_processes, send_to_device
 
374
  # The tensor needs to be on the device before we can pad it
375
  tensorized_result = send_to_device(result, self.device)
376
  result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
 
383
  return inputs
384
  else:
385
  return inputs
 
386
  yield _split_values(inputs, start_index, end_index)
 
387
  @contextmanager
388
  def main_process_first(self):
389
  """
390
  Lets the main process go first inside a with block.
 
391
  The other processes will enter the with block after the main process exits.
 
392
  Example:
 
393
  ```python
394
  >>> from accelerate import Accelerator
 
395
  >>> accelerator = Accelerator()
396
  >>> with accelerator.main_process_first():
397
  ... # This will be printed first by process 0 then in a seemingly
 
400
  ```
401
  """
402
  yield from self._goes_first(self.is_main_process)
 
403
  @contextmanager
404
  def local_main_process_first(self):
405
  """
406
  Lets the local main process go first inside a with block.
 
407
  The other processes will enter the with block after the main process exits.
 
408
  Example:
 
409
  ```python
410
  >>> from accelerate.state import PartialState
 
411
  >>> state = PartialState()
412
  >>> with state.local_main_process_first():
413
  ... # This will be printed first by local process 0 then in a seemingly
 
416
  ```
417
  """
418
  yield from self._goes_first(self.is_local_main_process)
 
419
  def on_main_process(self, function: Callable[..., Any] = None):
420
  """
421
  Decorator that only runs the decorated function on the main process.
 
422
  Args:
423
  function (`Callable`): The function to decorate.
 
424
  Example:
 
425
  ```python
426
  >>> from accelerate.state import PartialState
 
427
  >>> state = PartialState()
 
 
428
  >>> @state.on_main_process
429
  ... def print_something():
430
  ... print("This will be printed by process 0 only.")
 
 
431
  >>> print_something()
432
  "This will be printed by process 0 only"
433
  ```
 
437
  if self.is_main_process or not self.use_distributed:
438
  return function
439
  return do_nothing
 
440
  def on_local_main_process(self, function: Callable[..., Any] = None):
441
  """
442
  Decorator that only runs the decorated function on the local main process.
 
443
  Args:
444
  function (`Callable`): The function to decorate.
 
445
  Example:
446
  ```python
447
  # Assume we have 2 servers with 4 processes each.
448
  from accelerate.state import PartialState
 
449
  state = PartialState()
 
 
450
  @state.on_local_main_process
451
  def print_something():
452
  print("This will be printed by process 0 only on each server.")
 
 
453
  print_something()
454
  # On server 1:
455
  "This will be printed by process 0 only"
 
460
  if self.is_local_main_process or not self.use_distributed:
461
  return function
462
  return do_nothing
 
463
  def on_last_process(self, function: Callable[..., Any]):
464
  """
465
  Decorator that only runs the decorated function on the last process.
 
466
  Args:
467
  function (`Callable`): The function to decorate.
 
468
  Example:
469
  ```python
470
  # Assume we have 4 processes.
471
  from accelerate.state import PartialState
 
472
  state = PartialState()
 
 
473
  @state.on_last_process
474
  def print_something():
475
  print(f"Printed on process {state.process_index}")
 
 
476
  print_something()
477
  "Printed on process 3"
478
  ```
 
480
  if self.is_last_process or not self.use_distributed:
481
  return function
482
  return do_nothing
 
483
  def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
484
  """
485
  Decorator that only runs the decorated function on the process with the given index.
 
486
  Args:
487
  function (`Callable`, `optional`):
488
  The function to decorate.
489
  process_index (`int`, `optional`):
490
  The index of the process on which to run the function.
 
491
  Example:
492
  ```python
493
  # Assume we have 4 processes.
494
  from accelerate.state import PartialState
 
495
  state = PartialState()
 
 
496
  @state.on_process(process_index=2)
497
  def print_something():
498
  print(f"Printed on process {state.process_index}")
 
 
499
  print_something()
500
  "Printed on process 2"
501
  ```
 
505
  if (self.process_index == process_index) or (not self.use_distributed):
506
  return function
507
  return do_nothing
 
508
  def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
509
  """
510
  Decorator that only runs the decorated function on the process with the given index on the current node.
 
511
  Args:
512
  function (`Callable`, *optional*):
513
  The function to decorate.
514
  local_process_index (`int`, *optional*):
515
  The index of the local process on which to run the function.
 
516
  Example:
517
  ```python
518
  # Assume we have 2 servers with 4 processes each.
519
  from accelerate import Accelerator
 
520
  accelerator = Accelerator()
 
 
521
  @accelerator.on_local_process(local_process_index=2)
522
  def print_something():
523
  print(f"Printed on process {accelerator.local_process_index}")
 
 
524
  print_something()
525
  # On server 1:
526
  "Printed on process 2"
 
533
  if (self.local_process_index == local_process_index) or (not self.use_distributed):
534
  return function
535
  return do_nothing
 
536
  def print(self, *args, **kwargs):
537
  if self.is_local_main_process:
538
  print(*args, **kwargs)
 
539
  @property
540
  def default_device(self) -> torch.device:
541
  """
 
556
  return torch.device("npu")
557
  else:
558
  return torch.device("cpu")
 
 
559
  class AcceleratorState:
560
  """
561
  Singleton class that has information about the current training environment.
 
562
  **Available attributes:**
 
563
  - **device** (`torch.device`) -- The device to use.
564
  - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
565
  in use.
 
575
  - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
576
  """
577
  _shared_state = SharedDict()
 
578
  def __init__(
579
  self,
580
  mixed_precision: str = None,
 
613
  "or higher, compute capability of 8.9 or higher). Will use FP16 instead."
614
  )
615
  mixed_precision = "fp16"
 
616
  self.dynamo_plugin = dynamo_plugin
617
  if not _from_accelerator:
618
  raise ValueError(
 
661
  if self._mixed_precision != "no":
662
  fsdp_plugin.set_mixed_precision(self._mixed_precision)
663
  self.fsdp_plugin = fsdp_plugin
 
664
  if (
665
  self.dynamo_plugin.backend != DynamoBackend.NO
666
  and self._mixed_precision == "no"
 
668
  ):
669
  torch.backends.cuda.matmul.allow_tf32 = True
670
  PartialState._shared_state["distributed_type"] = self.distributed_type
 
671
  @property
672
  def initialized(self) -> bool:
673
  return self._shared_state != PartialState._shared_state
 
674
  def __repr__(self):
675
  repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
676
  if self.distributed_type == DistributedType.DEEPSPEED:
677
  repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
678
  return repr
 
679
  def _check_initialized(self, mixed_precision=None, cpu=None):
680
  "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
681
  if self.initialized:
 
688
  and self.distributed_type != DistributedType.DEEPSPEED
689
  ):
690
  raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
 
691
  # For backward compatibility
692
  @property
693
  def use_fp16(self):
 
697
  FutureWarning,
698
  )
699
  return self._mixed_precision != "no"
 
700
  @property
701
  def mixed_precision(self):
702
  if self.distributed_type == DistributedType.DEEPSPEED:
 
710
  else:
711
  mixed_precision = self._mixed_precision
712
  return mixed_precision
 
713
  @staticmethod
714
  def _reset_state(reset_partial_state: bool = False):
715
  "Resets `_shared_state`, is used internally and should not be called"
716
  AcceleratorState._shared_state.clear()
717
  if reset_partial_state:
718
  PartialState._reset_state()
 
719
  @property
720
  def use_distributed(self):
721
  """
722
  Whether the Accelerator is configured for distributed training
723
  """
724
  return PartialState().use_distributed
 
725
  @property
726
  def is_last_process(self) -> bool:
727
  "Returns whether the current process is the last one"
728
  return PartialState().is_last_process
 
729
  @property
730
  def is_main_process(self) -> bool:
731
  "Returns whether the current process is the main process"
732
  return PartialState().is_main_process
 
733
  @property
734
  def is_local_main_process(self) -> bool:
735
  "Returns whether the current process is the main process on the local node"
736
  return PartialState().is_local_main_process
 
737
  def wait_for_everyone(self):
738
  PartialState().wait_for_everyone()
 
739
  @contextmanager
740
  def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
741
  """
742
  Splits `inputs` between `self.num_processes` quickly and can then be used on that process. Useful when doing
743
  distributed inference, such as with different prompts.
 
744
  Note that when using a `dict`, all keys need to have the same number of elements.
 
745
  Args:
746
  inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
747
  The input to split between processes.
 
749
  Whether to apply padding by repeating the last element of the input so that all processes have the same
750
  number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
751
  in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
 
 
752
  Example:
 
753
  ```python
754
  # Assume there are two processes
755
  from accelerate.state import AcceleratorState
 
756
  state = AcceleratorState()
757
  with state.split_between_processes(["A", "B", "C"]) as inputs:
758
  print(inputs)
 
760
  ["A", "B"]
761
  # Process 1
762
  ["C"]
 
763
  with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
764
  print(inputs)
765
  # Process 0
 
770
  """
771
  with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
772
  yield inputs
 
773
  @contextmanager
774
  def main_process_first(self):
775
  """
776
  Lets the main process go first inside a with block.
 
777
  The other processes will enter the with block after the main process exits.
778
  """
779
  with PartialState().main_process_first():
780
  yield
 
781
  @contextmanager
782
  def local_main_process_first(self):
783
  """
784
  Lets the local main process go first inside a with block.
 
785
  The other processes will enter the with block after the main process exits.
786
  """
787
  with PartialState().local_main_process_first():
788
  yield
 
789
  def print(self, *args, **kwargs):
790
  PartialState().print(*args, **kwargs)
 
 
791
  class GradientState:
792
  """
793
  Singleton class that has information related to gradient synchronization for gradient accumulation
 
794
  **Available attributes:**
 
795
  - **end_of_dataloader** (`bool`) -- Whether we have reached the end of the current dataloader
796
  - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
797
  - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
 
805
  iteration and the number of total steps reset
806
  """
807
  _shared_state = SharedDict()
 
808
  def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
809
  self.__dict__ = self._shared_state
810
  if not self.initialized:
 
814
  self.plugin_kwargs = (
815
  gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
816
  )
 
817
  # Plugin args are different and can be updated
818
  if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
819
  self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
 
820
  @property
821
  def num_steps(self) -> int:
822
  "Returns the number of steps to accumulate over"
823
  return self.plugin_kwargs.get("num_steps", 1)
 
824
  @property
825
  def adjust_scheduler(self) -> bool:
826
  "Returns whether the scheduler should be adjusted"
827
  return self.plugin_kwargs.get("adjust_scheduler", False)
 
828
  @property
829
  def sync_with_dataloader(self) -> bool:
830
  "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
831
  return self.plugin_kwargs.get("sync_with_dataloader", True)
 
832
  @property
833
  def initialized(self) -> bool:
834
  "Returns whether the `GradientState` has been initialized"
835
  return GradientState._shared_state != {}
 
836
  @property
837
  def end_of_dataloader(self) -> bool:
838
  "Returns whether we have reached the end of the current dataloader"
839
  if not self.in_dataloader:
840
  return False
841
  return self.active_dataloader.end_of_dataloader
 
842
  @property
843
  def remainder(self) -> int:
844
  "Returns the number of extra samples that were added from padding the dataloader"
845
  if not self.in_dataloader:
846
  return -1
847
  return self.active_dataloader.remainder
 
848
  def __repr__(self):
849
  return (
850
  f"Sync Gradients: {self.sync_gradients}\n"
 
852
  f"Extra samples added: {self.remainder}\n"
853
  f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
854
  )
 
855
  def _set_sync_gradients(self, sync_gradients):
856
  "Private function that sets whether gradients should be synchronized. Users should not have to call this."
857
  self.sync_gradients = sync_gradients
 
858
  def _add_dataloader(self, dataloader):
859
  "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
860
  self.active_dataloader = dataloader
861
  self.dataloader_references.append(self.active_dataloader)
 
862
  def _remove_dataloader(self, dataloader):
863
  "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
864
  self.dataloader_references.remove(dataloader)
865
  self.active_dataloader = self.dataloader_references[-1]
 
866
  @property
867
  def in_dataloader(self) -> bool:
868
  "Returns whether the current process is in a dataloader"
869
  return self.active_dataloader is not None
 
870
  @staticmethod
871
  def _reset_state():
872
  "Resets `_shared_state`, is used internally and should not be called"
src/tracking.py CHANGED
@@ -1,39 +1,25 @@
1
-
2
-
3
  # Expectation:
4
  # Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}
5
-
6
  _available_trackers = []
7
-
8
  if is_tensorboard_available():
9
  _available_trackers.append(LoggerType.TENSORBOARD)
10
-
11
  if is_wandb_available():
12
  _available_trackers.append(LoggerType.WANDB)
13
-
14
  if is_comet_ml_available():
15
  _available_trackers.append(LoggerType.COMETML)
16
-
17
  if is_aim_available():
18
  _available_trackers.append(LoggerType.AIM)
19
-
20
  if is_mlflow_available():
21
  _available_trackers.append(LoggerType.MLFLOW)
22
-
23
  if is_clearml_available():
24
  _available_trackers.append(LoggerType.CLEARML)
25
-
26
  if is_dvclive_available():
27
  _available_trackers.append(LoggerType.DVCLIVE)
28
-
29
  logger = get_logger(__name__)
30
-
31
-
32
  def on_main_process(function):
33
  """
34
  Decorator to selectively run the decorated function on the main process only based on the `main_process_only`
35
  attribute in a class.
36
-
37
  Checks at function execution rather than initialization time, not triggering the initialization of the
38
  `PartialState`.
39
  """
@@ -43,33 +29,23 @@ def on_main_process(function):
43
  return PartialState().on_main_process(function)(self, *args, **kwargs)
44
  else:
45
  return function(self, *args, **kwargs)
46
-
47
  return execute_on_main_process
48
-
49
-
50
  def get_available_trackers():
51
  "Returns a list of all supported available trackers in the system"
52
  return _available_trackers
53
-
54
-
55
  class GeneralTracker:
56
  """
57
  A base Tracker class to be used for all logging integration implementations.
58
-
59
  Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to
60
  [`Accelerator`].
61
-
62
  Should implement `name`, `requires_logging_directory`, and `tracker` properties such that:
63
-
64
  `name` (`str`): String representation of the tracker class name, such as "TensorBoard". `requires_logging_directory`
65
  (`bool`): Whether the logger requires a directory to store its logs. `tracker` (`object`): Should return the internal
66
  tracking mechanism used by a tracker class (such as the `run` for wandb).
67
-
68
  Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevant logging, init, and
69
  other functions should occur on the main process or across all processes (by default will use `True`)
70
  """
71
  main_process_only = True
72
-
73
  def __init__(self, _blank=False):
74
  if not _blank:
75
  err = ""
@@ -79,7 +55,6 @@ class GeneralTracker:
79
  if len(err) > 0:
80
  err += ", "
81
  err += "`requires_logging_directory`"
82
-
83
  # as tracker is a @property that relies on post-init
84
  if "tracker" not in dir(self):
85
  if len(err) > 0:
@@ -91,24 +66,20 @@ class GeneralTracker:
91
  f"required attributes. Please define them in the class definition: "
92
  f"{err}"
93
  )
94
-
95
  def store_init_configuration(self, values: dict):
96
  """
97
  Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration
98
  functionality of a tracking API.
99
-
100
  Args:
101
  values (Dictionary `str` to `bool`, `str`, `float` or `int`):
102
  Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
103
  `str`, `float`, `int`, or `None`.
104
  """
105
  pass
106
-
107
  def log(self, values: dict, step: Optional[int], **kwargs):
108
  """
109
  Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with
110
  special behavior for the `step` parameter.
111
-
112
  Args:
113
  values (Dictionary `str` to `str`, `float`, or `int`):
114
  Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
@@ -116,19 +87,15 @@ class GeneralTracker:
116
  The run step. If included, the log will be affiliated with this step.
117
  """
118
  pass
119
-
120
  def finish(self):
121
  """
122
  Should run any finalizing functions within the tracking API. If the API does not have one, just don't
123
  overwrite that method.
124
  """
125
  pass
126
-
127
-
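To make the `GeneralTracker` contract above concrete, here is a minimal sketch of a custom tracker. The class name, file format, and logged values are illustrative assumptions, not part of `accelerate`; only the required `name`, `requires_logging_directory`, and `tracker` members plus the standard hooks mirror the API documented above.

```py
import json
import os
from typing import Optional

# In user code these would typically come from the library, e.g.:
# from accelerate.tracking import GeneralTracker, on_main_process
# Here they are the definitions shown in this file.

class MyJSONTracker(GeneralTracker):
    # Required class-level attributes checked by GeneralTracker.__init__
    name = "my_json"                   # illustrative tracker name
    requires_logging_directory = True  # we write a file, so a directory is needed

    @on_main_process
    def __init__(self, run_name: str, logging_dir: str, **kwargs):
        super().__init__()
        self.run_name = run_name
        os.makedirs(logging_dir, exist_ok=True)
        # All records go to a single JSON-lines file (hypothetical format)
        self._handle = open(os.path.join(logging_dir, f"{run_name}.jsonl"), "a")

    @property
    def tracker(self):
        # Expose the underlying mechanism, analogous to wandb's `run`
        return self._handle

    @on_main_process
    def store_init_configuration(self, values: dict):
        self._handle.write(json.dumps({"hparams": values}) + "\n")

    @on_main_process
    def log(self, values: dict, step: Optional[int] = None, **kwargs):
        self._handle.write(json.dumps({"step": step, **values}) + "\n")

    @on_main_process
    def finish(self):
        self._handle.close()
```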
128
  class TensorBoardTracker(GeneralTracker):
129
  """
130
  A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.
131
-
132
  Args:
133
  run_name (`str`):
134
  The name of the experiment run
@@ -139,7 +106,6 @@ class TensorBoardTracker(GeneralTracker):
139
  """
140
  name = "tensorboard"
141
  requires_logging_directory = True
142
-
143
  @on_main_process
144
  def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
145
  try:
@@ -154,17 +120,14 @@ class TensorBoardTracker(GeneralTracker):
154
  logger.debug(
155
  "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
156
  )
157
-
158
  @property
159
  def tracker(self):
160
  return self.writer
161
-
162
  @on_main_process
163
  def store_init_configuration(self, values: dict):
164
  """
165
  Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
166
  hyperparameters in a yaml file for future use.
167
-
168
  Args:
169
  values (Dictionary `str` to `bool`, `str`, `float` or `int`):
170
  Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
@@ -182,12 +145,10 @@ class TensorBoardTracker(GeneralTracker):
182
  logger.error("Serialization to store hyperparameters failed")
183
  raise
184
  logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file")
185
-
186
  @on_main_process
187
  def log(self, values: dict, step: Optional[int] = None, **kwargs):
188
  """
189
  Logs `values` to the current run.
190
-
191
  Args:
192
  values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
193
  Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
@@ -208,12 +169,10 @@ class TensorBoardTracker(GeneralTracker):
208
  self.writer.add_scalars(k, v, global_step=step, **kwargs)
209
  self.writer.flush()
210
  logger.debug("Successfully logged to TensorBoard")
211
-
212
  @on_main_process
213
  def log_images(self, values: dict, step: Optional[int], **kwargs):
214
  """
215
  Logs `images` to the current run.
216
-
217
  Args:
218
  values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
219
  Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or
@@ -225,7 +184,6 @@ class TensorBoardTracker(GeneralTracker):
225
  for k, v in values.items():
226
  self.writer.add_images(k, v, global_step=step, **kwargs)
227
  logger.debug("Successfully logged images to TensorBoard")
228
-
229
  @on_main_process
230
  def finish(self):
231
  """
@@ -233,12 +191,9 @@ class TensorBoardTracker(GeneralTracker):
233
  """
234
  self.writer.close()
235
  logger.debug("TensorBoard writer closed")
236
-
237
-
238
  class WandBTracker(GeneralTracker):
239
  """
240
  A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.
241
-
242
  Args:
243
  run_name (`str`):
244
  The name of the experiment run.
@@ -248,44 +203,35 @@ class WandBTracker(GeneralTracker):
248
  name = "wandb"
249
  requires_logging_directory = False
250
  main_process_only = False
251
-
252
  @on_main_process
253
  def __init__(self, run_name: str, **kwargs):
254
  super().__init__()
255
  self.run_name = run_name
256
-
257
  import wandb
258
-
259
  self.run = wandb.init(project=self.run_name, **kwargs)
260
  logger.debug(f"Initialized WandB project {self.run_name}")
261
  logger.debug(
262
  "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
263
  )
264
-
265
  @property
266
  def tracker(self):
267
  return self.run
268
-
269
  @on_main_process
270
  def store_init_configuration(self, values: dict):
271
  """
272
  Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
273
-
274
  Args:
275
  values (Dictionary `str` to `bool`, `str`, `float` or `int`):
276
  Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
277
  `str`, `float`, `int`, or `None`.
278
  """
279
  import wandb
280
-
281
  wandb.config.update(values, allow_val_change=True)
282
  logger.debug("Stored initial configuration hyperparameters to WandB")
283
-
284
  @on_main_process
285
  def log(self, values: dict, step: Optional[int] = None, **kwargs):
286
  """
287
  Logs `values` to the current run.
288
-
289
  Args:
290
  values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
291
  Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
@@ -297,12 +243,10 @@ class WandBTracker(GeneralTracker):
297
  """
298
  self.run.log(values, step=step, **kwargs)
299
  logger.debug("Successfully logged to WandB")
300
-
301
  @on_main_process
302
  def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
303
  """
304
  Logs `images` to the current run.
305
-
306
  Args:
307
  values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
308
  Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or
@@ -312,11 +256,9 @@ class WandBTracker(GeneralTracker):
312
  Additional keyword arguments passed along to the `wandb.log` method.
313
  """
314
  import wandb
315
-
316
  for k, v in values.items():
317
  self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
318
  logger.debug("Successfully logged images to WandB")
319
-
320
  @on_main_process
321
  def log_table(
322
  self,
@@ -330,7 +272,6 @@ class WandBTracker(GeneralTracker):
330
  """
331
  Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either
332
  with `columns` and `data` or with `dataframe`.
333
-
334
  Args:
335
  table_name (`str`):
336
  The name to give to the logged table on the wandb workspace
@@ -344,10 +285,8 @@ class WandBTracker(GeneralTracker):
344
  The run step. If included, the log will be affiliated with this step.
345
  """
346
  import wandb
347
-
348
  values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
349
  self.log(values, step=step, **kwargs)
350
-
351
  @on_main_process
352
  def finish(self):
353
  """
@@ -355,14 +294,10 @@ class WandBTracker(GeneralTracker):
355
  """
356
  self.run.finish()
357
  logger.debug("WandB run closed")
358
-
359
-
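As a small, hedged illustration of the `log_table` call shape documented above (the run name, columns, and rows are placeholders, and a configured wandb login is assumed):

```py
# WandBTracker as defined above (importable as accelerate.tracking.WandBTracker);
# an instance can also be passed directly to Accelerator(log_with=[tracker]).
tracker = WandBTracker(run_name="my_project")
tracker.store_init_configuration({"lr": 3e-4})

# log_table wraps the rows in a wandb.Table and forwards it through self.log()
tracker.log_table(
    "predictions",
    columns=["input", "prediction"],
    data=[["hello", "hallo"], ["cat", "Katze"]],
    step=0,
)
tracker.finish()
```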
360
  class CometMLTracker(GeneralTracker):
361
  """
362
  A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script.
363
-
364
  API keys must be stored in a Comet config file.
365
-
366
  Args:
367
  run_name (`str`):
368
  The name of the experiment run.
@@ -371,29 +306,23 @@ class CometMLTracker(GeneralTracker):
371
  """
372
  name = "comet_ml"
373
  requires_logging_directory = False
374
-
375
  @on_main_process
376
  def __init__(self, run_name: str, **kwargs):
377
  super().__init__()
378
  self.run_name = run_name
379
-
380
  from comet_ml import Experiment
381
-
382
  self.writer = Experiment(project_name=run_name, **kwargs)
383
  logger.debug(f"Initialized CometML project {self.run_name}")
384
  logger.debug(
385
  "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
386
  )
387
-
388
  @property
389
  def tracker(self):
390
  return self.writer
391
-
392
  @on_main_process
393
  def store_init_configuration(self, values: dict):
394
  """
395
  Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
396
-
397
  Args:
398
  values (Dictionary `str` to `bool`, `str`, `float` or `int`):
399
  Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
@@ -401,12 +330,10 @@ class CometMLTracker(GeneralTracker):
401
  """
402
  self.writer.log_parameters(values)
403
  logger.debug("Stored initial configuration hyperparameters to CometML")
404
-
405
  @on_main_process
406
  def log(self, values: dict, step: Optional[int] = None, **kwargs):
407
  """
408
  Logs `values` to the current run.
409
-
410
  Args:
411
  values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
412
  Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
@@ -427,7 +354,6 @@ class CometMLTracker(GeneralTracker):
427
  elif isinstance(v, dict):
428
  self.writer.log_metrics(v, step=step, **kwargs)
429
  logger.debug("Successfully logged to CometML")
430
-
431
  @on_main_process
432
  def finish(self):
433
  """
@@ -435,12 +361,9 @@ class CometMLTracker(GeneralTracker):
435
  """
436
  self.writer.end()
437
  logger.debug("CometML run closed")
438
-
439
-
440
  class AimTracker(GeneralTracker):
441
  """
442
  A `Tracker` class that supports `aim`. Should be initialized at the start of your script.
443
-
444
  Args:
445
  run_name (`str`):
446
  The name of the experiment run.
@@ -449,40 +372,32 @@ class AimTracker(GeneralTracker):
449
  """
450
  name = "aim"
451
  requires_logging_directory = True
452
-
453
  @on_main_process
454
  def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
455
  self.run_name = run_name
456
-
457
  from aim import Run
458
-
459
  self.writer = Run(repo=logging_dir, **kwargs)
460
  self.writer.name = self.run_name
461
  logger.debug(f"Initialized Aim project {self.run_name}")
462
  logger.debug(
463
  "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
464
  )
465
-
466
  @property
467
  def tracker(self):
468
  return self.writer
469
-
470
  @on_main_process
471
  def store_init_configuration(self, values: dict):
472
  """
473
  Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
474
-
475
  Args:
476
  values (`dict`):
477
  Values to be stored as initial hyperparameters as key-value pairs.
478
  """
479
  self.writer["hparams"] = values
480
-
481
  @on_main_process
482
  def log(self, values: dict, step: Optional[int], **kwargs):
483
  """
484
  Logs `values` to the current run.
485
-
486
  Args:
487
  values (`dict`):
488
  Values to be logged as key-value pairs.
@@ -494,12 +409,10 @@ class AimTracker(GeneralTracker):
494
  # Note: replace this with the dictionary support when merged
495
  for key, value in values.items():
496
  self.writer.track(value, name=key, step=step, **kwargs)
497
-
498
  @on_main_process
499
  def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None):
500
  """
501
  Logs `images` to the current run.
502
-
503
  Args:
504
  values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`):
505
  Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a
@@ -511,14 +424,11 @@ class AimTracker(GeneralTracker):
511
  keys `aim_image` and `track`, respectively.
512
  """
513
  import aim
514
-
515
  aim_image_kw = {}
516
  track_kw = {}
517
-
518
  if kwargs is not None:
519
  aim_image_kw = kwargs.get("aim_image", {})
520
  track_kw = kwargs.get("track", {})
521
-
522
  for key, value in values.items():
523
  if isinstance(value, tuple):
524
  img, caption = value
@@ -526,19 +436,15 @@ class AimTracker(GeneralTracker):
526
  img, caption = value, ""
527
  aim_image = aim.Image(img, caption=caption, **aim_image_kw)
528
  self.writer.track(aim_image, name=key, step=step, **track_kw)
529
-
530
  @on_main_process
531
  def finish(self):
532
  """
533
  Closes `aim` writer
534
  """
535
  self.writer.close()
536
-
537
-
538
  class MLflowTracker(GeneralTracker):
539
  """
540
  A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.
541
-
542
  Args:
543
  experiment_name (`str`, *optional*):
544
  Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.
@@ -564,7 +470,6 @@ class MLflowTracker(GeneralTracker):
564
  """
565
  name = "mlflow"
566
  requires_logging_directory = False
567
-
568
  @on_main_process
569
  def __init__(
570
  self,
@@ -581,11 +486,8 @@ class MLflowTracker(GeneralTracker):
581
  tags = os.getenv("MLFLOW_TAGS", tags)
582
  if isinstance(tags, str):
583
  tags = json.loads(tags)
584
-
585
  nested_run = os.getenv("MLFLOW_NESTED_RUN", nested_run)
586
-
587
  import mlflow
588
-
589
  exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'")
590
  if len(exps) > 0:
591
  if len(exps) > 1:
@@ -597,7 +499,6 @@ class MLflowTracker(GeneralTracker):
597
  artifact_location=logging_dir,
598
  tags=tags,
599
  )
600
-
601
  self.active_run = mlflow.start_run(
602
  run_id=run_id,
603
  experiment_id=experiment_id,
@@ -606,27 +507,22 @@ class MLflowTracker(GeneralTracker):
606
  tags=tags,
607
  description=description,
608
  )
609
-
610
  logger.debug(f"Initialized mlflow experiment {experiment_name}")
611
  logger.debug(
612
  "Make sure to log any initial configurations with `self.store_init_configuration` before training!"
613
  )
614
-
615
  @property
616
  def tracker(self):
617
  return self.active_run
618
-
619
  @on_main_process
620
  def store_init_configuration(self, values: dict):
621
  """
622
  Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
623
-
624
  Args:
625
  values (`dict`):
626
  Values to be stored as initial hyperparameters as key-value pairs.
627
  """
628
  import mlflow
629
-
630
  for name, value in list(values.items()):
631
  # internally, all values are converted to str in MLflow
632
  if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:
@@ -635,20 +531,15 @@ class MLflowTracker(GeneralTracker):
635
  f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute."
636
  )
637
  del values[name]
638
-
639
  values_list = list(values.items())
640
-
641
  # MLflow cannot log more than 100 values in one go, so we have to split it
642
  for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):
643
  mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))
644
-
645
  logger.debug("Stored initial configuration hyperparameters to MLflow")
646
-
647
  @on_main_process
648
  def log(self, values: dict, step: Optional[int]):
649
  """
650
  Logs `values` to the current run.
651
-
652
  Args:
653
  values (`dict`):
654
  Values to be logged as key-value pairs.
@@ -665,24 +556,18 @@ class MLflowTracker(GeneralTracker):
665
  "MLflow's log_metric() only accepts float and int types so we dropped this attribute."
666
  )
667
  import mlflow
668
-
669
  mlflow.log_metrics(metrics, step=step)
670
  logger.debug("Successfully logged to mlflow")
671
-
672
  @on_main_process
673
  def finish(self):
674
  """
675
  End the active MLflow run.
676
  """
677
  import mlflow
678
-
679
  mlflow.end_run()
680
-
681
-
682
  class ClearMLTracker(GeneralTracker):
683
  """
684
  A `Tracker` class that supports `clearml`. Should be initialized at the start of your script.
685
-
686
  Args:
687
  run_name (`str`, *optional*):
688
  Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this
@@ -692,43 +577,35 @@ class ClearMLTracker(GeneralTracker):
692
  """
693
  name = "clearml"
694
  requires_logging_directory = False
695
-
696
  @on_main_process
697
  def __init__(self, run_name: str = None, **kwargs):
698
  from clearml import Task
699
-
700
  current_task = Task.current_task()
701
  self._initialized_externally = False
702
  if current_task:
703
  self._initialized_externally = True
704
  self.task = current_task
705
  return
706
-
707
  kwargs.setdefault("project_name", os.environ.get("CLEARML_PROJECT", run_name))
708
  kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name))
709
  self.task = Task.init(**kwargs)
710
-
711
  @property
712
  def tracker(self):
713
  return self.task
714
-
715
  @on_main_process
716
  def store_init_configuration(self, values: dict):
717
  """
718
  Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment.
719
-
720
  Args:
721
  values (`dict`):
722
  Values to be stored as initial hyperparameters as key-value pairs.
723
  """
724
  return self.task.connect_configuration(values)
725
-
726
  @on_main_process
727
  def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs):
728
  """
729
  Logs the `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be
730
  ints or floats.
731
-
732
  Args:
733
  values (`Dict[str, Union[int, float]]`):
734
  Values to be logged as key-value pairs. If the key starts with 'eval_'/'test_'/'train_', the value will
@@ -756,12 +633,10 @@ class ClearMLTracker(GeneralTracker):
756
  continue
757
  title, series = ClearMLTracker._get_title_series(k)
758
  clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)
759
-
760
  @on_main_process
761
  def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
762
  """
763
  Logs `images` to the current run.
764
-
765
  Args:
766
  values (`Dict[str, List[Union[np.ndarray, PIL.Image]]]`):
767
  Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or
@@ -774,7 +649,6 @@ class ClearMLTracker(GeneralTracker):
774
  for k, v in values.items():
775
  title, series = ClearMLTracker._get_title_series(k)
776
  clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)
777
-
778
  @on_main_process
779
  def log_table(
780
  self,
@@ -787,7 +661,6 @@ class ClearMLTracker(GeneralTracker):
787
  ):
788
  """
789
  Log a Table to the task. Can be defined either with `columns` and `data` or with `dataframe`.
790
-
791
  Args:
792
  table_name (`str`):
793
  The name of the table
@@ -812,7 +685,6 @@ class ClearMLTracker(GeneralTracker):
812
  to_report = [columns] + data if columns else data
813
  title, series = ClearMLTracker._get_title_series(table_name)
814
  self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)
815
-
816
  @on_main_process
817
  def finish(self):
818
  """
@@ -821,66 +693,52 @@ class ClearMLTracker(GeneralTracker):
821
  """
822
  if self.task and not self._initialized_externally:
823
  self.task.close()
824
-
825
  @staticmethod
826
  def _get_title_series(name):
827
  for prefix in ["eval", "test", "train"]:
828
  if name.startswith(prefix + "_"):
829
  return name[len(prefix) + 1 :], prefix
830
  return name, "train"
831
-
832
-
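The prefix convention implemented by `_get_title_series` above can be summarized with a couple of purely illustrative examples:

```py
# Keys prefixed with eval_/test_/train_ are split into (title, series);
# anything else falls back to the "train" series.
assert ClearMLTracker._get_title_series("eval_loss") == ("loss", "eval")
assert ClearMLTracker._get_title_series("test_accuracy") == ("accuracy", "test")
assert ClearMLTracker._get_title_series("lr") == ("lr", "train")
```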
833
  class DVCLiveTracker(GeneralTracker):
834
  """
835
  A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script.
836
-
837
  Args:
838
  run_name (`str`, *optional*):
839
  Ignored for dvclive. See `kwargs` instead.
840
  kwargs:
841
  Additional keyword arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live).
842
-
843
  Example:
844
-
845
  ```py
846
  from accelerate import Accelerator
847
-
848
  accelerator = Accelerator(log_with="dvclive")
849
  accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}})
850
  ```
851
  """
852
  name = "dvclive"
853
  requires_logging_directory = False
854
-
855
  @on_main_process
856
  def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs):
857
  from dvclive import Live
858
-
859
  super().__init__()
860
  self.live = live if live is not None else Live(**kwargs)
861
-
862
  @property
863
  def tracker(self):
864
  return self.live
865
-
866
  @on_main_process
867
  def store_init_configuration(self, values: dict):
868
  """
869
  Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
870
  hyperparameters in a yaml file for future use.
871
-
872
  Args:
873
  values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types):
874
  Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
875
  `str`, `float`, or `int`.
876
  """
877
  self.live.log_params(values)
878
-
879
  @on_main_process
880
  def log(self, values: dict, step: Optional[int] = None, **kwargs):
881
  """
882
  Logs `values` to the current run.
883
-
884
  Args:
885
  values (Dictionary `str` to `str`, `float`, or `int`):
886
  Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
@@ -890,7 +748,6 @@ class DVCLiveTracker(GeneralTracker):
890
  Additional keyword arguments passed along to `dvclive.Live.log_metric()`.
891
  """
892
  from dvclive.plots import Metric
893
-
894
  if step is not None:
895
  self.live.step = step
896
  for k, v in values.items():
@@ -903,15 +760,12 @@ class DVCLiveTracker(GeneralTracker):
903
  "This invocation of DVCLive's Live.log_metric() "
904
  "is incorrect so we dropped this attribute."
905
  )
906
-
907
  @on_main_process
908
  def finish(self):
909
  """
910
  Closes `dvclive.Live()`.
911
  """
912
  self.live.end()
913
-
914
-
915
  LOGGER_TYPE_TO_CLASS = {
916
  "aim": AimTracker,
917
  "comet_ml": CometMLTracker,
@@ -921,8 +775,6 @@ LOGGER_TYPE_TO_CLASS = {
921
  "clearml": ClearMLTracker,
922
  "dvclive": DVCLiveTracker,
923
  }
924
-
925
-
926
  def filter_trackers(
927
  log_with: List[Union[str, LoggerType, GeneralTracker]],
928
  logging_dir: Union[str, os.PathLike] = None,
@@ -933,11 +785,9 @@ def filter_trackers(
933
  - Filters out repeats of tracker types
934
  - If `all` is in `log_with`, will return all trackers in the environment
935
  - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None`
936
-
937
  Args:
938
  log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
939
  A list of loggers to be setup for experiment tracking. Should be one or several of:
940
-
941
  - `"all"`
942
  - `"tensorboard"`
943
  - `"wandb"`
@@ -974,5 +824,4 @@ def filter_trackers(
974
  loggers.append(log_type)
975
  else:
976
  logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.")
977
-
978
  return loggers
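For orientation, a hedged usage sketch of how these trackers are normally driven through `Accelerator` rather than instantiated by hand; the project name, directory, and metric values are placeholders, and the exact constructor argument (`project_dir` vs. the older `logging_dir`) depends on the accelerate version.

```py
from accelerate import Accelerator

# log_with accepts tracker names, LoggerType values, or GeneralTracker instances;
# filter_trackers() above resolves this list against what is installed.
accelerator = Accelerator(log_with=["tensorboard"], project_dir="runs")

# init_trackers() instantiates each tracker and calls store_init_configuration()
accelerator.init_trackers("my_project", config={"lr": 3e-4, "batch_size": 16})

for step in range(10):
    loss = 0.1 * step  # placeholder metric
    # log() fans out to every active tracker's log() method
    accelerator.log({"train_loss": loss}, step=step)

# end_training() calls finish() on every tracker
accelerator.end_training()
```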
 
 
 
src/utils/bnb.py CHANGED
@@ -1,6 +1,4 @@
1
  logger = logging.getLogger(__name__)
2
-
3
-
4
  def load_and_quantize_model(
5
  model: torch.nn.Module,
6
  bnb_quantization_config: BnbQuantizationConfig,
@@ -15,7 +13,6 @@ def load_and_quantize_model(
15
  This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the
16
  model is on the meta device, we will load and dispatch the weights according to the `device_map` passed. If the
17
  model is already loaded, we will quantize the model and put the model on the GPU.
18
-
19
  Args:
20
  model (`torch.nn.Module`):
21
  Input model. The model can be already loaded or on the meta device
@@ -40,13 +37,11 @@ def load_and_quantize_model(
40
  offload_state_dict (`bool`, *optional*, defaults to `False`):
41
  If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
42
  the weight of the CPU state dict + the biggest shard does not fit.
43
-
44
  Returns:
45
  `torch.nn.Module`: The quantized model
46
  """
47
  load_in_4bit = bnb_quantization_config.load_in_4bit
48
  load_in_8bit = bnb_quantization_config.load_in_8bit
49
-
50
  if load_in_8bit and not is_8bit_bnb_available():
51
  raise ImportError(
52
  "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
@@ -57,31 +52,25 @@ def load_and_quantize_model(
57
  "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
58
  "make sure you have the latest version of `bitsandbytes` installed."
59
  )
60
-
61
  modules_on_cpu = []
62
  # custom device map
63
  if isinstance(device_map, dict) and len(device_map.keys()) > 1:
64
  modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
65
-
66
  # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
67
  if bnb_quantization_config.skip_modules is None:
68
  bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
69
-
70
  # add cpu modules to skip modules only for 4-bit modules
71
  if load_in_4bit:
72
  bnb_quantization_config.skip_modules.extend(modules_on_cpu)
73
  modules_to_not_convert = bnb_quantization_config.skip_modules
74
-
75
  # We add the modules we want to keep in full precision
76
  if bnb_quantization_config.keep_in_fp32_modules is None:
77
  bnb_quantization_config.keep_in_fp32_modules = []
78
  keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
79
  modules_to_not_convert.extend(keep_in_fp32_modules)
80
-
81
  # compatibility with peft
82
  model.is_loaded_in_4bit = load_in_4bit
83
  model.is_loaded_in_8bit = load_in_8bit
84
-
85
  model_device = get_parameter_device(model)
86
  if model_device.type != "meta":
87
  # quantization of an already loaded model
@@ -115,18 +104,15 @@ def load_and_quantize_model(
115
  "We move the model to cuda."
116
  )
117
  return model
118
-
119
  elif weights_location is None:
120
  raise RuntimeError(
121
  f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
122
  )
123
-
124
  else:
125
  with init_empty_weights():
126
  model = replace_with_bnb_layers(
127
  model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
128
  )
129
-
130
  device_map = get_quantized_model_device_map(
131
  model,
132
  bnb_quantization_config,
@@ -136,9 +122,7 @@ def load_and_quantize_model(
136
  )
137
  if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
138
  offload_state_dict = True
139
-
140
  offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
141
-
142
  load_checkpoint_in_model(
143
  model,
144
  weights_location,
@@ -150,8 +134,6 @@ def load_and_quantize_model(
150
  offload_8bit_bnb=load_in_8bit and offload,
151
  )
152
  return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
153
-
154
-
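As context for `load_and_quantize_model` above, a hedged usage sketch follows; the checkpoint path is a placeholder and the exact `BnbQuantizationConfig` fields may vary across accelerate versions.

```py
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

# Build the model skeleton on the meta device so no weights are materialized yet.
config = AutoConfig.from_pretrained("path/to/model")  # placeholder path
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True)  # or load_in_4bit=True

# Weights are then loaded shard by shard, quantized, and dispatched per device_map.
model = load_and_quantize_model(
    model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/model",  # folder containing the checkpoint shards
    device_map="auto",
)
```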
155
  def get_quantized_model_device_map(
156
  model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
157
  ):
@@ -161,14 +143,12 @@ def get_quantized_model_device_map(
161
  else:
162
  raise RuntimeError("No GPU found. A GPU is needed for quantization.")
163
  logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`.")
164
-
165
  if isinstance(device_map, str):
166
  if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
167
  raise ValueError(
168
  "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
169
  "'sequential'."
170
  )
171
-
172
  special_dtypes = {}
173
  special_dtypes.update(
174
  {
@@ -184,12 +164,10 @@ def get_quantized_model_device_map(
184
  if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
185
  }
186
  )
187
-
188
  kwargs = {}
189
  kwargs["special_dtypes"] = special_dtypes
190
  kwargs["no_split_module_classes"] = no_split_module_classes
191
  kwargs["dtype"] = bnb_quantization_config.target_dtype
192
-
193
  # get max_memory for each device.
194
  if device_map != "sequential":
195
  max_memory = get_balanced_memory(
@@ -198,14 +176,11 @@ def get_quantized_model_device_map(
198
  max_memory=max_memory,
199
  **kwargs,
200
  )
201
-
202
  kwargs["max_memory"] = max_memory
203
  device_map = infer_auto_device_map(model, **kwargs)
204
-
205
  if isinstance(device_map, dict):
206
  # check that we don't have any quantized modules on the CPU
207
  modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
208
-
209
  device_map_without_some_modules = {
210
  key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
211
  }
@@ -228,13 +203,10 @@ def get_quantized_model_device_map(
228
  )
229
  del device_map_without_some_modules
230
  return device_map
231
-
232
-
233
  def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
234
  """
235
  A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit`
236
  modules from the `bitsandbytes` library. The function will be run recursively and replace `torch.nn.Linear` modules.
237
-
238
  Parameters:
239
  model (`torch.nn.Module`):
240
  Input model or `torch.nn.Module` as the function is run recursively.
@@ -247,7 +219,6 @@ def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_conve
247
  """
248
  if modules_to_not_convert is None:
249
  modules_to_not_convert = []
250
-
251
  model, has_been_replaced = _replace_with_bnb_layers(
252
  model, bnb_quantization_config, modules_to_not_convert, current_key_name
253
  )
@@ -259,8 +230,6 @@ def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_conve
259
  " a bug."
260
  )
261
  return model
262
-
263
-
264
  def _replace_with_bnb_layers(
265
  model,
266
  bnb_quantization_config,
@@ -269,12 +238,10 @@ def _replace_with_bnb_layers(
269
  ):
270
  """
271
  Private method that wraps the recursion for module replacement.
272
-
273
  Returns the converted model and a boolean that indicates whether the conversion has been successful or not.
274
  """
275
  # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
276
  import bitsandbytes as bnb
277
-
278
  has_been_replaced = False
279
  for name, module in model.named_children():
280
  if current_key_name is None:
@@ -325,15 +292,12 @@ def _replace_with_bnb_layers(
325
  # Remove the last key for recursion
326
  current_key_name.pop(-1)
327
  return model, has_been_replaced
328
-
329
-
330
  def get_keys_to_not_convert(model):
331
  r"""
332
  A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM modules
333
  we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want
334
  to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in
335
  int8.
336
-
337
  Parameters:
338
  model (`torch.nn.Module`):
339
  Input model
@@ -341,7 +305,6 @@ def get_keys_to_not_convert(model):
341
  # Create a copy of the model
342
  with init_empty_weights():
343
  tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
344
-
345
  tied_params = find_tied_parameters(tied_model)
346
  # For compatibility with Accelerate < 0.18
347
  if isinstance(tied_params, dict):
@@ -349,24 +312,19 @@ def get_keys_to_not_convert(model):
349
  else:
350
  tied_keys = sum(tied_params, [])
351
  has_tied_params = len(tied_keys) > 0
352
-
353
  # Check if it is a base model
354
  is_base_model = False
355
  if hasattr(model, "base_model_prefix"):
356
  is_base_model = not hasattr(model, model.base_model_prefix)
357
-
358
  # Ignore this for base models (BertModel, GPT2Model, etc.)
359
  if (not has_tied_params) and is_base_model:
360
  return []
361
-
362
  # otherwise they have an attached head
363
  list_modules = list(model.named_children())
364
  list_last_module = [list_modules[-1][0]]
365
-
366
  # add last module together with tied weights
367
  intersection = set(list_last_module) - set(tied_keys)
368
  list_untouched = list(set(tied_keys)) + list(intersection)
369
-
370
  # remove ".weight" from the keys
371
  names_to_remove = [".weight", ".bias"]
372
  filtered_module_names = []
@@ -375,25 +333,17 @@ def get_keys_to_not_convert(model):
375
  if name_to_remove in name:
376
  name = name.replace(name_to_remove, "")
377
  filtered_module_names.append(name)
378
-
379
  return filtered_module_names
380
-
381
-
382
  def has_4bit_bnb_layers(model):
383
  """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model"""
384
  # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
385
  import bitsandbytes as bnb
386
-
387
  for m in model.modules():
388
  if isinstance(m, bnb.nn.Linear4bit):
389
  return True
390
  return False
391
-
392
-
393
  def get_parameter_device(parameter: nn.Module):
394
  return next(parameter.parameters()).device
395
-
396
-
397
  def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
398
  # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
399
  if fp16_statistics is None:
@@ -421,5 +371,4 @@ def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folde
421
  else:
422
  offload_weight(param, param_name, offload_folder, index=offload_index)
423
  offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
424
-
425
  set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
 
1
  logger = logging.getLogger(__name__)
 
 
2
  def load_and_quantize_model(
3
  model: torch.nn.Module,
4
  bnb_quantization_config: BnbQuantizationConfig,
 
13
  This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the
14
  model is on the meta device, we will load and dispatch the weights according to the `device_map` passed. If the
15
  model is already loaded, we will quantize the model and put the model on the GPU.
 
16
  Args:
17
  model (`torch.nn.Module`):
18
  Input model. The model can be already loaded or on the meta device
 
37
  offload_state_dict (`bool`, *optional*, defaults to `False`):
38
  If `True`, will temporarily offload the CPU state dict to the hard drive to avoid running out of CPU RAM if
39
  the weight of the CPU state dict + the biggest shard does not fit.
 
40
  Returns:
41
  `torch.nn.Module`: The quantized model
42
  """
43
  load_in_4bit = bnb_quantization_config.load_in_4bit
44
  load_in_8bit = bnb_quantization_config.load_in_8bit
 
45
  if load_in_8bit and not is_8bit_bnb_available():
46
  raise ImportError(
47
  "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
 
52
  "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
53
  "make sure you have the latest version of `bitsandbytes` installed."
54
  )
 
55
  modules_on_cpu = []
56
  # custom device map
57
  if isinstance(device_map, dict) and len(device_map.keys()) > 1:
58
  modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
 
59
  # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
60
  if bnb_quantization_config.skip_modules is None:
61
  bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
 
62
  # add cpu modules to skip modules only for 4-bit modules
63
  if load_in_4bit:
64
  bnb_quantization_config.skip_modules.extend(modules_on_cpu)
65
  modules_to_not_convert = bnb_quantization_config.skip_modules
 
66
  # We add the modules we want to keep in full precision
67
  if bnb_quantization_config.keep_in_fp32_modules is None:
68
  bnb_quantization_config.keep_in_fp32_modules = []
69
  keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
70
  modules_to_not_convert.extend(keep_in_fp32_modules)
 
71
  # compatibility with peft
72
  model.is_loaded_in_4bit = load_in_4bit
73
  model.is_loaded_in_8bit = load_in_8bit
 
74
  model_device = get_parameter_device(model)
75
  if model_device.type != "meta":
76
  # quantization of an already loaded model
 
104
  "We move the model to cuda."
105
  )
106
  return model
 
107
  elif weights_location is None:
108
  raise RuntimeError(
109
  f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
110
  )
 
111
  else:
112
  with init_empty_weights():
113
  model = replace_with_bnb_layers(
114
  model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
115
  )
 
116
  device_map = get_quantized_model_device_map(
117
  model,
118
  bnb_quantization_config,
 
122
  )
123
  if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
124
  offload_state_dict = True
 
125
  offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
 
126
  load_checkpoint_in_model(
127
  model,
128
  weights_location,
 
134
  offload_8bit_bnb=load_in_8bit and offload,
135
  )
136
  return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
 
 
137
  def get_quantized_model_device_map(
138
  model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
139
  ):
 
143
  else:
144
  raise RuntimeError("No GPU found. A GPU is needed for quantization.")
145
  logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
 
146
  if isinstance(device_map, str):
147
  if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
148
  raise ValueError(
149
  "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
150
  "'sequential'."
151
  )
 
152
  special_dtypes = {}
153
  special_dtypes.update(
154
  {
 
164
  if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
165
  }
166
  )
 
167
  kwargs = {}
168
  kwargs["special_dtypes"] = special_dtypes
169
  kwargs["no_split_module_classes"] = no_split_module_classes
170
  kwargs["dtype"] = bnb_quantization_config.target_dtype
 
171
  # get max_memory for each device.
172
  if device_map != "sequential":
173
  max_memory = get_balanced_memory(
 
176
  max_memory=max_memory,
177
  **kwargs,
178
  )
 
179
  kwargs["max_memory"] = max_memory
180
  device_map = infer_auto_device_map(model, **kwargs)
 
181
  if isinstance(device_map, dict):
182
  # check that we don't have any quantized module on the CPU
183
  modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
 
184
  device_map_without_some_modules = {
185
  key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
186
  }
 
203
  )
204
  del device_map_without_some_modules
205
  return device_map
 
 
206
  def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
207
  """
208
  A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit`
209
  modules from the `bitsandbytes` library. The function will be run recursively and replace `torch.nn.Linear` modules.
 
210
  Parameters:
211
  model (`torch.nn.Module`):
212
  Input model or `torch.nn.Module` as the function is run recursively.
 
219
  """
220
  if modules_to_not_convert is None:
221
  modules_to_not_convert = []
 
222
  model, has_been_replaced = _replace_with_bnb_layers(
223
  model, bnb_quantization_config, modules_to_not_convert, current_key_name
224
  )
 
230
  " a bug."
231
  )
232
  return model
 
 
233
  def _replace_with_bnb_layers(
234
  model,
235
  bnb_quantization_config,
 
238
  ):
239
  """
240
  Private method that wraps the recursion for module replacement.
 
241
  Returns the converted model and a boolean that indicates if the conversion has been successful or not.
242
  """
243
  # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
244
  import bitsandbytes as bnb
 
245
  has_been_replaced = False
246
  for name, module in model.named_children():
247
  if current_key_name is None:
 
292
  # Remove the last key for recursion
293
  current_key_name.pop(-1)
294
  return model, has_been_replaced
 
 
295
  def get_keys_to_not_convert(model):
296
  r"""
297
  A utility function to get the key of the module to keep in full precision, if any. For example, for CausalLM modules
298
  we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want
299
  to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in
300
  int8.
 
301
  Parameters:
302
  model (`torch.nn.Module`):
303
  Input model
 
305
  # Create a copy of the model
306
  with init_empty_weights():
307
  tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
 
308
  tied_params = find_tied_parameters(tied_model)
309
  # For compatibility with Accelerate < 0.18
310
  if isinstance(tied_params, dict):
 
312
  else:
313
  tied_keys = sum(tied_params, [])
314
  has_tied_params = len(tied_keys) > 0
 
315
  # Check if it is a base model
316
  is_base_model = False
317
  if hasattr(model, "base_model_prefix"):
318
  is_base_model = not hasattr(model, model.base_model_prefix)
 
319
  # Ignore this for base models (BertModel, GPT2Model, etc.)
320
  if (not has_tied_params) and is_base_model:
321
  return []
 
322
  # otherwise they have an attached head
323
  list_modules = list(model.named_children())
324
  list_last_module = [list_modules[-1][0]]
 
325
  # add last module together with tied weights
326
  intersection = set(list_last_module) - set(tied_keys)
327
  list_untouched = list(set(tied_keys)) + list(intersection)
 
328
  # remove ".weight" from the keys
329
  names_to_remove = [".weight", ".bias"]
330
  filtered_module_names = []
 
333
  if name_to_remove in name:
334
  name = name.replace(name_to_remove, "")
335
  filtered_module_names.append(name)
 
336
  return filtered_module_names
 
 
337
  def has_4bit_bnb_layers(model):
338
  """Check if we have `bnb.nn.Linear4bit` or `bnb.nn.Linear8bitLt` layers inside our model"""
339
  # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily
340
  import bitsandbytes as bnb
 
341
  for m in model.modules():
342
  if isinstance(m, bnb.nn.Linear4bit):
343
  return True
344
  return False
 
 
345
  def get_parameter_device(parameter: nn.Module):
346
  return next(parameter.parameters()).device
 
 
347
  def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
348
  # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
349
  if fp16_statistics is None:
 
371
  else:
372
  offload_weight(param, param_name, offload_folder, index=offload_index)
373
  offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
 
374
  set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
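
For orientation, here is a minimal usage sketch of the `load_and_quantize_model` / `BnbQuantizationConfig` API shown in this file. The import paths, the example model name, and the weights folder are assumptions for illustration, not part of this commit:

```python
# Hedged sketch: quantize a meta-device model with the bnb utilities above.
# Assumptions: `accelerate.utils` re-exports these helpers; "path/to/checkpoint"
# is a hypothetical folder containing the (possibly sharded) model weights.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-350m")  # hypothetical example model
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)  # parameters stay on the meta device

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = load_and_quantize_model(
    model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",  # folder holding the model weights
    device_map="auto",
)
```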
src/utils/constants.py CHANGED
@@ -21,9 +21,7 @@ FSDP_PYTORCH_VERSION = "2.1.0"
21
  FSDP_MODEL_NAME = "pytorch_model_fsdp"
22
  DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"]
23
  TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
24
-
25
  STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
26
-
27
  # These are the args for `torch.distributed.launch` for pytorch < 1.9
28
  TORCH_LAUNCH_PARAMS = [
29
  "nnodes",
@@ -50,6 +48,5 @@ TORCH_LAUNCH_PARAMS = [
50
  "master_addr",
51
  "master_port",
52
  ]
53
-
54
  CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
55
  TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + ["MULTI_NPU", "MULTI_XPU", "MULTI_CPU"]
 
21
  FSDP_MODEL_NAME = "pytorch_model_fsdp"
22
  DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"]
23
  TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
 
24
  STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
 
25
  # These are the args for `torch.distributed.launch` for pytorch < 1.9
26
  TORCH_LAUNCH_PARAMS = [
27
  "nnodes",
 
48
  "master_addr",
49
  "master_port",
50
  ]
 
51
  CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
52
  TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + ["MULTI_NPU", "MULTI_XPU", "MULTI_CPU"]
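
As a side note, a table like `STR_OPERATION_TO_FUNC` above is typically consumed by a string-driven version check; the sketch below is illustrative only and is not the repository's own helper:

```python
# Illustrative sketch: back a version comparison with the operator table above.
import operator as op
from packaging.version import parse  # assumes the `packaging` package is installed

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def version_matches(installed: str, operation: str, required: str) -> bool:
    """Return True if `installed <operation> required` holds, e.g. ("0.9.3", ">=", "0.9.3")."""
    return STR_OPERATION_TO_FUNC[operation](parse(installed), parse(required))

assert version_matches("2.1.0", ">=", "2.0.0")
```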
src/utils/dataclasses.py CHANGED
@@ -7,41 +7,32 @@ class KwargsHandler:
7
  """
8
  def to_dict(self):
9
  return copy.deepcopy(self.__dict__)
10
-
11
  def to_kwargs(self):
12
  """
13
  Returns a dictionary containing the attributes with values different from the default of this class.
14
  """
15
  # import clear_environment here to avoid circular import problem
16
  from .other import clear_environment
17
-
18
  with clear_environment():
19
  default_dict = self.__class__().to_dict()
20
  this_dict = self.to_dict()
21
  return {k: v for k, v in this_dict.items() if default_dict[k] != v}
22
-
23
-
24
  @dataclass
25
  class AutocastKwargs(KwargsHandler):
26
  """
27
  Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the
28
  documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more
29
  information on each argument.
30
-
31
  Example:
32
-
33
  ```python
34
  from accelerate import Accelerator
35
  from accelerate.utils import AutocastKwargs
36
-
37
  kwargs = AutocastKwargs(cache_enabled=True)
38
  accelerator = Accelerator(kwargs_handlers=[kwargs])
39
  ```
40
  """
41
  enabled: bool = True
42
  cache_enabled: bool = None
43
-
44
-
45
  @dataclass
46
  class DistributedDataParallelKwargs(KwargsHandler):
47
  """
@@ -49,21 +40,14 @@ class DistributedDataParallelKwargs(KwargsHandler):
49
  `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
50
  [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
51
  information on each argument.
52
-
53
  <Tip warning={true}>
54
-
55
  `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
56
-
57
  `static_graph` is only available in PyTorch 1.11.0 and later versions.
58
-
59
  </Tip>
60
-
61
  Example:
62
-
63
  ```python
64
  from accelerate import Accelerator
65
  from accelerate.utils import DistributedDataParallelKwargs
66
-
67
  kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
68
  accelerator = Accelerator(kwargs_handlers=[kwargs])
69
  ```
@@ -75,27 +59,19 @@ class DistributedDataParallelKwargs(KwargsHandler):
75
  check_reduction: bool = False
76
  gradient_as_bucket_view: bool = False
77
  static_graph: bool = False
78
-
79
-
80
  @dataclass
81
  class GradScalerKwargs(KwargsHandler):
82
  """
83
  Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
84
  `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
85
  [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
86
-
87
  <Tip warning={true}>
88
-
89
  `GradScaler` is only available in PyTorch 1.5.0 and later versions.
90
-
91
  </Tip>
92
-
93
  Example:
94
-
95
  ```python
96
  from accelerate import Accelerator
97
  from accelerate.utils import GradScalerKwargs
98
-
99
  kwargs = GradScalerKwargs(backoff_factor=0.25)
100
  accelerator = Accelerator(kwargs_handlers=[kwargs])
101
  ```
@@ -105,8 +81,6 @@ class GradScalerKwargs(KwargsHandler):
105
  backoff_factor: float = 0.5
106
  growth_interval: int = 2000
107
  enabled: bool = True
108
-
109
-
110
  @dataclass
111
  class InitProcessGroupKwargs(KwargsHandler):
112
  """
@@ -114,12 +88,10 @@ class InitProcessGroupKwargs(KwargsHandler):
114
  to the documentation of this
115
  [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
116
  information on each argument.
117
-
118
  ```python
119
  from datetime import timedelta
120
  from accelerate import Accelerator
121
  from accelerate.utils import InitProcessGroupKwargs
122
-
123
  kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
124
  accelerator = Accelerator(kwargs_handlers=[kwargs])
125
  ```
@@ -127,45 +99,32 @@ class InitProcessGroupKwargs(KwargsHandler):
127
  backend: Optional[str] = "nccl"
128
  init_method: Optional[str] = None
129
  timeout: timedelta = timedelta(seconds=1800)
130
-
131
-
132
  # Literals
133
  Backend = Literal["msamp", "te"]
134
  OptLevel = Literal["O1", "O2"]
135
  FP8Format = Literal["E4M3", "HYBRID"]
136
  AmaxComputeAlgorithm = Literal["max", "most_recent"]
137
-
138
-
139
  @dataclass
140
  class FP8RecipeKwargs(KwargsHandler):
141
  """
142
  Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
143
  training with `transformer-engine` or `ms-amp`.
144
-
145
  <Tip>
146
-
147
  For more information on `transformer-engine` args, please refer to the API
148
  [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html).
149
-
150
  For more information on the `ms-amp` args, please refer to the Optimization Level
151
  [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level).
152
-
153
  </Tip>
154
-
155
  ```python
156
  from accelerate import Accelerator
157
  from accelerate.utils import FP8RecipeKwargs
158
-
159
  kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID")
160
  accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
161
  ```
162
-
163
  To use MS-AMP as an engine, pass `backend="msamp"` and the `optimization_level`:
164
-
165
  ```python
166
  kwargs = FP8RecipeKwargs(backend="msamp", optimization_level="02")
167
  ```
168
-
169
  Args:
170
  backend (`str`, *optional*, defaults to "msamp"):
171
  Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine).
@@ -200,7 +159,6 @@ class FP8RecipeKwargs(KwargsHandler):
200
  amax_history_len: int = 1
201
  amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
202
  override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
203
-
204
  def __post_init__(self):
205
  self.backend = self.backend.upper()
206
  if self.backend not in get_args(Backend):
@@ -215,37 +173,26 @@ class FP8RecipeKwargs(KwargsHandler):
215
  elif self.backend == "MSAMP":
216
  if self.opt_level not in get_args(OptLevel):
217
  raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
218
-
219
-
220
  class EnumWithContains(enum.EnumMeta):
221
  "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
222
-
223
  def __contains__(cls, item):
224
  try:
225
  cls(item)
226
  except ValueError:
227
  return False
228
  return True
229
-
230
-
231
  class BaseEnum(enum.Enum, metaclass=EnumWithContains):
232
  "An enum class that can get the value of an item with `str(Enum.key)`"
233
-
234
  def __str__(self):
235
  return self.value
236
-
237
  @classmethod
238
  def list(cls):
239
  "Method to list all the possible items in `cls`"
240
  return list(map(str, cls))
241
-
242
-
243
  class DistributedType(str, enum.Enum):
244
  """
245
  Represents a type of distributed environment.
246
-
247
  Values:
248
-
249
  - **NO** -- Not a distributed environment, just a single process.
250
  - **MULTI_CPU** -- Distributed on multiple CPU nodes.
251
  - **MULTI_GPU** -- Distributed on multiple GPUs.
@@ -264,14 +211,10 @@ class DistributedType(str, enum.Enum):
264
  FSDP = "FSDP"
265
  TPU = "TPU"
266
  MEGATRON_LM = "MEGATRON_LM"
267
-
268
-
269
  class SageMakerDistributedType(str, enum.Enum):
270
  """
271
  Represents a type of distributed environment.
272
-
273
  Values:
274
-
275
  - **NO** -- Not a distributed environment, just a single process.
276
  - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
277
  - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
@@ -280,28 +223,20 @@ class SageMakerDistributedType(str, enum.Enum):
280
  NO = "NO"
281
  DATA_PARALLEL = "DATA_PARALLEL"
282
  MODEL_PARALLEL = "MODEL_PARALLEL"
283
-
284
-
285
  class ComputeEnvironment(str, enum.Enum):
286
  """
287
  Represents a type of compute environment.
288
-
289
  Values:
290
-
291
  - **LOCAL_MACHINE** -- private/custom cluster hardware.
292
  - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
293
  """
294
  # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
295
  LOCAL_MACHINE = "LOCAL_MACHINE"
296
  AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
297
-
298
-
299
  class DynamoBackend(str, BaseEnum):
300
  """
301
  Represents a dynamo backend (see https://github.com/pytorch/torchdynamo).
302
-
303
  Values:
304
-
305
  - **NO** -- Do not use torch dynamo.
306
  - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
307
  issues.
@@ -325,7 +260,6 @@ class DynamoBackend(str, BaseEnum):
325
  - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
326
  more](https://github.com/intel/intel-extension-for-pytorch).
327
  - **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
328
-
329
  """
330
  # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
331
  NO = "NO"
@@ -341,13 +275,9 @@ class DynamoBackend(str, BaseEnum):
341
  TENSORRT = "TENSORRT"
342
  IPEX = "IPEX"
343
  TVM = "TVM"
344
-
345
-
346
  class LoggerType(BaseEnum):
347
  """Represents a type of supported experiment tracker
348
-
349
  Values:
350
-
351
  - **ALL** -- all available trackers in the environment that are supported
352
  - **TENSORBOARD** -- TensorBoard as an experiment tracker
353
  - **WANDB** -- wandb as an experiment tracker
@@ -362,13 +292,9 @@ class LoggerType(BaseEnum):
362
  MLFLOW = "mlflow"
363
  CLEARML = "clearml"
364
  DVCLIVE = "dvclive"
365
-
366
-
367
  class PrecisionType(BaseEnum):
368
  """Represents a type of precision used on floating point values
369
-
370
  Values:
371
-
372
  - **NO** -- using full precision (FP32)
373
  - **FP16** -- using half precision
374
  - **BF16** -- using brain floating point precision
@@ -377,8 +303,6 @@ class PrecisionType(BaseEnum):
377
  FP8 = "fp8"
378
  FP16 = "fp16"
379
  BF16 = "bf16"
380
-
381
-
382
  class RNGType(BaseEnum):
383
  TORCH = "torch"
384
  CUDA = "cuda"
@@ -386,25 +310,17 @@ class RNGType(BaseEnum):
386
  XLA = "xla"
387
  XPU = "xpu"
388
  GENERATOR = "generator"
389
-
390
-
391
  class CustomDtype(enum.Enum):
392
  r"""
393
  An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
394
  """
395
  FP8 = "fp8"
396
  INT4 = "int4"
397
-
398
-
399
  # data classes
400
-
401
-
402
  @dataclass
403
  class TensorInformation:
404
  shape: torch.Size
405
  dtype: torch.dtype
406
-
407
-
408
  @dataclass
409
  class ProjectConfiguration:
410
  """
@@ -421,17 +337,14 @@ class ProjectConfiguration:
421
  default=False,
422
  metadata={"help": "Whether saved states should be automatically iteratively named."},
423
  )
424
-
425
  total_limit: int = field(
426
  default=None,
427
  metadata={"help": "The maximum number of total saved states to keep."},
428
  )
429
-
430
  iteration: int = field(
431
  default=0,
432
  metadata={"help": "The current save iteration."},
433
  )
434
-
435
  save_on_each_node: bool = field(
436
  default=False,
437
  metadata={
@@ -441,17 +354,13 @@ class ProjectConfiguration:
441
  )
442
  },
443
  )
444
-
445
  def set_directories(self, project_dir: str = None):
446
  "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
447
  self.project_dir = project_dir
448
  if self.logging_dir is None:
449
  self.logging_dir = project_dir
450
-
451
  def __post_init__(self):
452
  self.set_directories(self.project_dir)
453
-
454
-
455
  @dataclass
456
  class GradientAccumulationPlugin(KwargsHandler):
457
  """
@@ -470,8 +379,6 @@ class GradientAccumulationPlugin(KwargsHandler):
470
  "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
471
  },
472
  )
473
-
474
-
475
  @dataclass
476
  class TorchDynamoPlugin(KwargsHandler):
477
  """
@@ -488,7 +395,6 @@ class TorchDynamoPlugin(KwargsHandler):
488
  dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
489
  options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
490
  disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
491
-
492
  def __post_init__(self):
493
  prefix = "ACCELERATE_DYNAMO_"
494
  if self.backend is None:
@@ -500,13 +406,10 @@ class TorchDynamoPlugin(KwargsHandler):
500
  self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
501
  if self.dynamic is None:
502
  self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
503
-
504
  def to_dict(self):
505
  dynamo_config = copy.deepcopy(self.__dict__)
506
  dynamo_config["backend"] = dynamo_config["backend"].value.lower()
507
  return dynamo_config
508
-
509
-
510
  @dataclass
511
  class DeepSpeedPlugin:
512
  """
@@ -560,41 +463,31 @@ class DeepSpeedPlugin:
560
  default=None,
561
  metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
562
  )
563
-
564
  def __post_init__(self):
565
  from .deepspeed import HfDeepSpeedConfig
566
-
567
  if self.gradient_accumulation_steps is None:
568
  gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
569
  self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
570
-
571
  if self.gradient_clipping is None:
572
  gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
573
  if gradient_clipping != "none":
574
  self.gradient_clipping = float(gradient_clipping)
575
-
576
  if self.zero_stage is None:
577
  self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
578
-
579
  if self.offload_optimizer_device is None:
580
  self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
581
-
582
  if self.offload_param_device is None:
583
  self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
584
-
585
  if self.offload_optimizer_nvme_path is None:
586
  self.offload_optimizer_nvme_path = os.environ.get(
587
  "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
588
  )
589
-
590
  if self.offload_param_nvme_path is None:
591
  self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")
592
-
593
  if self.zero3_save_16bit_model is None:
594
  self.zero3_save_16bit_model = (
595
  os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
596
  )
597
-
598
  if self.hf_ds_config is None:
599
  self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
600
  if (
@@ -608,7 +501,6 @@ class DeepSpeedPlugin:
608
  self.hf_ds_config.config["gradient_accumulation_steps"] = 1
609
  if "zero_optimization" not in self.hf_ds_config.config:
610
  raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
611
-
612
  self._deepspeed_config_checks()
613
  plugin_to_config_mapping = {
614
  "gradient_accumulation_steps": "gradient_accumulation_steps",
@@ -624,7 +516,6 @@ class DeepSpeedPlugin:
624
  for key in kwargs.keys():
625
  self.fill_match(key, **kwargs, must_match=False)
626
  self.hf_ds_config.set_stage_and_offload()
627
-
628
  # filling the missing values in the class attributes from the DeepSpeed config
629
  # when using the DeepSpeed config file.
630
  for key, value in plugin_to_config_mapping.items():
@@ -654,7 +545,6 @@ class DeepSpeedPlugin:
654
  if self.gradient_clipping:
655
  config["gradient_clipping"] = self.gradient_clipping
656
  self.hf_ds_config = HfDeepSpeedConfig(config)
657
-
658
  self.deepspeed_config = self.hf_ds_config.config
659
  self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
660
  if self.zero3_init_flag is None:
@@ -664,13 +554,11 @@ class DeepSpeedPlugin:
664
  if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
665
  warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
666
  self.zero3_init_flag = False
667
-
668
  def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
669
  mismatches = [] if mismatches is None else mismatches
670
  config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
671
  if config is None:
672
  return
673
-
674
  if config.get(ds_key) == "auto":
675
  if ds_key_long in kwargs:
676
  config[ds_key] = kwargs[ds_key_long]
@@ -681,15 +569,12 @@ class DeepSpeedPlugin:
681
  f"Please specify `{ds_key_long}` without `auto`(set to correct value) in the DeepSpeed config file or "
682
  "pass it in kwargs."
683
  )
684
-
685
  if not must_match:
686
  return
687
-
688
  ds_val = config.get(ds_key)
689
  if ds_val is not None and ds_key_long in kwargs:
690
  if ds_val != kwargs[ds_key_long]:
691
  mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
692
-
693
  def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
694
  """Process the DeepSpeed config with the values from the kwargs."""
695
  mismatches = [] if mismatches is None else mismatches
@@ -708,7 +593,6 @@ class DeepSpeedPlugin:
708
  "Please correct the following DeepSpeed config values that mismatch kwargs "
709
  f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
710
  )
711
-
712
  def set_mixed_precision(self, mixed_precision):
713
  ds_config = self.deepspeed_config
714
  kwargs = {
@@ -721,7 +605,6 @@ class DeepSpeedPlugin:
721
  elif mixed_precision == "bf16":
722
  if "bf16" not in ds_config:
723
  ds_config["bf16"] = {"enabled": True}
724
-
725
  if mixed_precision != "no":
726
  diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
727
  if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
@@ -733,10 +616,8 @@ class DeepSpeedPlugin:
733
  ds_config[dtype] = {"enabled": False}
734
  self.fill_match("fp16.enabled", must_match=False, **kwargs)
735
  self.fill_match("bf16.enabled", must_match=False, **kwargs)
736
-
737
  def set_deepspeed_weakref(self):
738
  from .imports import is_transformers_available
739
-
740
  if self.zero3_init_flag:
741
  if not is_transformers_available():
742
  raise Exception(
@@ -753,17 +634,13 @@ class DeepSpeedPlugin:
753
  ds_config["train_micro_batch_size_per_gpu"] = 1
754
  if ds_config.get("train_batch_size", None) == "auto":
755
  del ds_config["train_batch_size"]
756
-
757
  if compare_versions("transformers", "<", "4.33"):
758
  from transformers.deepspeed import HfDeepSpeedConfig
759
  else:
760
  from transformers.integrations import HfDeepSpeedConfig
761
-
762
  self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
763
-
764
  def is_zero3_init_enabled(self):
765
  return self.zero3_init_flag
766
-
767
  @contextmanager
768
  def zero3_init_context_manager(self, enable=False):
769
  old = self.zero3_init_flag
@@ -777,7 +654,6 @@ class DeepSpeedPlugin:
777
  self.zero3_init_flag = old
778
  self.dschf = None
779
  self.set_deepspeed_weakref()
780
-
781
  def _deepspeed_config_checks(self):
782
  env_variable_names_to_ignore = [
783
  "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
@@ -793,9 +669,7 @@ class DeepSpeedPlugin:
793
  env_variable_names_to_ignore = [
794
  name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
795
  ]
796
-
797
  deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
798
-
799
  if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
800
  raise ValueError(
801
  f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
@@ -804,8 +678,6 @@ class DeepSpeedPlugin:
804
  "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
805
  "It will only ask for the necessary config variables when using `deepspeed_config_file`."
806
  )
807
-
808
-
809
  @dataclass
810
  class FullyShardedDataParallelPlugin:
811
  """
@@ -912,10 +784,8 @@ class FullyShardedDataParallelPlugin:
912
  "for reduced memory usage."
913
  },
914
  )
915
-
916
  def __post_init__(self):
917
  from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
918
-
919
  prefix = "FSDP_"
920
  if self.sharding_strategy is None:
921
  sharding_strategy = os.environ.get(prefix + "SHARDING_STRATEGY", "FULL_SHARD")
@@ -925,18 +795,15 @@ class FullyShardedDataParallelPlugin:
925
  else int(sharding_strategy)
926
  )
927
  self.sharding_strategy = ShardingStrategy(sharding_strategy)
928
-
929
  if self.cpu_offload is None:
930
  if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
931
  self.cpu_offload = CPUOffload(offload_params=True)
932
  else:
933
  self.cpu_offload = CPUOffload(offload_params=False)
934
-
935
  if self.backward_prefetch is None:
936
  prefetch_policy = os.environ.get(prefix + "BACKWARD_PREFETCH", "NO_PREFETCH")
937
  if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:
938
  self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)
939
-
940
  if self.state_dict_type is None:
941
  state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
942
  self.set_state_dict_type(state_dict_type_policy)
@@ -944,7 +811,6 @@ class FullyShardedDataParallelPlugin:
944
  self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
945
  self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
946
  self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
947
-
948
  if self.sync_module_states:
949
  if is_npu_available():
950
  device = torch.npu.current_device()
@@ -957,12 +823,10 @@ class FullyShardedDataParallelPlugin:
957
  "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
958
  )
959
  self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
960
-
961
  @staticmethod
962
  def get_module_class_from_name(module, name):
963
  """
964
  Gets a class from a module by its name.
965
-
966
  Args:
967
  module (`torch.nn.Module`): The module to get the class from.
968
  name (`str`): The name of the class.
@@ -977,10 +841,8 @@ class FullyShardedDataParallelPlugin:
977
  module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)
978
  if module_class is not None:
979
  return module_class
980
-
981
  def set_auto_wrap_policy(self, model):
982
  from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
983
-
984
  default_transformer_cls_names_to_wrap = (
985
  ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
986
  )
@@ -997,7 +859,6 @@ class FullyShardedDataParallelPlugin:
997
  raise Exception("Could not find the transformer layer class to wrap in the model.")
998
  else:
999
  transformer_cls_to_wrap.add(transformer_cls)
1000
-
1001
  self.auto_wrap_policy = functools.partial(
1002
  transformer_auto_wrap_policy,
1003
  # Transformer layer class to wrap
@@ -1009,7 +870,6 @@ class FullyShardedDataParallelPlugin:
1009
  self.auto_wrap_policy = functools.partial(
1010
  size_based_auto_wrap_policy, min_num_params=min_num_params
1011
  )
1012
-
1013
  def set_mixed_precision(self, mixed_precision):
1014
  if mixed_precision == "fp16":
1015
  dtype = torch.float16
@@ -1018,26 +878,20 @@ class FullyShardedDataParallelPlugin:
1018
  else:
1019
  raise ValueError(f"Unknown mixed precision value: {mixed_precision}")
1020
  from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
1021
-
1022
  if self.mixed_precision_policy is None:
1023
  self.mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
1024
-
1025
  def set_state_dict_type(self, state_dict_type_policy):
1026
  from torch.distributed.fsdp.fully_sharded_data_parallel import (
1027
  FullOptimStateDictConfig,
1028
  FullStateDictConfig,
1029
  StateDictType,
1030
  )
1031
-
1032
  self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
1033
-
1034
  if self.state_dict_type == StateDictType.FULL_STATE_DICT:
1035
  if self.state_dict_config is None:
1036
  self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
1037
  if self.optim_state_dict_config is None:
1038
  self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
1039
-
1040
-
1041
  @dataclass
1042
  class MegatronLMPlugin:
1043
  """
@@ -1169,7 +1023,6 @@ class MegatronLMPlugin:
1169
  default=False,
1170
  metadata={"help": "Whether to return logits from the model."},
1171
  )
1172
-
1173
  # custom train step args
1174
  custom_train_step_class: Optional[Any] = field(
1175
  default=None,
@@ -1179,7 +1032,6 @@ class MegatronLMPlugin:
1179
  default=None,
1180
  metadata={"help": "Custom train step kwargs."},
1181
  )
1182
-
1183
  # custom model args
1184
  custom_model_provider_function: Optional[Callable] = field(
1185
  default=None,
@@ -1189,14 +1041,12 @@ class MegatronLMPlugin:
1189
  default=None,
1190
  metadata={"help": "Custom prepare model function."},
1191
  )
1192
-
1193
  # remaining args such as enabling Alibi/ROPE positional embeddings,
1194
  # wandb logging, Multi-Query Attention, etc.
1195
  other_megatron_args: Optional[Dict[str, Any]] = field(
1196
  default=None,
1197
  metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
1198
  )
1199
-
1200
  def __post_init__(self):
1201
  prefix = "MEGATRON_LM_"
1202
  if self.tp_degree is None:
@@ -1215,18 +1065,15 @@ class MegatronLMPlugin:
1215
  )
1216
  if self.sequence_parallelism is None:
1217
  self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
1218
-
1219
  if self.pp_degree > 1 or self.use_distributed_optimizer:
1220
  self.DDP_impl = "local"
1221
  else:
1222
  self.DDP_impl = "torch"
1223
-
1224
  if self.consumed_samples is not None:
1225
  if len(self.consumed_samples) == 1:
1226
  self.consumed_samples.extend([0, 0])
1227
  elif len(self.consumed_samples) == 2:
1228
  self.consumed_samples.append(0)
1229
-
1230
  self.megatron_lm_default_args = {
1231
  "tensor_model_parallel_size": self.tp_degree,
1232
  "pipeline_model_parallel_size": self.pp_degree,
@@ -1253,7 +1100,6 @@ class MegatronLMPlugin:
1253
  self.set_tensorboard_logging_options()
1254
  if self.other_megatron_args is not None:
1255
  self.megatron_lm_default_args.update(self.other_megatron_args)
1256
-
1257
  def set_network_size_args(self, model, batch_data=None):
1258
  # Check if the model is either BERT, GPT or T5 else raise error
1259
  # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
@@ -1317,7 +1163,6 @@ class MegatronLMPlugin:
1317
  self.decoder_seq_length = batch_data["labels"].shape[1]
1318
  else:
1319
  self.decoder_seq_length = max_position_embeddings
1320
-
1321
  self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length
1322
  self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length
1323
  else:
@@ -1325,7 +1170,6 @@ class MegatronLMPlugin:
1325
  "🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. "
1326
  "Please check the model you are using is one of those."
1327
  )
1328
-
1329
  self.megatron_lm_default_args["model_type_name"] = model_type_name
1330
  self.megatron_lm_default_args["num_layers"] = num_layers
1331
  self.megatron_lm_default_args["hidden_size"] = hidden_size
@@ -1336,7 +1180,6 @@ class MegatronLMPlugin:
1336
  self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
1337
  if model_type_name == "bert":
1338
  self.megatron_lm_default_args["num_labels"] = num_labels
1339
-
1340
  def set_mixed_precision(self, mixed_precision):
1341
  if mixed_precision == "fp16":
1342
  self.megatron_lm_default_args["fp16"] = True
@@ -1344,7 +1187,6 @@ class MegatronLMPlugin:
1344
  self.megatron_lm_default_args["bf16"] = True
1345
  self.DDP_impl = "local"
1346
  self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
1347
-
1348
  def set_training_args(self, micro_batch_size, dp_degree):
1349
  self.data_parallel_size = dp_degree
1350
  self.micro_batch_size = micro_batch_size
@@ -1352,7 +1194,6 @@ class MegatronLMPlugin:
1352
  self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
1353
  self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
1354
  self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
1355
-
1356
  def set_optimizer_type(self, optimizer):
1357
  optimizer_name = optimizer.__class__.__name__.lower()
1358
  if "adam" in optimizer_name:
@@ -1365,10 +1206,8 @@ class MegatronLMPlugin:
1365
  self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"]
1366
  else:
1367
  raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
1368
-
1369
  self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
1370
  self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
1371
-
1372
  def set_scheduler_args(self, scheduler):
1373
  if self.train_iters is None:
1374
  self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
@@ -1384,7 +1223,6 @@ class MegatronLMPlugin:
1384
  "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training."
1385
  )
1386
  self.lr_warmup_samples = 0
1387
-
1388
  self.megatron_lm_default_args["train_iters"] = self.train_iters
1389
  self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters
1390
  self.megatron_lm_default_args["train_samples"] = self.train_samples
@@ -1397,10 +1235,8 @@ class MegatronLMPlugin:
1397
  self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
1398
  self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
1399
  self.megatron_lm_default_args["min_lr"] = self.min_lr
1400
-
1401
  def set_tensorboard_logging_options(self):
1402
  from megatron.arguments import _add_logging_args
1403
-
1404
  parser = argparse.ArgumentParser()
1405
  parser = _add_logging_args(parser)
1406
  logging_args = parser.parse_known_args()
@@ -1410,35 +1246,28 @@ class MegatronLMPlugin:
1410
  self.megatron_lm_default_args[key] = True
1411
  elif key.startswith("no_log_"):
1412
  self.megatron_lm_default_args[key.replace("no_", "")] = True
1413
-
1414
-
1415
  @dataclass
1416
  class BnbQuantizationConfig:
1417
  """
1418
  A plugin to enable BitsAndBytes 4bit and 8bit quantization
1419
  """
1420
  load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
1421
-
1422
  llm_int8_threshold: float = field(
1423
  default=6.0, metadata={"help": "value of the outlier threshold. Only relevant when load_in_8bit=True"}
1424
  )
1425
-
1426
  load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
1427
-
1428
  bnb_4bit_quant_type: str = field(
1429
  default="fp4",
1430
  metadata={
1431
  "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','np4'}."
1432
  },
1433
  )
1434
-
1435
  bnb_4bit_use_double_quant: bool = field(
1436
  default=False,
1437
  metadata={
1438
  "help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
1439
  },
1440
  )
1441
-
1442
  bnb_4bit_compute_dtype: bool = field(
1443
  default="fp16",
1444
  metadata={
@@ -1446,7 +1275,6 @@ class BnbQuantizationConfig:
1446
  "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
1447
  },
1448
  )
1449
-
1450
  torch_dtype: torch.dtype = field(
1451
  default=None,
1452
  metadata={
@@ -1454,46 +1282,36 @@ class BnbQuantizationConfig:
1454
  "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model "
1455
  },
1456
  )
1457
-
1458
  skip_modules: List[str] = field(
1459
  default=None,
1460
  metadata={
1461
  "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
1462
  },
1463
  )
1464
-
1465
  keep_in_fp32_modules: List[str] = field(
1466
  default=None,
1467
  metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
1468
  )
1469
-
1470
  def __post_init__(self):
1471
  """
1472
  Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
1473
  """
1474
  if not isinstance(self.load_in_8bit, bool):
1475
  raise ValueError("load_in_8bit must be a boolean")
1476
-
1477
  if not isinstance(self.load_in_4bit, bool):
1478
  raise ValueError("load_in_4bit must be a boolean")
1479
-
1480
  if self.load_in_4bit and self.load_in_8bit:
1481
  raise ValueError("load_in_4bit and load_in_8 can't be both True")
1482
-
1483
  if not self.load_in_4bit and not self.load_in_8bit:
1484
  raise ValueError("load_in_4bit and load_in_8 can't be both False")
1485
-
1486
  if not isinstance(self.llm_int8_threshold, (int, float)):
1487
  raise ValueError("llm_int8_threshold must be a float or an int")
1488
-
1489
  if not isinstance(self.bnb_4bit_quant_type, str):
1490
  raise ValueError("bnb_4bit_quant_type must be a string")
1491
  elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]:
1492
  raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}")
1493
-
1494
  if not isinstance(self.bnb_4bit_use_double_quant, bool):
1495
  raise ValueError("bnb_4bit_use_double_quant must be a boolean")
1496
-
1497
  if isinstance(self.bnb_4bit_compute_dtype, str):
1498
  if self.bnb_4bit_compute_dtype == "fp32":
1499
  self.bnb_4bit_compute_dtype = torch.float32
@@ -1507,22 +1325,16 @@ class BnbQuantizationConfig:
1507
  )
1508
  elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
1509
  raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
1510
-
1511
  if self.skip_modules is not None and not isinstance(self.skip_modules, list):
1512
  raise ValueError("skip_modules must be a list of strings")
1513
-
1514
  if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list):
1515
  raise ValueError("keep_in_fp_32_modules must be a list of strings")
1516
-
1517
  if self.load_in_4bit:
1518
  self.target_dtype = CustomDtype.INT4
1519
-
1520
  if self.load_in_8bit:
1521
  self.target_dtype = torch.int8
1522
-
1523
  if self.load_in_4bit and self.llm_int8_threshold != 6.0:
1524
  warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit")
1525
-
1526
  if isinstance(self.torch_dtype, str):
1527
  if self.torch_dtype == "fp32":
1528
  self.torch_dtype = torch.float32
@@ -1534,9 +1346,7 @@ class BnbQuantizationConfig:
1534
  raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}")
1535
  if self.load_in_8bit and self.torch_dtype is None:
1536
  self.torch_dtype = torch.float16
1537
-
1538
  if self.load_in_4bit and self.torch_dtype is None:
1539
  self.torch_dtype = self.bnb_4bit_compute_dtype
1540
-
1541
  if not isinstance(self.torch_dtype, torch.dtype):
1542
  raise ValueError("torch_dtype must be a torch.dtype")
 
7
  """
8
  def to_dict(self):
9
  return copy.deepcopy(self.__dict__)
 
10
  def to_kwargs(self):
11
  """
12
  Returns a dictionary containing the attributes with values different from the default of this class.
13
  """
14
  # import clear_environment here to avoid circular import problem
15
  from .other import clear_environment
 
16
  with clear_environment():
17
  default_dict = self.__class__().to_dict()
18
  this_dict = self.to_dict()
19
  return {k: v for k, v in this_dict.items() if default_dict[k] != v}
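
A quick, hedged illustration of the diff-against-defaults behaviour of `to_kwargs()` above (the import path is an assumption):

```python
# Sketch: to_kwargs() reports only the fields that differ from the dataclass defaults.
from accelerate.utils import DistributedDataParallelKwargs  # import path assumed

ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
print(ddp_kwargs.to_kwargs())  # expected: {'find_unused_parameters': True}
```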
 
 
20
  @dataclass
21
  class AutocastKwargs(KwargsHandler):
22
  """
23
  Use this object in your [`Accelerator`] to customize how `torch.autocast` behaves. Please refer to the
24
  documentation of this [context manager](https://pytorch.org/docs/stable/amp.html#torch.autocast) for more
25
  information on each argument.
 
26
  Example:
 
27
  ```python
28
  from accelerate import Accelerator
29
  from accelerate.utils import AutocastKwargs
 
30
  kwargs = AutocastKwargs(cache_enabled=True)
31
  accelerator = Accelerator(kwargs_handlers=[kwargs])
32
  ```
33
  """
34
  enabled: bool = True
35
  cache_enabled: bool = None
 
 
36
  @dataclass
37
  class DistributedDataParallelKwargs(KwargsHandler):
38
  """
 
40
  `torch.nn.parallel.DistributedDataParallel`. Please refer to the documentation of this
41
  [wrapper](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) for more
42
  information on each argument.
 
43
  <Tip warning={true}>
 
44
  `gradient_as_bucket_view` is only available in PyTorch 1.7.0 and later versions.
 
45
  `static_graph` is only available in PyTorch 1.11.0 and later versions.
 
46
  </Tip>
 
47
  Example:
 
48
  ```python
49
  from accelerate import Accelerator
50
  from accelerate.utils import DistributedDataParallelKwargs
 
51
  kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
52
  accelerator = Accelerator(kwargs_handlers=[kwargs])
53
  ```
 
59
  check_reduction: bool = False
60
  gradient_as_bucket_view: bool = False
61
  static_graph: bool = False
 
 
62
  @dataclass
63
  class GradScalerKwargs(KwargsHandler):
64
  """
65
  Use this object in your [`Accelerator`] to customize the behavior of mixed precision, specifically how the
66
  `torch.cuda.amp.GradScaler` used is created. Please refer to the documentation of this
67
  [scaler](https://pytorch.org/docs/stable/amp.html?highlight=gradscaler) for more information on each argument.
 
68
  <Tip warning={true}>
 
69
  `GradScaler` is only available in PyTorch 1.5.0 and later versions.
 
70
  </Tip>
 
71
  Example:
 
72
  ```python
73
  from accelerate import Accelerator
74
  from accelerate.utils import GradScalerKwargs
 
75
  kwargs = GradScalerKwargs(backoff_factor=0.25)
76
  accelerator = Accelerator(kwargs_handlers=[kwargs])
77
  ```
 
81
  backoff_factor: float = 0.5
82
  growth_interval: int = 2000
83
  enabled: bool = True
 
 
84
  @dataclass
85
  class InitProcessGroupKwargs(KwargsHandler):
86
  """
 
88
  to the documentation of this
89
  [method](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more
90
  information on each argument.
 
91
  ```python
92
  from datetime import timedelta
93
  from accelerate import Accelerator
94
  from accelerate.utils import InitProcessGroupKwargs
 
95
  kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800))
96
  accelerator = Accelerator(kwargs_handlers=[kwargs])
97
  ```
 
99
  backend: Optional[str] = "nccl"
100
  init_method: Optional[str] = None
101
  timeout: timedelta = timedelta(seconds=1800)
 
 
102
  # Literals
103
  Backend = Literal["msamp", "te"]
104
  OptLevel = Literal["O1", "O2"]
105
  FP8Format = Literal["E4M3", "HYBRID"]
106
  AmaxComputeAlgorithm = Literal["max", "most_recent"]
 
 
107
  @dataclass
108
  class FP8RecipeKwargs(KwargsHandler):
109
  """
110
  Use this object in your [`Accelerator`] to customize the initialization of the recipe for FP8 mixed precision
111
  training with `transformer-engine` or `ms-amp`.
 
112
  <Tip>
 
113
  For more information on `transformer-engine` args, please refer to the API
114
  [documentation](https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html).
 
115
  For more information on the `ms-amp` args, please refer to the Optimization Level
116
  [documentation](https://azure.github.io/MS-AMP/docs/user-tutorial/optimization-level).
 
117
  </Tip>
 
118
  ```python
119
  from accelerate import Accelerator
120
  from accelerate.utils import FP8RecipeKwargs
 
121
  kwargs = FP8RecipeKwargs(backend="te", fp8_format="HYBRID")
122
  accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=[kwargs])
123
  ```
 
124
  To use MS-AMP as an engine, pass `backend="msamp"` and the `optimization_level`:
 
125
  ```python
126
  kwargs = FP8RecipeKwargs(backend="msamp", optimization_level="02")
127
  ```
 
128
  Args:
129
  backend (`str`, *optional*, defaults to "msamp"):
130
  Which FP8 engine to use. Must be one of `"msamp"` (MS-AMP) or `"te"` (TransformerEngine).
 
159
  amax_history_len: int = 1
160
  amax_compute_algo: AmaxComputeAlgorithm = "most_recent"
161
  override_linear_precision: Tuple[bool, bool, bool] = (False, False, False)
 
162
  def __post_init__(self):
163
  self.backend = self.backend.upper()
164
  if self.backend not in get_args(Backend):
 
173
  elif self.backend == "MSAMP":
174
  if self.opt_level not in get_args(OptLevel):
175
  raise ValueError(f"`optimization_level` must be one of {' or '.join(get_args(OptLevel))}")
 
 
176
  class EnumWithContains(enum.EnumMeta):
177
  "A metaclass that adds the ability to check if `self` contains an item with the `in` operator"
 
178
  def __contains__(cls, item):
179
  try:
180
  cls(item)
181
  except ValueError:
182
  return False
183
  return True
 
 
184
  class BaseEnum(enum.Enum, metaclass=EnumWithContains):
185
  "An enum class that can get the value of an item with `str(Enum.key)`"
 
186
  def __str__(self):
187
  return self.value
 
188
  @classmethod
189
  def list(cls):
190
  "Method to list all the possible items in `cls`"
191
  return list(map(str, cls))
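
A small hedged example of what `EnumWithContains` and `BaseEnum.list()` enable, using `PrecisionType` defined later in this file (import path assumed):

```python
# Sketch: membership test via the metaclass, value listing via BaseEnum.list().
from accelerate.utils import PrecisionType  # import path assumed

print("fp16" in PrecisionType)  # True, provided by EnumWithContains.__contains__
print("int3" in PrecisionType)  # False
print(PrecisionType.list())     # expected: ['no', 'fp8', 'fp16', 'bf16']
```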
 
 
192
  class DistributedType(str, enum.Enum):
193
  """
194
  Represents a type of distributed environment.
 
195
  Values:
 
196
  - **NO** -- Not a distributed environment, just a single process.
197
  - **MULTI_CPU** -- Distributed on multiple CPU nodes.
198
  - **MULTI_GPU** -- Distributed on multiple GPUs.
 
211
  FSDP = "FSDP"
212
  TPU = "TPU"
213
  MEGATRON_LM = "MEGATRON_LM"
 
 
214
  class SageMakerDistributedType(str, enum.Enum):
215
  """
216
  Represents a type of distributed environment.
 
217
  Values:
 
218
  - **NO** -- Not a distributed environment, just a single process.
219
  - **DATA_PARALLEL** -- using sagemaker distributed data parallelism.
220
  - **MODEL_PARALLEL** -- using sagemaker distributed model parallelism.
 
223
  NO = "NO"
224
  DATA_PARALLEL = "DATA_PARALLEL"
225
  MODEL_PARALLEL = "MODEL_PARALLEL"
 
 
226
  class ComputeEnvironment(str, enum.Enum):
227
  """
228
  Represents a type of compute environment.
 
229
  Values:
 
230
  - **LOCAL_MACHINE** -- private/custom cluster hardware.
231
  - **AMAZON_SAGEMAKER** -- Amazon SageMaker as compute environment.
232
  """
233
  # Subclassing str as well as Enum allows the `ComputeEnvironment` to be JSON-serializable out of the box.
234
  LOCAL_MACHINE = "LOCAL_MACHINE"
235
  AMAZON_SAGEMAKER = "AMAZON_SAGEMAKER"
 
 
236
  class DynamoBackend(str, BaseEnum):
237
  """
238
  Represents a dynamo backend (see https://github.com/pytorch/torchdynamo).
 
239
  Values:
 
240
  - **NO** -- Do not use torch dynamo.
241
  - **EAGER** -- Uses PyTorch to run the extracted GraphModule. This is quite useful in debugging TorchDynamo
242
  issues.
 
260
  - **IPEX** -- Uses IPEX for inference on CPU. Inference only. [Read
261
  more](https://github.com/intel/intel-extension-for-pytorch).
262
  - **TVM** -- Uses Apache TVM for inference optimizations. [Read more](https://tvm.apache.org/)
 
263
  """
264
  # Subclassing str as well as Enum allows the `SageMakerDistributedType` to be JSON-serializable out of the box.
265
  NO = "NO"
 
275
  TENSORRT = "TENSORRT"
276
  IPEX = "IPEX"
277
  TVM = "TVM"
 
 
278
  class LoggerType(BaseEnum):
279
  """Represents a type of supported experiment tracker
 
280
  Values:
 
281
  - **ALL** -- all available trackers in the environment that are supported
282
  - **TENSORBOARD** -- TensorBoard as an experiment tracker
283
  - **WANDB** -- wandb as an experiment tracker
 
292
  MLFLOW = "mlflow"
293
  CLEARML = "clearml"
294
  DVCLIVE = "dvclive"
 
 
295
  class PrecisionType(BaseEnum):
296
  """Represents a type of precision used on floating point values
 
297
  Values:
 
298
  - **NO** -- using full precision (FP32)
299
  - **FP16** -- using half precision
300
  - **BF16** -- using brain floating point precision
 
303
  FP8 = "fp8"
304
  FP16 = "fp16"
305
  BF16 = "bf16"
 
 
306
  class RNGType(BaseEnum):
307
  TORCH = "torch"
308
  CUDA = "cuda"
 
310
  XLA = "xla"
311
  XPU = "xpu"
312
  GENERATOR = "generator"
 
 
313
  class CustomDtype(enum.Enum):
314
  r"""
315
  An enum that contains multiple custom dtypes that can be used for `infer_auto_device_map`.
316
  """
317
  FP8 = "fp8"
318
  INT4 = "int4"
 
 
319
  # data classes
 
 
320
  @dataclass
321
  class TensorInformation:
322
  shape: torch.Size
323
  dtype: torch.dtype
 
 
324
  @dataclass
325
  class ProjectConfiguration:
326
  """
 
337
  default=False,
338
  metadata={"help": "Whether saved states should be automatically iteratively named."},
339
  )
 
340
  total_limit: int = field(
341
  default=None,
342
  metadata={"help": "The maximum number of total saved states to keep."},
343
  )
 
344
  iteration: int = field(
345
  default=0,
346
  metadata={"help": "The current save iteration."},
347
  )
 
348
  save_on_each_node: bool = field(
349
  default=False,
350
  metadata={
 
354
  )
355
  },
356
  )
 
357
  def set_directories(self, project_dir: str = None):
358
  "Sets `self.project_dir` and `self.logging_dir` to the appropriate values."
359
  self.project_dir = project_dir
360
  if self.logging_dir is None:
361
  self.logging_dir = project_dir
 
362
  def __post_init__(self):
363
  self.set_directories(self.project_dir)
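An illustrative sketch (editor's example, not from the source) of the fallback implemented by `set_directories`/`__post_init__`; it assumes the usual `project_dir`/`logging_dir` fields of `ProjectConfiguration`, which are elided from the excerpt above:
```python
# logging_dir falls back to project_dir when it is not given explicitly.
config = ProjectConfiguration(project_dir="runs/exp1")
assert config.logging_dir == "runs/exp1"

# An explicitly provided logging_dir is left untouched by set_directories().
explicit = ProjectConfiguration(project_dir="runs/exp2", logging_dir="logs/exp2")
assert explicit.logging_dir == "logs/exp2"
```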
 
 
364
  @dataclass
365
  class GradientAccumulationPlugin(KwargsHandler):
366
  """
 
379
  "help": "Whether to synchronize setting the gradients when at the end of the dataloader. Should only be set to `False` if you know what you're doing."
380
  },
381
  )
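A hedged sketch of how this plugin is typically constructed; the `num_steps` field and the `to_kwargs()` helper inherited from `KwargsHandler` are assumed from the accelerate API and are not shown in the excerpt above:
```python
# Accumulate gradients over 4 steps and do not force a sync at the end of the dataloader.
ga_plugin = GradientAccumulationPlugin(num_steps=4, sync_with_dataloader=False)

# KwargsHandler.to_kwargs() returns only the fields that differ from their defaults.
print(ga_plugin.to_kwargs())  # e.g. {"num_steps": 4, "sync_with_dataloader": False}
```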
 
 
382
  @dataclass
383
  class TorchDynamoPlugin(KwargsHandler):
384
  """
 
395
  dynamic: bool = field(default=None, metadata={"help": "Whether to use dynamic shape for tracing"})
396
  options: Any = field(default=None, metadata={"help": "A dictionary of options to pass to the backend."})
397
  disable: bool = field(default=False, metadata={"help": "Turn torch.compile() into a no-op for testing"})
 
398
  def __post_init__(self):
399
  prefix = "ACCELERATE_DYNAMO_"
400
  if self.backend is None:
 
406
  self.fullgraph = str_to_bool(os.environ.get(prefix + "USE_FULLGRAPH", "False")) == 1
407
  if self.dynamic is None:
408
  self.dynamic = str_to_bool(os.environ.get(prefix + "USE_DYNAMIC", "False")) == 1
 
409
  def to_dict(self):
410
  dynamo_config = copy.deepcopy(self.__dict__)
411
  dynamo_config["backend"] = dynamo_config["backend"].value.lower()
412
  return dynamo_config
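A small sketch (editor's example) of the environment-driven defaults in `__post_init__` and the lower-cased backend emitted by `to_dict()`; it assumes the elided part of `__post_init__` normalizes the backend string into a `DynamoBackend` member:
```python
import os

os.environ["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = "true"  # picked up as a default below
plugin = TorchDynamoPlugin(backend="inductor")          # "INDUCTOR" assumed to be a valid DynamoBackend

print(plugin.fullgraph)             # True, read from ACCELERATE_DYNAMO_USE_FULLGRAPH
print(plugin.to_dict()["backend"])  # "inductor", the lower-cased enum value
```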
 
 
413
  @dataclass
414
  class DeepSpeedPlugin:
415
  """
 
463
  default=None,
464
  metadata={"help": "Flag to indicate whether to save 16-bit model. Only applicable with ZeRO Stage-3."},
465
  )
 
466
  def __post_init__(self):
467
  from .deepspeed import HfDeepSpeedConfig
 
468
  if self.gradient_accumulation_steps is None:
469
  gas = os.environ.get("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", "auto")
470
  self.gradient_accumulation_steps = int(gas) if gas.isdigit() else gas
 
471
  if self.gradient_clipping is None:
472
  gradient_clipping = os.environ.get("ACCELERATE_GRADIENT_CLIPPING", "none")
473
  if gradient_clipping != "none":
474
  self.gradient_clipping = float(gradient_clipping)
 
475
  if self.zero_stage is None:
476
  self.zero_stage = int(os.environ.get("ACCELERATE_DEEPSPEED_ZERO_STAGE", 2))
 
477
  if self.offload_optimizer_device is None:
478
  self.offload_optimizer_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE", "none")
 
479
  if self.offload_param_device is None:
480
  self.offload_param_device = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE", "none")
 
481
  if self.offload_optimizer_nvme_path is None:
482
  self.offload_optimizer_nvme_path = os.environ.get(
483
  "ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_NVME_PATH", "none"
484
  )
 
485
  if self.offload_param_nvme_path is None:
486
  self.offload_param_nvme_path = os.environ.get("ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_NVME_PATH", "none")
 
487
  if self.zero3_save_16bit_model is None:
488
  self.zero3_save_16bit_model = (
489
  os.environ.get("ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL", "false") == "true"
490
  )
 
491
  if self.hf_ds_config is None:
492
  self.hf_ds_config = os.environ.get("ACCELERATE_DEEPSPEED_CONFIG_FILE", "none")
493
  if (
 
501
  self.hf_ds_config.config["gradient_accumulation_steps"] = 1
502
  if "zero_optimization" not in self.hf_ds_config.config:
503
  raise ValueError("Please specify the ZeRO optimization config in the DeepSpeed config.")
 
504
  self._deepspeed_config_checks()
505
  plugin_to_config_mapping = {
506
  "gradient_accumulation_steps": "gradient_accumulation_steps",
 
516
  for key in kwargs.keys():
517
  self.fill_match(key, **kwargs, must_match=False)
518
  self.hf_ds_config.set_stage_and_offload()
 
519
  # filling the missing values in the class attributes from the DeepSpeed config
520
  # when using the DeepSpeed config file.
521
  for key, value in plugin_to_config_mapping.items():
 
545
  if self.gradient_clipping:
546
  config["gradient_clipping"] = self.gradient_clipping
547
  self.hf_ds_config = HfDeepSpeedConfig(config)
 
548
  self.deepspeed_config = self.hf_ds_config.config
549
  self.deepspeed_config["steps_per_print"] = float("inf") # this will stop deepspeed from logging @ stdout
550
  if self.zero3_init_flag is None:
 
554
  if self.zero3_init_flag and not self.hf_ds_config.is_zero3():
555
  warnings.warn("DeepSpeed Zero3 Init flag is only applicable for ZeRO Stage 3. Setting it to False.")
556
  self.zero3_init_flag = False
 
557
  def fill_match(self, ds_key_long, mismatches=None, must_match=True, **kwargs):
558
  mismatches = [] if mismatches is None else mismatches
559
  config, ds_key = self.hf_ds_config.find_config_node(ds_key_long)
560
  if config is None:
561
  return
 
562
  if config.get(ds_key) == "auto":
563
  if ds_key_long in kwargs:
564
  config[ds_key] = kwargs[ds_key_long]
 
569
  f"Please specify `{ds_key_long}` without `auto`(set to correct value) in the DeepSpeed config file or "
570
  "pass it in kwargs."
571
  )
 
572
  if not must_match:
573
  return
 
574
  ds_val = config.get(ds_key)
575
  if ds_val is not None and ds_key_long in kwargs:
576
  if ds_val != kwargs[ds_key_long]:
577
  mismatches.append(f"- ds {ds_key_long}={ds_val} vs arg {ds_key_long}={kwargs[ds_key_long]}")
 
578
  def deepspeed_config_process(self, prefix="", mismatches=None, config=None, must_match=True, **kwargs):
579
  """Process the DeepSpeed config with the values from the kwargs."""
580
  mismatches = [] if mismatches is None else mismatches
 
593
  "Please correct the following DeepSpeed config values that mismatch kwargs "
594
  f" values:\n{mismatches_msg}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
595
  )
 
596
  def set_mixed_precision(self, mixed_precision):
597
  ds_config = self.deepspeed_config
598
  kwargs = {
 
605
  elif mixed_precision == "bf16":
606
  if "bf16" not in ds_config:
607
  ds_config["bf16"] = {"enabled": True}
 
608
  if mixed_precision != "no":
609
  diff_dtype = "bf16" if mixed_precision == "fp16" else "fp16"
610
  if str(ds_config.get(diff_dtype, {}).get("enabled", "False")).lower() == "true":
 
616
  ds_config[dtype] = {"enabled": False}
617
  self.fill_match("fp16.enabled", must_match=False, **kwargs)
618
  self.fill_match("bf16.enabled", must_match=False, **kwargs)
 
619
  def set_deepspeed_weakref(self):
620
  from .imports import is_transformers_available
 
621
  if self.zero3_init_flag:
622
  if not is_transformers_available():
623
  raise Exception(
 
634
  ds_config["train_micro_batch_size_per_gpu"] = 1
635
  if ds_config.get("train_batch_size", None) == "auto":
636
  del ds_config["train_batch_size"]
 
637
  if compare_versions("transformers", "<", "4.33"):
638
  from transformers.deepspeed import HfDeepSpeedConfig
639
  else:
640
  from transformers.integrations import HfDeepSpeedConfig
 
641
  self.dschf = HfDeepSpeedConfig(ds_config) # keep this object alive # noqa
 
642
  def is_zero3_init_enabled(self):
643
  return self.zero3_init_flag
 
644
  @contextmanager
645
  def zero3_init_context_manager(self, enable=False):
646
  old = self.zero3_init_flag
 
654
  self.zero3_init_flag = old
655
  self.dschf = None
656
  self.set_deepspeed_weakref()
 
657
  def _deepspeed_config_checks(self):
658
  env_variable_names_to_ignore = [
659
  "ACCELERATE_GRADIENT_ACCUMULATION_STEPS",
 
669
  env_variable_names_to_ignore = [
670
  name.replace("ACCELERATE_", "").replace("DEEPSPEED_", "").lower() for name in env_variable_names_to_ignore
671
  ]
 
672
  deepspeed_fields_from_accelerate_config = os.environ.get("ACCELERATE_CONFIG_DS_FIELDS", "").split(",")
 
673
  if any(name in env_variable_names_to_ignore for name in deepspeed_fields_from_accelerate_config):
674
  raise ValueError(
675
  f"When using `deepspeed_config_file`, the following accelerate config variables will be ignored: {env_variable_names_to_ignore}.\n"
 
678
  "The easiest method is to create a new config following the questionnaire via `accelerate config`.\n"
679
  "It will only ask for the necessary config variables when using `deepspeed_config_file`."
680
  )
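An illustrative construction (editor's sketch, assuming the standard accelerate defaults) showing how `__post_init__` fills missing fields from `ACCELERATE_*` environment variables when no DeepSpeed config file is supplied:
```python
import os

os.environ["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = "4"
ds_plugin = DeepSpeedPlugin()  # no config file: fields come from env vars / defaults

print(ds_plugin.zero_stage)                   # 2, the ACCELERATE_DEEPSPEED_ZERO_STAGE default
print(ds_plugin.gradient_accumulation_steps)  # 4, parsed from the environment
```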
 
 
681
  @dataclass
682
  class FullyShardedDataParallelPlugin:
683
  """
 
784
  "for reduced memory usage."
785
  },
786
  )
 
787
  def __post_init__(self):
788
  from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy
 
789
  prefix = "FSDP_"
790
  if self.sharding_strategy is None:
791
  sharding_strategy = os.environ.get(prefix + "SHARDING_STRATEGY", "FULL_SHARD")
 
795
  else int(sharding_strategy)
796
  )
797
  self.sharding_strategy = ShardingStrategy(sharding_strategy)
 
798
  if self.cpu_offload is None:
799
  if str_to_bool(os.environ.get(prefix + "OFFLOAD_PARAMS", "False")) == 1:
800
  self.cpu_offload = CPUOffload(offload_params=True)
801
  else:
802
  self.cpu_offload = CPUOffload(offload_params=False)
 
803
  if self.backward_prefetch is None:
804
  prefetch_policy = os.environ.get(prefix + "BACKWARD_PREFETCH", "NO_PREFETCH")
805
  if prefetch_policy != FSDP_BACKWARD_PREFETCH[-1]:
806
  self.backward_prefetch = BackwardPrefetch(FSDP_BACKWARD_PREFETCH.index(prefetch_policy) + 1)
 
807
  if self.state_dict_type is None:
808
  state_dict_type_policy = os.environ.get(prefix + "STATE_DICT_TYPE", "FULL_STATE_DICT")
809
  self.set_state_dict_type(state_dict_type_policy)
 
811
  self.sync_module_states = str_to_bool(os.environ.get(prefix + "SYNC_MODULE_STATES", "True")) == 1
812
  self.forward_prefetch = str_to_bool(os.environ.get(prefix + "FORWARD_PREFETCH", "False")) == 1
813
  self.activation_checkpointing = str_to_bool(os.environ.get(prefix + "ACTIVATION_CHECKPOINTING", "False")) == 1
 
814
  if self.sync_module_states:
815
  if is_npu_available():
816
  device = torch.npu.current_device()
 
823
  "There are currently no available devices found, must be one of 'XPU', 'CUDA', or 'NPU'."
824
  )
825
  self.param_init_fn = lambda x: x.to_empty(device=device, recurse=False)
 
826
  @staticmethod
827
  def get_module_class_from_name(module, name):
828
  """
829
  Gets a class from a module by its name.
 
830
  Args:
831
  module (`torch.nn.Module`): The module to get the class from.
832
  name (`str`): The name of the class.
 
841
  module_class = FullyShardedDataParallelPlugin.get_module_class_from_name(child_module, name)
842
  if module_class is not None:
843
  return module_class
 
844
  def set_auto_wrap_policy(self, model):
845
  from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
 
846
  default_transformer_cls_names_to_wrap = (
847
  ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else ""
848
  )
 
859
  raise Exception("Could not find the transformer layer class to wrap in the model.")
860
  else:
861
  transformer_cls_to_wrap.add(transformer_cls)
 
862
  self.auto_wrap_policy = functools.partial(
863
  transformer_auto_wrap_policy,
864
  # Transformer layer class to wrap
 
870
  self.auto_wrap_policy = functools.partial(
871
  size_based_auto_wrap_policy, min_num_params=min_num_params
872
  )
 
873
  def set_mixed_precision(self, mixed_precision):
874
  if mixed_precision == "fp16":
875
  dtype = torch.float16
 
878
  else:
879
  raise ValueError(f"Unknown mixed precision value: {mixed_precision}")
880
  from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
 
881
  if self.mixed_precision_policy is None:
882
  self.mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
 
883
  def set_state_dict_type(self, state_dict_type_policy):
884
  from torch.distributed.fsdp.fully_sharded_data_parallel import (
885
  FullOptimStateDictConfig,
886
  FullStateDictConfig,
887
  StateDictType,
888
  )
 
889
  self.state_dict_type = StateDictType(FSDP_STATE_DICT_TYPE.index(state_dict_type_policy) + 1)
 
890
  if self.state_dict_type == StateDictType.FULL_STATE_DICT:
891
  if self.state_dict_config is None:
892
  self.state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
893
  if self.optim_state_dict_config is None:
894
  self.optim_state_dict_config = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
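A hedged sketch of the `FSDP_*` environment-driven defaults; `FSDP_SYNC_MODULE_STATES` is disabled here so the example does not need an accelerator device, and a PyTorch build with `torch.distributed.fsdp` plus the module's stripped imports are assumed:
```python
import os

os.environ["FSDP_OFFLOAD_PARAMS"] = "true"
os.environ["FSDP_SYNC_MODULE_STATES"] = "false"  # avoid the device lookup in __post_init__

fsdp_plugin = FullyShardedDataParallelPlugin()
print(fsdp_plugin.sharding_strategy)           # ShardingStrategy.FULL_SHARD (the default)
print(fsdp_plugin.cpu_offload.offload_params)  # True, from FSDP_OFFLOAD_PARAMS
```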
 
 
895
  @dataclass
896
  class MegatronLMPlugin:
897
  """
 
1023
  default=False,
1024
  metadata={"help": "Whether to return logits from the model."},
1025
  )
 
1026
  # custom train step args
1027
  custom_train_step_class: Optional[Any] = field(
1028
  default=None,
 
1032
  default=None,
1033
  metadata={"help": "Custom train step kwargs."},
1034
  )
 
1035
  # custom model args
1036
  custom_model_provider_function: Optional[Callable] = field(
1037
  default=None,
 
1041
  default=None,
1042
  metadata={"help": "Custom prepare model function."},
1043
  )
 
1044
  # remaining args such as enabling Alibi/ROPE positional embeddings,
1045
  # wandb logging, Multi-Query Attention, etc.
1046
  other_megatron_args: Optional[Dict[str, Any]] = field(
1047
  default=None,
1048
  metadata={"help": "Other Megatron-LM arguments. Please refer Megatron-LM"},
1049
  )
 
1050
  def __post_init__(self):
1051
  prefix = "MEGATRON_LM_"
1052
  if self.tp_degree is None:
 
1065
  )
1066
  if self.sequence_parallelism is None:
1067
  self.sequence_parallelism = str_to_bool(os.environ.get(prefix + "SEQUENCE_PARALLELISM", "False")) == 1
 
1068
  if self.pp_degree > 1 or self.use_distributed_optimizer:
1069
  self.DDP_impl = "local"
1070
  else:
1071
  self.DDP_impl = "torch"
 
1072
  if self.consumed_samples is not None:
1073
  if len(self.consumed_samples) == 1:
1074
  self.consumed_samples.extend([0, 0])
1075
  elif len(self.consumed_samples) == 2:
1076
  self.consumed_samples.append(0)
 
1077
  self.megatron_lm_default_args = {
1078
  "tensor_model_parallel_size": self.tp_degree,
1079
  "pipeline_model_parallel_size": self.pp_degree,
 
1100
  self.set_tensorboard_logging_options()
1101
  if self.other_megatron_args is not None:
1102
  self.megatron_lm_default_args.update(self.other_megatron_args)
 
1103
  def set_network_size_args(self, model, batch_data=None):
1104
  # Check if the model is either BERT, GPT or T5 else raise error
1105
  # set 'num_layers', 'hidden_size', 'num_attention_heads', 'max_position_embeddings'
 
1163
  self.decoder_seq_length = batch_data["labels"].shape[1]
1164
  else:
1165
  self.decoder_seq_length = max_position_embeddings
 
1166
  self.megatron_lm_default_args["encoder_seq_length"] = self.encoder_seq_length
1167
  self.megatron_lm_default_args["decoder_seq_length"] = self.decoder_seq_length
1168
  else:
 
1170
  "🤗 Accelerate Megatron-LM integration supports only BERT, GPT and T5 model. "
1171
  "Please check the model you are using is one of those."
1172
  )
 
1173
  self.megatron_lm_default_args["model_type_name"] = model_type_name
1174
  self.megatron_lm_default_args["num_layers"] = num_layers
1175
  self.megatron_lm_default_args["hidden_size"] = hidden_size
 
1180
  self.megatron_lm_default_args["model_return_dict"] = model.config.return_dict
1181
  if model_type_name == "bert":
1182
  self.megatron_lm_default_args["num_labels"] = num_labels
 
1183
  def set_mixed_precision(self, mixed_precision):
1184
  if mixed_precision == "fp16":
1185
  self.megatron_lm_default_args["fp16"] = True
 
1187
  self.megatron_lm_default_args["bf16"] = True
1188
  self.DDP_impl = "local"
1189
  self.megatron_lm_default_args["DDP_impl"] = self.DDP_impl
 
1190
  def set_training_args(self, micro_batch_size, dp_degree):
1191
  self.data_parallel_size = dp_degree
1192
  self.micro_batch_size = micro_batch_size
 
1194
  self.megatron_lm_default_args["data_parallel_size"] = self.data_parallel_size
1195
  self.megatron_lm_default_args["micro_batch_size"] = self.micro_batch_size
1196
  self.megatron_lm_default_args["global_batch_size"] = self.global_batch_size
 
1197
  def set_optimizer_type(self, optimizer):
1198
  optimizer_name = optimizer.__class__.__name__.lower()
1199
  if "adam" in optimizer_name:
 
1206
  self.megatron_lm_default_args["sgd_momentum"] = optimizer.defaults["momentum"]
1207
  else:
1208
  raise ValueError(f"Optimizer {optimizer_name} is not supported by Megatron-LM")
 
1209
  self.megatron_lm_default_args["lr"] = optimizer.defaults["lr"]
1210
  self.megatron_lm_default_args["weight_decay"] = optimizer.defaults["weight_decay"]
 
1211
  def set_scheduler_args(self, scheduler):
1212
  if self.train_iters is None:
1213
  self.train_iters = scheduler.total_num_steps // self.megatron_lm_default_args["data_parallel_size"]
 
1223
  "Ignoring `lr_warmup_samples` as `lr_warmup_iters` based on scheduler is being used for training."
1224
  )
1225
  self.lr_warmup_samples = 0
 
1226
  self.megatron_lm_default_args["train_iters"] = self.train_iters
1227
  self.megatron_lm_default_args["lr_warmup_iters"] = self.lr_warmup_iters
1228
  self.megatron_lm_default_args["train_samples"] = self.train_samples
 
1235
  self.megatron_lm_default_args["start_weight_decay"] = self.start_weight_decay
1236
  self.megatron_lm_default_args["end_weight_decay"] = self.end_weight_decay
1237
  self.megatron_lm_default_args["min_lr"] = self.min_lr
 
1238
  def set_tensorboard_logging_options(self):
1239
  from megatron.arguments import _add_logging_args
 
1240
  parser = argparse.ArgumentParser()
1241
  parser = _add_logging_args(parser)
1242
  logging_args = parser.parse_known_args()
 
1246
  self.megatron_lm_default_args[key] = True
1247
  elif key.startswith("no_log_"):
1248
  self.megatron_lm_default_args[key.replace("no_", "")] = True
 
 
1249
  @dataclass
1250
  class BnbQuantizationConfig:
1251
  """
1252
  A plugin to enable BitsAndBytes 4bit and 8bit quantization
1253
  """
1254
  load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."})
 
1255
  llm_int8_threshold: float = field(
1256
  default=6.0, metadata={"help": "value of the outliner threshold. only relevant when load_in_8bit=True"}
1257
  )
 
1258
  load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."})
 
1259
  bnb_4bit_quant_type: str = field(
1260
  default="fp4",
1261
  metadata={
1262
  "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','np4'}."
1263
  },
1264
  )
 
1265
  bnb_4bit_use_double_quant: bool = field(
1266
  default=False,
1267
  metadata={
1268
  "help": "enable nested quantization where the quantization constants from the first quantization are quantized again."
1269
  },
1270
  )
 
1271
  bnb_4bit_compute_dtype: str = field(
1272
  default="fp16",
1273
  metadata={
 
1275
  "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}."
1276
  },
1277
  )
 
1278
  torch_dtype: torch.dtype = field(
1279
  default=None,
1280
  metadata={
 
1282
  "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model "
1283
  },
1284
  )
 
1285
  skip_modules: List[str] = field(
1286
  default=None,
1287
  metadata={
1288
  "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`."
1289
  },
1290
  )
 
1291
  keep_in_fp32_modules: List[str] = field(
1292
  default=None,
1293
  metadata={"help": "an explicit list of the modules that we don't quantize. We keep them in `torch.float32`."},
1294
  )
 
1295
  def __post_init__(self):
1296
  """
1297
  Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.
1298
  """
1299
  if not isinstance(self.load_in_8bit, bool):
1300
  raise ValueError("load_in_8bit must be a boolean")
 
1301
  if not isinstance(self.load_in_4bit, bool):
1302
  raise ValueError("load_in_4bit must be a boolean")
 
1303
  if self.load_in_4bit and self.load_in_8bit:
1304
  raise ValueError("load_in_4bit and load_in_8 can't be both True")
 
1305
  if not self.load_in_4bit and not self.load_in_8bit:
1306
  raise ValueError("load_in_4bit and load_in_8 can't be both False")
 
1307
  if not isinstance(self.llm_int8_threshold, (int, float)):
1308
  raise ValueError("llm_int8_threshold must be a float or an int")
 
1309
  if not isinstance(self.bnb_4bit_quant_type, str):
1310
  raise ValueError("bnb_4bit_quant_type must be a string")
1311
  elif self.bnb_4bit_quant_type not in ["fp4", "nf4"]:
1312
  raise ValueError(f"bnb_4bit_quant_type must be in ['fp4','nf4'] but found {self.bnb_4bit_quant_type}")
 
1313
  if not isinstance(self.bnb_4bit_use_double_quant, bool):
1314
  raise ValueError("bnb_4bit_use_double_quant must be a boolean")
 
1315
  if isinstance(self.bnb_4bit_compute_dtype, str):
1316
  if self.bnb_4bit_compute_dtype == "fp32":
1317
  self.bnb_4bit_compute_dtype = torch.float32
 
1325
  )
1326
  elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
1327
  raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
 
1328
  if self.skip_modules is not None and not isinstance(self.skip_modules, list):
1329
  raise ValueError("skip_modules must be a list of strings")
 
1330
  if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list):
1331
  raise ValueError("keep_in_fp_32_modules must be a list of strings")
 
1332
  if self.load_in_4bit:
1333
  self.target_dtype = CustomDtype.INT4
 
1334
  if self.load_in_8bit:
1335
  self.target_dtype = torch.int8
 
1336
  if self.load_in_4bit and self.llm_int8_threshold != 6.0:
1337
  warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit")
 
1338
  if isinstance(self.torch_dtype, str):
1339
  if self.torch_dtype == "fp32":
1340
  self.torch_dtype = torch.float32
 
1346
  raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}")
1347
  if self.load_in_8bit and self.torch_dtype is None:
1348
  self.torch_dtype = torch.float16
 
1349
  if self.load_in_4bit and self.torch_dtype is None:
1350
  self.torch_dtype = self.bnb_4bit_compute_dtype
 
1351
  if not isinstance(self.torch_dtype, torch.dtype):
1352
  raise ValueError("torch_dtype must be a torch.dtype")
src/utils/deepspeed.py CHANGED
@@ -1,18 +1,14 @@
1
  class HfDeepSpeedConfig:
2
  """
3
  This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
4
-
5
  A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
6
  things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
7
  it's important that this object remains alive while the program is still running.
8
-
9
  [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
10
  with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
11
  the DeepSpeed configuration is not modified in any way.
12
-
13
  Args:
14
  config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
15
-
16
  """
17
  def __init__(self, config_file_or_dict):
18
  if isinstance(config_file_or_dict, dict):
@@ -30,17 +26,13 @@ class HfDeepSpeedConfig:
30
  raise ValueError(
31
  f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
32
  )
33
-
34
  self.config = config
35
-
36
  self.set_stage_and_offload()
37
-
38
  def set_stage_and_offload(self):
39
  # zero stage - this is done as early as possible, before model is created, to allow
40
  # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
41
  # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
42
  self._stage = self.get_value("zero_optimization.stage", -1)
43
-
44
  # offload
45
  self._offload = False
46
  if self.is_zero2() or self.is_zero3():
@@ -53,10 +45,8 @@ class HfDeepSpeedConfig:
53
  )
54
  if len(offload_devices & offload_devices_valid) > 0:
55
  self._offload = True
56
-
57
  def find_config_node(self, ds_key_long):
58
  config = self.config
59
-
60
  # find the config node of interest if it exists
61
  nodes = ds_key_long.split(".")
62
  ds_key = nodes.pop()
@@ -64,9 +54,7 @@ class HfDeepSpeedConfig:
64
  config = config.get(node)
65
  if config is None:
66
  return None, ds_key
67
-
68
  return config, ds_key
69
-
70
  def get_value(self, ds_key_long, default=None):
71
  """
72
  Returns the set value or `default` if no value is set
@@ -75,15 +63,12 @@ class HfDeepSpeedConfig:
75
  if config is None:
76
  return default
77
  return config.get(ds_key, default)
78
-
79
  def del_config_sub_tree(self, ds_key_long, must_exist=False):
80
  """
81
  Deletes a sub-section of the config file if it's found.
82
-
83
  Unless `must_exist` is `True` the section doesn't have to exist.
84
  """
85
  config = self.config
86
-
87
  # find the config node of interest if it exists
88
  nodes = ds_key_long.split(".")
89
  for node in nodes:
@@ -94,20 +79,16 @@ class HfDeepSpeedConfig:
94
  raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
95
  else:
96
  return
97
-
98
  # if found remove it
99
  if parent_config is not None:
100
  parent_config.pop(node)
101
-
102
  def is_true(self, ds_key_long):
103
  """
104
  Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very
105
  specific question of whether the value is set to `True` (and it's not set to `False` or isn't set).
106
-
107
  """
108
  value = self.get_value(ds_key_long)
109
  return False if value is None else bool(value)
110
-
111
  def is_false(self, ds_key_long):
112
  """
113
  Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very
@@ -115,31 +96,23 @@ class HfDeepSpeedConfig:
115
  """
116
  value = self.get_value(ds_key_long)
117
  return False if value is None else not bool(value)
118
-
119
  def is_zero2(self):
120
  return self._stage == 2
121
-
122
  def is_zero3(self):
123
  return self._stage == 3
124
-
125
  def is_offload(self):
126
  return self._offload
127
-
128
-
129
  class DeepSpeedEngineWrapper:
130
  """
131
  Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop.
132
-
133
  Args:
134
  engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap
135
  """
136
  def __init__(self, engine):
137
  self.engine = engine
138
-
139
  def backward(self, loss, **kwargs):
140
  # runs backpropagation and handles mixed precision
141
  self.engine.backward(loss, **kwargs)
142
-
143
  # Deepspeed's `engine.step` performs the following operations:
144
  # - gradient accumulation check
145
  # - gradient clipping
@@ -151,12 +124,9 @@ class DeepSpeedEngineWrapper:
151
  # and this plugin overrides the above calls with no-ops when Accelerate runs under
152
  # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
153
  # training loop that works transparently under many training regimes.
154
-
155
-
156
  class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
157
  """
158
  Internal wrapper around a deepspeed optimizer.
159
-
160
  Args:
161
  optimizer (`torch.optim.optimizer.Optimizer`):
162
  The optimizer to wrap.
@@ -164,25 +134,19 @@ class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
164
  def __init__(self, optimizer):
165
  super().__init__(optimizer, device_placement=False, scaler=None)
166
  self.__has_overflow__ = hasattr(self.optimizer, "overflow")
167
-
168
  def zero_grad(self, set_to_none=None):
169
  pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
170
-
171
  def step(self):
172
  pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
173
-
174
  @property
175
  def step_was_skipped(self):
176
  """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
177
  if self.__has_overflow__:
178
  return self.optimizer.overflow
179
  return False
180
-
181
-
182
  class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
183
  """
184
  Internal wrapper around a deepspeed scheduler.
185
-
186
  Args:
187
  scheduler (`torch.optim.lr_scheduler.LambdaLR`):
188
  The scheduler to wrap.
@@ -190,16 +154,12 @@ class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
190
  """
191
  def __init__(self, scheduler, optimizers):
192
  super().__init__(scheduler, optimizers)
193
-
194
  def step(self):
195
  pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
196
-
197
-
198
  class DummyOptim:
199
  """
200
  Dummy optimizer presents model parameters or param groups; it is primarily used to follow the conventional training
201
  loop when optimizer config is specified in the deepspeed config file.
202
-
203
  Args:
204
  lr (float):
205
  Learning rate.
@@ -215,13 +175,10 @@ class DummyOptim:
215
  self.lr = lr
216
  self.weight_decay = weight_decay
217
  self.kwargs = kwargs
218
-
219
-
220
  class DummyScheduler:
221
  """
222
  Dummy scheduler presents model parameters or param groups; it is primarily used to follow the conventional training
223
  loop when scheduler config is specified in the deepspeed config file.
224
-
225
  Args:
226
  optimizer (`torch.optim.optimizer.Optimizer`):
227
  The optimizer to wrap.
 
1
  class HfDeepSpeedConfig:
2
  """
3
  This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
 
4
  A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
5
  things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
6
  it's important that this object remains alive while the program is still running.
 
7
  [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
8
  with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
9
  the DeepSpeed configuration is not modified in any way.
 
10
  Args:
11
  config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
 
12
  """
13
  def __init__(self, config_file_or_dict):
14
  if isinstance(config_file_or_dict, dict):
 
26
  raise ValueError(
27
  f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
28
  )
 
29
  self.config = config
 
30
  self.set_stage_and_offload()
 
31
  def set_stage_and_offload(self):
32
  # zero stage - this is done as early as possible, before model is created, to allow
33
  # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
34
  # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
35
  self._stage = self.get_value("zero_optimization.stage", -1)
 
36
  # offload
37
  self._offload = False
38
  if self.is_zero2() or self.is_zero3():
 
45
  )
46
  if len(offload_devices & offload_devices_valid) > 0:
47
  self._offload = True
 
48
  def find_config_node(self, ds_key_long):
49
  config = self.config
 
50
  # find the config node of interest if it exists
51
  nodes = ds_key_long.split(".")
52
  ds_key = nodes.pop()
 
54
  config = config.get(node)
55
  if config is None:
56
  return None, ds_key
 
57
  return config, ds_key
 
58
  def get_value(self, ds_key_long, default=None):
59
  """
60
  Returns the set value or `default` if no value is set
 
63
  if config is None:
64
  return default
65
  return config.get(ds_key, default)
 
66
  def del_config_sub_tree(self, ds_key_long, must_exist=False):
67
  """
68
  Deletes a sub-section of the config file if it's found.
 
69
  Unless `must_exist` is `True` the section doesn't have to exist.
70
  """
71
  config = self.config
 
72
  # find the config node of interest if it exists
73
  nodes = ds_key_long.split(".")
74
  for node in nodes:
 
79
  raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
80
  else:
81
  return
 
82
  # if found remove it
83
  if parent_config is not None:
84
  parent_config.pop(node)
 
85
  def is_true(self, ds_key_long):
86
  """
87
  Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very
88
  specific question of whether the value is set to `True` (and it's not set to `False` or isn't set).
 
89
  """
90
  value = self.get_value(ds_key_long)
91
  return False if value is None else bool(value)
 
92
  def is_false(self, ds_key_long):
93
  """
94
  Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very
 
96
  """
97
  value = self.get_value(ds_key_long)
98
  return False if value is None else not bool(value)
 
99
  def is_zero2(self):
100
  return self._stage == 2
 
101
  def is_zero3(self):
102
  return self._stage == 3
 
103
  def is_offload(self):
104
  return self._offload
 
 
105
  class DeepSpeedEngineWrapper:
106
  """
107
  Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop.
 
108
  Args:
109
  engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap
110
  """
111
  def __init__(self, engine):
112
  self.engine = engine
 
113
  def backward(self, loss, **kwargs):
114
  # runs backpropagation and handles mixed precision
115
  self.engine.backward(loss, **kwargs)
 
116
  # Deepspeed's `engine.step` performs the following operations:
117
  # - gradient accumulation check
118
  # - gradient clipping
 
124
  # and this plugin overrides the above calls with no-ops when Accelerate runs under
125
  # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
126
  # training loop that works transparently under many training regimes.
 
 
127
  class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
128
  """
129
  Internal wrapper around a deepspeed optimizer.
 
130
  Args:
131
  optimizer (`torch.optim.optimizer.Optimizer`):
132
  The optimizer to wrap.
 
134
  def __init__(self, optimizer):
135
  super().__init__(optimizer, device_placement=False, scaler=None)
136
  self.__has_overflow__ = hasattr(self.optimizer, "overflow")
 
137
  def zero_grad(self, set_to_none=None):
138
  pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
 
139
  def step(self):
140
  pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
 
141
  @property
142
  def step_was_skipped(self):
143
  """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
144
  if self.__has_overflow__:
145
  return self.optimizer.overflow
146
  return False
 
 
147
  class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
148
  """
149
  Internal wrapper around a deepspeed scheduler.
 
150
  Args:
151
  scheduler (`torch.optim.lr_scheduler.LambdaLR`):
152
  The scheduler to wrap.
 
154
  """
155
  def __init__(self, scheduler, optimizers):
156
  super().__init__(scheduler, optimizers)
 
157
  def step(self):
158
  pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
 
 
159
  class DummyOptim:
160
  """
161
  Dummy optimizer presents model parameters or param groups; it is primarily used to follow the conventional training
162
  loop when optimizer config is specified in the deepspeed config file.
 
163
  Args:
164
  lr (float):
165
  Learning rate.
 
175
  self.lr = lr
176
  self.weight_decay = weight_decay
177
  self.kwargs = kwargs
 
 
178
  class DummyScheduler:
179
  """
180
  Dummy scheduler presents model parameters or param groups; it is primarily used to follow the conventional training
181
  loop when scheduler config is specified in the deepspeed config file.
 
182
  Args:
183
  optimizer (`torch.optim.optimizer.Optimizer`):
184
  The optimizer to wrap.
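A brief usage sketch (editor's illustration, assuming `HfDeepSpeedConfig` above and the module's stripped imports are available) of the dotted-key query helpers, using a hypothetical in-memory DeepSpeed config:
```python
ds_config = HfDeepSpeedConfig(
    {
        "train_micro_batch_size_per_gpu": 1,
        "zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}},
    }
)

print(ds_config.is_zero3())                            # True
print(ds_config.is_offload())                          # True, offload_param.device is "cpu"
print(ds_config.get_value("zero_optimization.stage"))  # 3
print(ds_config.get_value("missing.key", "fallback"))  # "fallback"
```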
src/utils/environment.py CHANGED
@@ -1,7 +1,6 @@
1
  def str_to_bool(value) -> int:
2
  """
3
  Converts a string representation of truth to `True` (1) or `False` (0).
4
-
5
  True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`;
6
  """
7
  value = value.lower()
@@ -11,8 +10,6 @@ def str_to_bool(value) -> int:
11
  return 0
12
  else:
13
  raise ValueError(f"invalid truth value {value}")
14
-
15
-
16
  def get_int_from_env(env_keys, default):
17
  """Returns the first positive env value found in the `env_keys` list or the default."""
18
  for e in env_keys:
@@ -20,30 +17,21 @@ def get_int_from_env(env_keys, default):
20
  if val >= 0:
21
  return val
22
  return default
23
-
24
-
25
  def parse_flag_from_env(key, default=False):
26
  """Returns truthy value for `key` from the env if available else the default."""
27
  value = os.environ.get(key, str(default))
28
  return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...
29
-
30
-
31
  def parse_choice_from_env(key, default="no"):
32
  value = os.environ.get(key, str(default))
33
  return value
34
-
35
-
36
  def are_libraries_initialized(*library_names: str) -> List[str]:
37
  """
38
  Checks if any of `library_names` are imported in the environment and returns the list of those that are.
39
  """
40
  return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
41
-
42
-
43
  def get_gpu_info():
44
  """
45
  Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.
46
-
47
  Largely based on the `gputil` library.
48
  """
49
  if platform.system() == "Windows":
@@ -64,13 +52,10 @@ def get_gpu_info():
64
  gpu_count = len(gpus)
65
  gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
66
  return gpu_names, gpu_count
67
-
68
-
69
  def check_cuda_p2p_ib_support():
70
  """
71
  Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after
72
  the 3090.
73
-
74
  Notably uses `nvidia-smi` instead of torch to not initialize CUDA.
75
  """
76
  try:
@@ -86,12 +71,9 @@ def check_cuda_p2p_ib_support():
86
  except Exception:
87
  pass
88
  return True
89
-
90
-
91
  def check_fp8_capability():
92
  """
93
  Checks if all the current GPUs available support FP8.
94
-
95
  Notably must initialize `torch.cuda` to check.
96
  """
97
  cuda_device_capacity = torch.cuda.get_device_capability()
 
1
  def str_to_bool(value) -> int:
2
  """
3
  Converts a string representation of truth to `True` (1) or `False` (0).
 
4
  True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`;
5
  """
6
  value = value.lower()
 
10
  return 0
11
  else:
12
  raise ValueError(f"invalid truth value {value}")
 
 
13
  def get_int_from_env(env_keys, default):
14
  """Returns the first positive env value found in the `env_keys` list or the default."""
15
  for e in env_keys:
 
17
  if val >= 0:
18
  return val
19
  return default
 
 
20
  def parse_flag_from_env(key, default=False):
21
  """Returns truthy value for `key` from the env if available else the default."""
22
  value = os.environ.get(key, str(default))
23
  return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...
 
 
24
  def parse_choice_from_env(key, default="no"):
25
  value = os.environ.get(key, str(default))
26
  return value
 
 
27
  def are_libraries_initialized(*library_names: str) -> List[str]:
28
  """
29
  Checks if any of `library_names` are imported in the environment and returns the list of those that are.
30
  """
31
  return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
 
 
32
  def get_gpu_info():
33
  """
34
  Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.
 
35
  Largely based on the `gputil` library.
36
  """
37
  if platform.system() == "Windows":
 
52
  gpu_count = len(gpus)
53
  gpu_names = [gpu.split(",")[1].strip() for gpu in gpus]
54
  return gpu_names, gpu_count
 
 
55
  def check_cuda_p2p_ib_support():
56
  """
57
  Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after
58
  the 3090.
 
59
  Notably uses `nvidia-smi` instead of torch to not initialize CUDA.
60
  """
61
  try:
 
71
  except Exception:
72
  pass
73
  return True
 
 
74
  def check_fp8_capability():
75
  """
76
  Checks if all the current GPUs available support FP8.
 
77
  Notably must initialize `torch.cuda` to check.
78
  """
79
  cuda_device_capacity = torch.cuda.get_device_capability()
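A quick sketch (editor's example; the environment-variable names here are hypothetical) of how the helpers above compose: `str_to_bool` returns an int, which `parse_flag_from_env` compares against 1 to yield a real boolean:
```python
import os

assert str_to_bool("YES") == 1
assert str_to_bool("off") == 0

os.environ["MY_EXAMPLE_FLAG"] = "true"
assert parse_flag_from_env("MY_EXAMPLE_FLAG") is True
assert parse_flag_from_env("MY_UNSET_FLAG", default=False) is False
assert get_int_from_env(["MY_UNSET_INT"], 8) == 8  # falls back to the default
```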
src/utils/fsdp_utils.py CHANGED
@@ -1,16 +1,12 @@
1
  logger = get_logger(__name__)
2
-
3
-
4
  def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
5
  os.makedirs(output_dir, exist_ok=True)
6
-
7
  if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
8
  # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
9
  # so, only enable it when num_processes>1
10
  is_multi_process = accelerator.num_processes > 1
11
  fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
12
  fsdp_plugin.state_dict_config.rank0_only = is_multi_process
13
-
14
  with FSDP.state_dict_type(
15
  model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
16
  ):
@@ -37,15 +33,12 @@ def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
37
  os.makedirs(ckpt_dir, exist_ok=True)
38
  logger.info(f"Saving model to {ckpt_dir}")
39
  state_dict = {"model": state_dict}
40
-
41
  dist_cp.save_state_dict(
42
  state_dict=state_dict,
43
  storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
44
  planner=DefaultSavePlanner(),
45
  )
46
  logger.info(f"Model saved to {ckpt_dir}")
47
-
48
-
49
  def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
50
  accelerator.wait_for_everyone()
51
  if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
@@ -97,8 +90,6 @@ def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
97
  logger.info(f"Model loaded from {ckpt_dir}")
98
  load_result = model.load_state_dict(state_dict)
99
  return load_result
100
-
101
-
102
  def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
103
  os.makedirs(output_dir, exist_ok=True)
104
  with FSDP.state_dict_type(
@@ -124,8 +115,6 @@ def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir,
124
  planner=DefaultSavePlanner(),
125
  )
126
  logger.info(f"Optimizer state saved in {ckpt_dir}")
127
-
128
-
129
  def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
130
  accelerator.wait_for_everyone()
131
  with FSDP.state_dict_type(
 
1
  logger = get_logger(__name__)
 
 
2
  def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
3
  os.makedirs(output_dir, exist_ok=True)
 
4
  if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
5
  # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
6
  # so, only enable it when num_processes>1
7
  is_multi_process = accelerator.num_processes > 1
8
  fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
9
  fsdp_plugin.state_dict_config.rank0_only = is_multi_process
 
10
  with FSDP.state_dict_type(
11
  model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
12
  ):
 
33
  os.makedirs(ckpt_dir, exist_ok=True)
34
  logger.info(f"Saving model to {ckpt_dir}")
35
  state_dict = {"model": state_dict}
 
36
  dist_cp.save_state_dict(
37
  state_dict=state_dict,
38
  storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
39
  planner=DefaultSavePlanner(),
40
  )
41
  logger.info(f"Model saved to {ckpt_dir}")
 
 
42
  def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
43
  accelerator.wait_for_everyone()
44
  if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
 
90
  logger.info(f"Model loaded from {ckpt_dir}")
91
  load_result = model.load_state_dict(state_dict)
92
  return load_result
 
 
93
  def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
94
  os.makedirs(output_dir, exist_ok=True)
95
  with FSDP.state_dict_type(
 
115
  planner=DefaultSavePlanner(),
116
  )
117
  logger.info(f"Optimizer state saved in {ckpt_dir}")
 
 
118
  def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
119
  accelerator.wait_for_everyone()
120
  with FSDP.state_dict_type(
src/utils/imports.py CHANGED
@@ -1,7 +1,5 @@
1
  # Cache this result as it's a C FFI call which can be pretty time-consuming
2
  _torch_distributed_available = torch.distributed.is_available()
3
-
4
-
5
  def _is_package_available(pkg_name):
6
  # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version
7
  package_exists = importlib.util.find_spec(pkg_name) is not None
@@ -11,12 +9,8 @@ def _is_package_available(pkg_name):
11
  return True
12
  except importlib.metadata.PackageNotFoundError:
13
  return False
14
-
15
-
16
  def is_torch_distributed_available() -> bool:
17
  return _torch_distributed_available
18
-
19
-
20
  def is_ccl_available():
21
  try:
22
  pass
@@ -30,12 +24,8 @@ def is_ccl_available():
30
  importlib.util.find_spec("torch_ccl") is not None
31
  or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
32
  )
33
-
34
-
35
  def get_ccl_version():
36
  return importlib.metadata.version("oneccl_bind_pt")
37
-
38
-
39
  def is_msamp_available():
40
  package_exists = importlib.util.find_spec("msamp") is not None
41
  if package_exists:
@@ -46,16 +36,10 @@ def is_msamp_available():
46
  except importlib.metadata.PackageNotFoundError:
47
  return False
48
  return False
49
-
50
-
51
  def is_transformer_engine_available():
52
  return _is_package_available("transformer_engine")
53
-
54
-
55
  def is_fp8_available():
56
  return is_msamp_available() or is_transformer_engine_available()
57
-
58
-
59
  def is_cuda_available():
60
  """
61
  Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
@@ -67,8 +51,6 @@ def is_cuda_available():
67
  finally:
68
  os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)
69
  return available
70
-
71
-
72
  @lru_cache
73
  def is_tpu_available(check_device=True):
74
  "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
@@ -84,12 +66,8 @@ def is_tpu_available(check_device=True):
84
  except RuntimeError:
85
  return False
86
  return _tpu_available
87
-
88
-
89
  def is_deepspeed_available():
90
  return _is_package_available("deepspeed")
91
-
92
-
93
  def is_bf16_available(ignore_tpu=False):
94
  "Checks if bf16 is supported, optionally ignoring the TPU"
95
  if is_tpu_available():
@@ -97,28 +75,20 @@ def is_bf16_available(ignore_tpu=False):
97
  if is_cuda_available():
98
  return torch.cuda.is_bf16_supported()
99
  return True
100
-
101
-
102
  def is_4bit_bnb_available():
103
  package_exists = _is_package_available("bitsandbytes")
104
  if package_exists:
105
  bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
106
  return compare_versions(bnb_version, ">=", "0.39.0")
107
  return False
108
-
109
-
110
  def is_8bit_bnb_available():
111
  package_exists = _is_package_available("bitsandbytes")
112
  if package_exists:
113
  bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
114
  return compare_versions(bnb_version, ">=", "0.37.2")
115
  return False
116
-
117
-
118
  def is_bnb_available():
119
  return _is_package_available("bitsandbytes")
120
-
121
-
122
  def is_megatron_lm_available():
123
  if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
124
  package_exists = importlib.util.find_spec("megatron") is not None
@@ -129,44 +99,26 @@ def is_megatron_lm_available():
129
  except Exception as e:
130
  warnings.warn(f"Parse Megatron version failed. Exception:{e}")
131
  return False
132
-
133
-
134
  def is_transformers_available():
135
  return _is_package_available("transformers")
136
-
137
-
138
  def is_datasets_available():
139
  return _is_package_available("datasets")
140
-
141
-
142
  def is_timm_available():
143
  return _is_package_available("timm")
144
-
145
-
146
  def is_aim_available():
147
  package_exists = _is_package_available("aim")
148
  if package_exists:
149
  aim_version = version.parse(importlib.metadata.version("aim"))
150
  return compare_versions(aim_version, "<", "4.0.0")
151
  return False
152
-
153
-
154
  def is_tensorboard_available():
155
  return _is_package_available("tensorboard") or _is_package_available("tensorboardX")
156
-
157
-
158
  def is_wandb_available():
159
  return _is_package_available("wandb")
160
-
161
-
162
  def is_comet_ml_available():
163
  return _is_package_available("comet_ml")
164
-
165
-
166
  def is_boto3_available():
167
  return _is_package_available("boto3")
168
-
169
-
170
  def is_rich_available():
171
  if _is_package_available("rich"):
172
  if "ACCELERATE_DISABLE_RICH" in os.environ:
@@ -176,28 +128,17 @@ def is_rich_available():
176
  return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False)
177
  return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False)
178
  return False
179
-
180
-
181
  def is_sagemaker_available():
182
  return _is_package_available("sagemaker")
183
-
184
-
185
  def is_tqdm_available():
186
  return _is_package_available("tqdm")
187
-
188
-
189
  def is_clearml_available():
190
  return _is_package_available("clearml")
191
-
192
-
193
  def is_pandas_available():
194
  return _is_package_available("pandas")
195
-
196
-
197
  def is_mlflow_available():
198
  if _is_package_available("mlflow"):
199
  return True
200
-
201
  if importlib.util.find_spec("mlflow") is not None:
202
  try:
203
  _ = importlib.metadata.metadata("mlflow-skinny")
@@ -205,16 +146,11 @@ def is_mlflow_available():
205
  except importlib.metadata.PackageNotFoundError:
206
  return False
207
  return False
208
-
209
-
210
  def is_mps_available():
211
  return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()
212
-
213
-
214
  def is_ipex_available():
215
  def get_major_and_minor_from_version(full_version):
216
  return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
217
-
218
  _torch_version = importlib.metadata.version("torch")
219
  if importlib.util.find_spec("intel_extension_for_pytorch") is None:
220
  return False
@@ -232,17 +168,13 @@ def is_ipex_available():
232
  )
233
  return False
234
  return True
235
-
236
-
237
  @lru_cache
238
  def is_npu_available(check_device=False):
239
  "Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
240
  if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
241
  return False
242
-
243
  import torch
244
  import torch_npu # noqa: F401
245
-
246
  if check_device:
247
  try:
248
  # Will raise a RuntimeError if no NPU is found
@@ -251,8 +183,6 @@ def is_npu_available(check_device=False):
251
  except RuntimeError:
252
  return False
253
  return hasattr(torch, "npu") and torch.npu.is_available()
254
-
255
-
256
  @lru_cache
257
  def is_xpu_available(check_device=False):
258
  "check if user disables it explicitly"
@@ -261,14 +191,11 @@ def is_xpu_available(check_device=False):
261
  "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment"
262
  if is_ipex_available():
263
  import torch
264
-
265
  if is_torch_version("<=", "1.12"):
266
  return False
267
  else:
268
  return False
269
-
270
  import intel_extension_for_pytorch # noqa: F401
271
-
272
  if check_device:
273
  try:
274
  # Will raise a RuntimeError if no XPU is found
@@ -277,7 +204,5 @@ def is_xpu_available(check_device=False):
277
  except RuntimeError:
278
  return False
279
  return hasattr(torch, "xpu") and torch.xpu.is_available()
280
-
281
-
282
  def is_dvclive_available():
283
  return _is_package_available("dvclive")
 
1
  # Cache this result as it's a C FFI call which can be pretty time-consuming
2
  _torch_distributed_available = torch.distributed.is_available()
 
 
3
  def _is_package_available(pkg_name):
4
  # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version
5
  package_exists = importlib.util.find_spec(pkg_name) is not None
 
9
  return True
10
  except importlib.metadata.PackageNotFoundError:
11
  return False
 
 
12
  def is_torch_distributed_available() -> bool:
13
  return _torch_distributed_available
 
 
14
  def is_ccl_available():
15
  try:
16
  pass
 
24
  importlib.util.find_spec("torch_ccl") is not None
25
  or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
26
  )
 
 
27
  def get_ccl_version():
28
  return importlib.metadata.version("oneccl_bind_pt")
 
 
29
  def is_msamp_available():
30
  package_exists = importlib.util.find_spec("msamp") is not None
31
  if package_exists:
 
36
  except importlib.metadata.PackageNotFoundError:
37
  return False
38
  return False
 
 
39
  def is_transformer_engine_available():
40
  return _is_package_available("transformer_engine")
 
 
41
  def is_fp8_available():
42
  return is_msamp_available() or is_transformer_engine_available()
 
 
43
  def is_cuda_available():
44
  """
45
  Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
 
51
  finally:
52
  os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)
53
  return available
 
 
54
  @lru_cache
55
  def is_tpu_available(check_device=True):
56
  "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
 
66
  except RuntimeError:
67
  return False
68
  return _tpu_available
 
 
69
  def is_deepspeed_available():
70
  return _is_package_available("deepspeed")
 
 
71
  def is_bf16_available(ignore_tpu=False):
72
  "Checks if bf16 is supported, optionally ignoring the TPU"
73
  if is_tpu_available():
 
75
  if is_cuda_available():
76
  return torch.cuda.is_bf16_supported()
77
  return True
 
 
78
  def is_4bit_bnb_available():
79
  package_exists = _is_package_available("bitsandbytes")
80
  if package_exists:
81
  bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
82
  return compare_versions(bnb_version, ">=", "0.39.0")
83
  return False
 
 
84
  def is_8bit_bnb_available():
85
  package_exists = _is_package_available("bitsandbytes")
86
  if package_exists:
87
  bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
88
  return compare_versions(bnb_version, ">=", "0.37.2")
89
  return False
 
 
90
  def is_bnb_available():
91
  return _is_package_available("bitsandbytes")
 
 
92
  def is_megatron_lm_available():
93
  if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
94
  package_exists = importlib.util.find_spec("megatron") is not None
 
99
  except Exception as e:
100
  warnings.warn(f"Parse Megatron version failed. Exception:{e}")
101
  return False
 
 
def is_transformers_available():
    return _is_package_available("transformers")


def is_datasets_available():
    return _is_package_available("datasets")


def is_timm_available():
    return _is_package_available("timm")


def is_aim_available():
    package_exists = _is_package_available("aim")
    if package_exists:
        aim_version = version.parse(importlib.metadata.version("aim"))
        return compare_versions(aim_version, "<", "4.0.0")
    return False


def is_tensorboard_available():
    return _is_package_available("tensorboard") or _is_package_available("tensorboardX")


def is_wandb_available():
    return _is_package_available("wandb")


def is_comet_ml_available():
    return _is_package_available("comet_ml")


def is_boto3_available():
    return _is_package_available("boto3")
 
 
122
  def is_rich_available():
123
  if _is_package_available("rich"):
124
  if "ACCELERATE_DISABLE_RICH" in os.environ:
 
128
  return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False)
129
  return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False)
130
  return False
 
 
def is_sagemaker_available():
    return _is_package_available("sagemaker")


def is_tqdm_available():
    return _is_package_available("tqdm")


def is_clearml_available():
    return _is_package_available("clearml")


def is_pandas_available():
    return _is_package_available("pandas")
 
 
139
  def is_mlflow_available():
140
  if _is_package_available("mlflow"):
141
  return True
 
142
  if importlib.util.find_spec("mlflow") is not None:
143
  try:
144
  _ = importlib.metadata.metadata("mlflow-skinny")
 
146
  except importlib.metadata.PackageNotFoundError:
147
  return False
148
  return False
 
 
def is_mps_available():
    return is_torch_version(">=", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()
 
 
151
  def is_ipex_available():
152
  def get_major_and_minor_from_version(full_version):
153
  return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)
 
154
  _torch_version = importlib.metadata.version("torch")
155
  if importlib.util.find_spec("intel_extension_for_pytorch") is None:
156
  return False
 
168
  )
169
  return False
170
  return True
 
 
171
  @lru_cache
172
  def is_npu_available(check_device=False):
173
  "Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
174
  if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
175
  return False
 
176
  import torch
177
  import torch_npu # noqa: F401
 
178
  if check_device:
179
  try:
180
  # Will raise a RuntimeError if no NPU is found
 
183
  except RuntimeError:
184
  return False
185
  return hasattr(torch, "npu") and torch.npu.is_available()
 
 
186
  @lru_cache
187
  def is_xpu_available(check_device=False):
188
  "check if user disables it explicitly"
 
191
  "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment"
192
  if is_ipex_available():
193
  import torch
 
194
  if is_torch_version("<=", "1.12"):
195
  return False
196
  else:
197
  return False
 
198
  import intel_extension_for_pytorch # noqa: F401
 
199
  if check_device:
200
  try:
201
  # Will raise a RuntimeError if no XPU is found
 
204
  except RuntimeError:
205
  return False
206
  return hasattr(torch, "xpu") and torch.xpu.is_available()
 
 
def is_dvclive_available():
    return _is_package_available("dvclive")
src/utils/launch.py CHANGED
@@ -7,8 +7,6 @@ def _filter_args(args, parser, default_args=[]):
7
  if key in vars(new_args).keys():
8
  setattr(new_args, key, value)
9
  return new_args
10
-
11
-
12
  def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
13
  """
14
  Prepares and returns the command list and an environment with the correct simple launcher environment variables.
@@ -22,7 +20,6 @@ def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str]
22
  cmd.append("-m")
23
  cmd.append(args.training_script)
24
  cmd.extend(args.training_script_args)
25
-
26
  current_env = os.environ.copy()
27
  current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu)
28
  if args.debug:
@@ -40,16 +37,13 @@ def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str]
40
  elif args.num_processes > 1:
41
  current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1"
42
  current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500"
43
-
44
  try:
45
  mixed_precision = PrecisionType(args.mixed_precision.lower())
46
  except ValueError:
47
  raise ValueError(
48
  f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
49
  )
50
-
51
  current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
52
-
53
  try:
54
  dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
55
  except ValueError:
@@ -60,14 +54,11 @@ def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str]
60
  current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
61
  current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
62
  current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
63
-
64
  current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
65
  if is_ipex_available():
66
  current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower()
67
  current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower()
68
  return cmd, current_env
69
-
70
-
71
  def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
72
  """
73
  Prepares and returns an environment with the correct multi-GPU environment variables.
@@ -89,10 +80,8 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
89
  setattr(args, "nproc_per_node", str(num_processes))
90
  if main_process_port is not None:
91
  setattr(args, "master_port", str(main_process_port))
92
-
93
  if main_process_port is None:
94
  main_process_port = 29500
95
-
96
  # only need to check port availability in main process, in case we have to start multiple launchers on the same machine
97
  # for some reasons like splitting log files.
98
  need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
@@ -102,14 +91,12 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
102
  "Please specify a different port (such as using the `----main_process_port` flag or specifying a different `main_process_port` in your config file)"
103
  " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
104
  )
105
-
106
  if args.module and args.no_python:
107
  raise ValueError("--module and --no_python cannot be used together")
108
  elif args.module:
109
  setattr(args, "module", True)
110
  elif args.no_python:
111
  setattr(args, "no_python", True)
112
-
113
  current_env = os.environ.copy()
114
  if args.debug:
115
  current_env["ACCELERATE_DEBUG_MODE"] = "true"
@@ -126,9 +113,7 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
126
  mixed_precision = PrecisionType(mixed_precision)
127
  except ValueError:
128
  raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.")
129
-
130
  current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
131
-
132
  try:
133
  dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
134
  except ValueError:
@@ -139,12 +124,10 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
139
  current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
140
  current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
141
  current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
142
-
143
  if args.use_fsdp:
144
  current_env["ACCELERATE_USE_FSDP"] = "true"
145
  if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states:
146
  raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`")
147
-
148
  current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy)
149
  current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower()
150
  current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params)
@@ -167,7 +150,6 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
167
  current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower()
168
  current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower()
169
  current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower()
170
-
171
  if args.use_megatron_lm:
172
  prefix = "MEGATRON_LM_"
173
  current_env["ACCELERATE_USE_MEGATRON_LM"] = "true"
@@ -182,11 +164,8 @@ def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
182
  current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations)
183
  if args.megatron_lm_use_distributed_optimizer is not None:
184
  current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer)
185
-
186
  current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
187
  return current_env
188
-
189
-
190
  def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
191
  """
192
  Prepares and returns the command list and an environment with the correct DeepSpeed environment variables.
@@ -196,12 +175,10 @@ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict
196
  main_process_ip = getattr(args, "main_process_ip")
197
  main_process_port = getattr(args, "main_process_port")
198
  cmd = None
199
-
200
  # make sure launcher is not None
201
  if args.deepspeed_multinode_launcher is None:
202
  # set to default pdsh
203
  setattr(args, "deepspeed_multinode_launcher", DEEPSPEED_MULTINODE_LAUNCHERS[0])
204
-
205
  if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
206
  cmd = ["deepspeed", "--no_local_rank"]
207
  cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)])
@@ -243,10 +220,8 @@ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict
243
  setattr(args, "nproc_per_node", str(num_processes))
244
  if main_process_port is not None:
245
  setattr(args, "master_port", str(main_process_port))
246
-
247
  if main_process_port is None:
248
  main_process_port = 29500
249
-
250
  # only need to check port availability in main process, in case we have to start multiple launchers on the same machine
251
  # for some reasons like splitting log files.
252
  need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
@@ -256,14 +231,12 @@ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict
256
  "Please specify a different port (such as using the `----main_process_port` flag or specifying a different `main_process_port` in your config file)"
257
  " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
258
  )
259
-
260
  if args.module and args.no_python:
261
  raise ValueError("--module and --no_python cannot be used together")
262
  elif args.module:
263
  setattr(args, "module", True)
264
  elif args.no_python:
265
  setattr(args, "no_python", True)
266
-
267
  current_env = os.environ.copy()
268
  if args.debug:
269
  current_env["ACCELERATE_DEBUG_MODE"] = "true"
@@ -281,7 +254,6 @@ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict
281
  raise ValueError(
282
  f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
283
  )
284
-
285
  current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath("."))
286
  current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
287
  current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower()
@@ -303,8 +275,6 @@ def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict
303
  if args.deepspeed_config_file is not None:
304
  current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file)
305
  return cmd, current_env
306
-
307
-
308
  def prepare_tpu(
309
  args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False
310
  ) -> Tuple[argparse.Namespace, Dict[str, str]]:
@@ -323,23 +293,18 @@ def prepare_tpu(
323
  args.vm = args.tpu_vm
324
  args.tpu = args.tpu_name
325
  return args, current_env
326
-
327
-
328
  def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
329
  if len(nargs) == 0:
330
  return {}
331
  # helper function to infer types for argparse
332
-
333
  def _infer_type(s):
334
  try:
335
  s = float(s)
336
-
337
  if s // 1 == s:
338
  return int(s)
339
  return s
340
  except ValueError:
341
  return s
342
-
343
  parser = argparse.ArgumentParser()
344
  _, unknown = parser.parse_known_args(nargs)
345
  for index, argument in enumerate(unknown):
@@ -360,20 +325,16 @@ def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
360
  parser.add_argument(argument, type=_infer_type)
361
  else:
362
  parser.add_argument(argument, action=action)
363
-
364
  return {
365
  key: (literal_eval(value) if value in ("True", "False") else value)
366
  for key, value in parser.parse_args(nargs).__dict__.items()
367
  }
368
-
369
-
370
  def prepare_sagemager_args_inputs(
371
  sagemaker_config: SageMakerConfig, args: argparse.Namespace
372
  ) -> Tuple[argparse.Namespace, Dict[str, Any]]:
373
  # configure environment
374
  print("Configuring Amazon SageMaker environment")
375
  os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region
376
-
377
  # configure credentials
378
  if sagemaker_config.profile is not None:
379
  os.environ["AWS_PROFILE"] = sagemaker_config.profile
@@ -384,7 +345,6 @@ def prepare_sagemager_args_inputs(
384
  raise EnvironmentError(
385
  "You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile"
386
  )
387
-
388
  # extract needed arguments
389
  source_dir = os.path.dirname(args.training_script)
390
  if not source_dir: # checks if string is empty
@@ -392,24 +352,20 @@ def prepare_sagemager_args_inputs(
392
  entry_point = os.path.basename(args.training_script)
393
  if not entry_point.endswith(".py"):
394
  raise ValueError(f'Your training script should be a python script and not "{entry_point}"')
395
-
396
  print("Converting Arguments to Hyperparameters")
397
  hyperparameters = _convert_nargs_to_dict(args.training_script_args)
398
-
399
  try:
400
  mixed_precision = PrecisionType(args.mixed_precision.lower())
401
  except ValueError:
402
  raise ValueError(
403
  f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
404
  )
405
-
406
  try:
407
  dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
408
  except ValueError:
409
  raise ValueError(
410
  f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
411
  )
412
-
413
  # Environment variables to be set for use during training job
414
  environment = {
415
  "ACCELERATE_USE_SAGEMAKER": "true",
@@ -424,7 +380,6 @@ def prepare_sagemager_args_inputs(
424
  distribution = None
425
  if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:
426
  distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}
427
-
428
  # configure sagemaker inputs
429
  sagemaker_inputs = None
430
  if sagemaker_config.sagemaker_inputs_file is not None:
@@ -437,7 +392,6 @@ def prepare_sagemager_args_inputs(
437
  l = line.split("\t")
438
  sagemaker_inputs[l[0]] = l[1].strip()
439
  print(f"Loaded SageMaker Inputs: {sagemaker_inputs}")
440
-
441
  # configure sagemaker metrics
442
  sagemaker_metrics = None
443
  if sagemaker_config.sagemaker_metrics_file is not None:
@@ -454,7 +408,6 @@ def prepare_sagemager_args_inputs(
454
  }
455
  sagemaker_metrics.append(metric_dict)
456
  print(f"Loaded SageMaker Metrics: {sagemaker_metrics}")
457
-
458
  # configure session
459
  print("Creating Estimator")
460
  args = {
@@ -474,12 +427,9 @@ def prepare_sagemager_args_inputs(
474
  "environment": environment,
475
  "metric_definitions": sagemaker_metrics,
476
  }
477
-
478
  if sagemaker_config.additional_args is not None:
479
  args = merge_dicts(sagemaker_config.additional_args, args)
480
  return args, sagemaker_inputs
481
-
482
-
483
  def env_var_path_add(env_var_name, path_to_add):
484
  """
485
  Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the
@@ -488,12 +438,9 @@ def env_var_path_add(env_var_name, path_to_add):
488
  paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0]
489
  paths.append(str(path_to_add))
490
  return ":".join(paths)
491
-
492
-
493
  class PrepareForLaunch:
494
  """
495
  Prepare a function that will be launched in a distributed setup.
496
-
497
  Args:
498
  launcher (`Callable`):
499
  The function to launch.
@@ -506,7 +453,6 @@ class PrepareForLaunch:
506
  self.launcher = launcher
507
  self.distributed_type = DistributedType(distributed_type)
508
  self.debug = debug
509
-
510
  def __call__(self, index, *args):
511
  if self.debug:
512
  world_size = int(os.environ.get("WORLD_SIZE"))
@@ -528,6 +474,5 @@ class PrepareForLaunch:
528
  nproc = int(os.environ.get("NPROC", 1))
529
  node_rank = int(os.environ.get("NODE_RANK", 0))
530
  os.environ["RANK"] = str(nproc * node_rank + index)
531
-
532
  os.environ["FORK_LAUNCHED"] = str(1)
533
  self.launcher(*args)
 
7
  if key in vars(new_args).keys():
8
  setattr(new_args, key, value)
9
  return new_args
 
 
10
  def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
11
  """
12
  Prepares and returns the command list and an environment with the correct simple launcher environment variables.
 
20
  cmd.append("-m")
21
  cmd.append(args.training_script)
22
  cmd.extend(args.training_script_args)
 
23
  current_env = os.environ.copy()
24
  current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu)
25
  if args.debug:
 
37
  elif args.num_processes > 1:
38
  current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1"
39
  current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500"
 
40
  try:
41
  mixed_precision = PrecisionType(args.mixed_precision.lower())
42
  except ValueError:
43
  raise ValueError(
44
  f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
45
  )
 
46
  current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
 
47
  try:
48
  dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
49
  except ValueError:
 
54
  current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
55
  current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
56
  current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
 
57
  current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
58
  if is_ipex_available():
59
  current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower()
60
  current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower()
61
  return cmd, current_env
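# NOTE (added example, not part of the original sources): a hedged sketch of how the
# `(cmd, current_env)` pair returned by `prepare_simple_launcher_cmd_env` could be consumed.
# The helper name is illustrative; error handling is kept to the bare minimum.
import subprocess


def run_simple_launcher(cmd, current_env):
    # Run the training script as a child process with the prepared environment and
    # surface a non-zero exit code to the caller.
    process = subprocess.Popen(cmd, env=current_env)
    process.wait()
    if process.returncode != 0:
        raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)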
 
 
62
  def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]:
63
  """
64
  Prepares and returns an environment with the correct multi-GPU environment variables.
 
80
  setattr(args, "nproc_per_node", str(num_processes))
81
  if main_process_port is not None:
82
  setattr(args, "master_port", str(main_process_port))
 
83
  if main_process_port is None:
84
  main_process_port = 29500
 
85
  # only need to check port availability in main process, in case we have to start multiple launchers on the same machine
86
  # for some reasons like splitting log files.
87
  need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
 
91
  "Please specify a different port (such as using the `----main_process_port` flag or specifying a different `main_process_port` in your config file)"
92
  " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
93
  )
 
94
  if args.module and args.no_python:
95
  raise ValueError("--module and --no_python cannot be used together")
96
  elif args.module:
97
  setattr(args, "module", True)
98
  elif args.no_python:
99
  setattr(args, "no_python", True)
 
100
  current_env = os.environ.copy()
101
  if args.debug:
102
  current_env["ACCELERATE_DEBUG_MODE"] = "true"
 
113
  mixed_precision = PrecisionType(mixed_precision)
114
  except ValueError:
115
  raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.")
 
116
  current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
 
117
  try:
118
  dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
119
  except ValueError:
 
124
  current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode
125
  current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph)
126
  current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic)
 
127
  if args.use_fsdp:
128
  current_env["ACCELERATE_USE_FSDP"] = "true"
129
  if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states:
130
  raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`")
 
131
  current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy)
132
  current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower()
133
  current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params)
 
150
  current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower()
151
  current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower()
152
  current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower()
 
153
  if args.use_megatron_lm:
154
  prefix = "MEGATRON_LM_"
155
  current_env["ACCELERATE_USE_MEGATRON_LM"] = "true"
 
164
  current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations)
165
  if args.megatron_lm_use_distributed_optimizer is not None:
166
  current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer)
 
167
  current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process)
168
  return current_env
 
 
169
  def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]:
170
  """
171
  Prepares and returns the command list and an environment with the correct DeepSpeed environment variables.
 
175
  main_process_ip = getattr(args, "main_process_ip")
176
  main_process_port = getattr(args, "main_process_port")
177
  cmd = None
 
178
  # make sure launcher is not None
179
  if args.deepspeed_multinode_launcher is None:
180
  # set to default pdsh
181
  setattr(args, "deepspeed_multinode_launcher", DEEPSPEED_MULTINODE_LAUNCHERS[0])
 
182
  if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
183
  cmd = ["deepspeed", "--no_local_rank"]
184
  cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)])
 
220
  setattr(args, "nproc_per_node", str(num_processes))
221
  if main_process_port is not None:
222
  setattr(args, "master_port", str(main_process_port))
 
223
  if main_process_port is None:
224
  main_process_port = 29500
 
225
  # only need to check port availability in main process, in case we have to start multiple launchers on the same machine
226
  # for some reasons like splitting log files.
227
  need_port_check = num_machines <= 1 or int(args.machine_rank) == 0
 
231
  "Please specify a different port (such as using the `----main_process_port` flag or specifying a different `main_process_port` in your config file)"
232
  " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`."
233
  )
 
234
  if args.module and args.no_python:
235
  raise ValueError("--module and --no_python cannot be used together")
236
  elif args.module:
237
  setattr(args, "module", True)
238
  elif args.no_python:
239
  setattr(args, "no_python", True)
 
240
  current_env = os.environ.copy()
241
  if args.debug:
242
  current_env["ACCELERATE_DEBUG_MODE"] = "true"
 
254
  raise ValueError(
255
  f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
256
  )
 
257
  current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath("."))
258
  current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
259
  current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower()
 
275
  if args.deepspeed_config_file is not None:
276
  current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file)
277
  return cmd, current_env
 
 
278
  def prepare_tpu(
279
  args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False
280
  ) -> Tuple[argparse.Namespace, Dict[str, str]]:
 
293
  args.vm = args.tpu_vm
294
  args.tpu = args.tpu_name
295
  return args, current_env
 
 
296
  def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
297
  if len(nargs) == 0:
298
  return {}
299
  # helper function to infer types for argparse
 
300
  def _infer_type(s):
301
  try:
302
  s = float(s)
 
303
  if s // 1 == s:
304
  return int(s)
305
  return s
306
  except ValueError:
307
  return s
 
308
  parser = argparse.ArgumentParser()
309
  _, unknown = parser.parse_known_args(nargs)
310
  for index, argument in enumerate(unknown):
 
325
  parser.add_argument(argument, type=_infer_type)
326
  else:
327
  parser.add_argument(argument, action=action)
 
328
  return {
329
  key: (literal_eval(value) if value in ("True", "False") else value)
330
  for key, value in parser.parse_args(nargs).__dict__.items()
331
  }
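# NOTE (added example, not part of the original sources): `_convert_nargs_to_dict` infers value
# types for leftover CLI tokens. Below is a standalone sketch of the same inference rule used by
# `_infer_type` above: floats that survive `// 1` unchanged become ints, non-numeric strings stay
# strings. The function name is illustrative.
def infer_type(s):
    try:
        f = float(s)
        return int(f) if f // 1 == f else f
    except ValueError:
        return s


assert infer_type("3") == 3
assert infer_type("1e-3") == 0.001
assert infer_type("adamw") == "adamw"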
 
 
332
  def prepare_sagemager_args_inputs(
333
  sagemaker_config: SageMakerConfig, args: argparse.Namespace
334
  ) -> Tuple[argparse.Namespace, Dict[str, Any]]:
335
  # configure environment
336
  print("Configuring Amazon SageMaker environment")
337
  os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region
 
338
  # configure credentials
339
  if sagemaker_config.profile is not None:
340
  os.environ["AWS_PROFILE"] = sagemaker_config.profile
 
345
  raise EnvironmentError(
346
  "You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile"
347
  )
 
348
  # extract needed arguments
349
  source_dir = os.path.dirname(args.training_script)
350
  if not source_dir: # checks if string is empty
 
352
  entry_point = os.path.basename(args.training_script)
353
  if not entry_point.endswith(".py"):
354
  raise ValueError(f'Your training script should be a python script and not "{entry_point}"')
 
355
  print("Converting Arguments to Hyperparameters")
356
  hyperparameters = _convert_nargs_to_dict(args.training_script_args)
 
357
  try:
358
  mixed_precision = PrecisionType(args.mixed_precision.lower())
359
  except ValueError:
360
  raise ValueError(
361
  f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
362
  )
 
363
  try:
364
  dynamo_backend = DynamoBackend(args.dynamo_backend.upper())
365
  except ValueError:
366
  raise ValueError(
367
  f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}."
368
  )
 
369
  # Environment variables to be set for use during training job
370
  environment = {
371
  "ACCELERATE_USE_SAGEMAKER": "true",
 
380
  distribution = None
381
  if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:
382
  distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}
 
383
  # configure sagemaker inputs
384
  sagemaker_inputs = None
385
  if sagemaker_config.sagemaker_inputs_file is not None:
 
392
  l = line.split("\t")
393
  sagemaker_inputs[l[0]] = l[1].strip()
394
  print(f"Loaded SageMaker Inputs: {sagemaker_inputs}")
 
395
  # configure sagemaker metrics
396
  sagemaker_metrics = None
397
  if sagemaker_config.sagemaker_metrics_file is not None:
 
408
  }
409
  sagemaker_metrics.append(metric_dict)
410
  print(f"Loaded SageMaker Metrics: {sagemaker_metrics}")
 
411
  # configure session
412
  print("Creating Estimator")
413
  args = {
 
427
  "environment": environment,
428
  "metric_definitions": sagemaker_metrics,
429
  }
 
430
  if sagemaker_config.additional_args is not None:
431
  args = merge_dicts(sagemaker_config.additional_args, args)
432
  return args, sagemaker_inputs
 
 
433
  def env_var_path_add(env_var_name, path_to_add):
434
  """
435
  Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the
 
438
  paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0]
439
  paths.append(str(path_to_add))
440
  return ":".join(paths)
 
 
441
  class PrepareForLaunch:
442
  """
443
  Prepare a function that will be launched in a distributed setup.
 
444
  Args:
445
  launcher (`Callable`):
446
  The function to launch.
 
453
  self.launcher = launcher
454
  self.distributed_type = DistributedType(distributed_type)
455
  self.debug = debug
 
456
  def __call__(self, index, *args):
457
  if self.debug:
458
  world_size = int(os.environ.get("WORLD_SIZE"))
 
474
  nproc = int(os.environ.get("NPROC", 1))
475
  node_rank = int(os.environ.get("NODE_RANK", 0))
476
  os.environ["RANK"] = str(nproc * node_rank + index)
 
477
  os.environ["FORK_LAUNCHED"] = str(1)
478
  self.launcher(*args)
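# NOTE (added example, not part of the original sources): a minimal sketch of how a
# PrepareForLaunch-style wrapper is typically handed to torch.multiprocessing.spawn, which is
# roughly what accelerate's notebook launcher does. The training function is a placeholder and
# distributed_type="NO" is chosen so no torch.distributed environment is required.
import torch


def _toy_training_function():
    print("hello from one spawned process")


if __name__ == "__main__":
    launcher = PrepareForLaunch(_toy_training_function, distributed_type="NO")
    # spawn() calls launcher(process_index, *args) once per process
    torch.multiprocessing.spawn(launcher, args=(), nprocs=2)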
src/utils/megatron_lm.py CHANGED
@@ -37,8 +37,6 @@ def model_provider_func(pre_process=True, post_process=True, add_encoder=True, a
37
  else:
38
  raise ValueError(f"Unsupported model type: {args.model_type_name}")
39
  return model
40
-
41
-
42
  def prepare_model(accelerator):
43
  accelerator.print("Preparing model")
44
  args = get_args()
@@ -58,13 +56,10 @@ def prepare_model(accelerator):
58
  args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2
59
  model = get_model(model_provider_func, model_type)
60
  return model
61
-
62
-
63
  # dataloader utilities
64
  class MegatronLMDummyDataLoader:
65
  """
66
  Dummy dataloader presents model parameters or param groups, this is primarily used to follow the conventional training loop.
67
-
68
  Args:
69
  **dataset_kwargs: Megatron data arguments.
70
  """
@@ -76,12 +71,10 @@ class MegatronLMDummyDataLoader:
76
  self.dataset_args = vars(data_args[0])
77
  self.dataset_args.update(dataset_kwargs)
78
  self.dataset_args["megatron_dataset_flag"] = True
79
-
80
  def set_megatron_data_args(self):
81
  args = get_args()
82
  for key, value in self.dataset_args.items():
83
  setattr(args, key, value)
84
-
85
  def get_train_valid_test_datasets_provider(self):
86
  def train_valid_test_datasets_provider(train_val_test_num_samples):
87
  """Build train, valid, and test datasets."""
@@ -127,15 +120,12 @@ class MegatronLMDummyDataLoader:
127
  from megatron.data.dataset_utils import build_train_valid_test_datasets
128
  train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args)
129
  return train_ds, valid_ds, test_ds
130
-
131
  return train_valid_test_datasets_provider
132
-
133
  def build_pretraining_data_loader(self, dataset, consumed_samples):
134
  if dataset is None:
135
  return None
136
  args = get_args()
137
  micro_batch_size = args.micro_batch_size * args.num_micro_batches
138
-
139
  # Megatron sampler
140
  if args.dataloader_type == "single":
141
  batch_sampler = MegatronPretrainingSampler(
@@ -157,24 +147,18 @@ class MegatronLMDummyDataLoader:
157
  )
158
  else:
159
  raise Exception("{} dataloader type is not supported.".format(args.dataloader_type))
160
-
161
  # Torch dataloader.
162
  return torch.utils.data.DataLoader(
163
  dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True
164
  )
165
-
166
  def build_train_valid_test_data_iterators(self):
167
  def cyclic_iter(iter):
168
  while True:
169
  for x in iter:
170
  yield x
171
-
172
  args = get_args()
173
-
174
  (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
175
-
176
  print_rank_0("> building train, validation, and test datasets ...")
177
-
178
  # Backward compatibility, assume fixed batch size.
179
  if args.iteration > 0 and args.consumed_train_samples == 0:
180
  assert args.train_samples is None, "only backward compatibility support for iteration-based training"
@@ -184,7 +168,6 @@ class MegatronLMDummyDataLoader:
184
  args.consumed_valid_samples = (
185
  (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size
186
  )
187
-
188
  # Data loader only on rank 0 of each model parallel group.
189
  if mpu.get_tensor_model_parallel_rank() == 0:
190
  # Number of train/valid/test samples.
@@ -203,16 +186,13 @@ class MegatronLMDummyDataLoader:
203
  print_rank_0(" train: {}".format(train_val_test_num_samples[0]))
204
  print_rank_0(" validation: {}".format(train_val_test_num_samples[1]))
205
  print_rank_0(" test: {}".format(train_val_test_num_samples[2]))
206
-
207
  # Build the datasets.
208
  train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider()
209
  train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples)
210
-
211
  # Build dataloaders.
212
  train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples)
213
  valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples)
214
  test_dataloader = self.build_pretraining_data_loader(test_ds, 0)
215
-
216
  # Flags to know if we need to do training/validation/testing.
217
  do_train = train_dataloader is not None and args.train_iters > 0
218
  do_valid = valid_dataloader is not None and args.eval_iters > 0
@@ -221,7 +201,6 @@ class MegatronLMDummyDataLoader:
221
  flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])
222
  else:
223
  flags = torch.cuda.LongTensor([0, 0, 0])
224
-
225
  # Broadcast num tokens.
226
  torch.distributed.broadcast(
227
  flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()
@@ -229,39 +208,31 @@ class MegatronLMDummyDataLoader:
229
  args.do_train = flags[0].item()
230
  args.do_valid = flags[1].item()
231
  args.do_test = flags[2].item()
232
-
233
  # Build iterators.
234
  dl_type = args.dataloader_type
235
  assert dl_type in ["single", "cyclic"]
236
-
237
  if train_dataloader is not None:
238
  train_data_iterator = (
239
  iter(train_dataloader) if dl_type == "single" else iter(cyclic_iter(train_dataloader))
240
  )
241
  else:
242
  train_data_iterator = None
243
-
244
  if valid_dataloader is not None:
245
  valid_data_iterator = (
246
  iter(valid_dataloader) if dl_type == "single" else iter(cyclic_iter(valid_dataloader))
247
  )
248
  else:
249
  valid_data_iterator = None
250
-
251
  if test_dataloader is not None:
252
  test_data_iterator = iter(test_dataloader) if dl_type == "single" else iter(cyclic_iter(test_dataloader))
253
  else:
254
  test_data_iterator = None
255
-
256
  return train_data_iterator, valid_data_iterator, test_data_iterator
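# NOTE (added example, not part of the original sources): the `cyclic_iter` helper defined at the
# top of build_train_valid_test_data_iterators restarts a finite iterable forever, which is how a
# fixed-size dataloader is stretched over an arbitrary number of training iterations. A tiny
# standalone demonstration (names are illustrative):
def cycle_forever(iterable):
    while True:
        for x in iterable:
            yield x


it = cycle_forever([1, 2, 3])
assert [next(it) for _ in range(7)] == [1, 2, 3, 1, 2, 3, 1]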
257
-
258
-
259
  def prepare_data_loader(accelerator, dataloader):
260
  accelerator.print("Preparing dataloader")
261
  args = get_args()
262
  if not args.megatron_dataset_flag:
263
  from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader
264
-
265
  args = get_args()
266
  micro_batch_size = args.micro_batch_size * args.num_micro_batches
267
  kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS}
@@ -276,7 +247,6 @@ def prepare_data_loader(accelerator, dataloader):
276
  else:
277
  del kwargs["batch_sampler"]
278
  kwargs["batch_size"] = micro_batch_size
279
-
280
  dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs)
281
  return prepare_data_loader(
282
  dataloader,
@@ -303,38 +273,28 @@ def prepare_data_loader(accelerator, dataloader):
303
  test_data_iterator,
304
  ) = dataloader.build_train_valid_test_data_iterators()
305
  return train_data_iterator, valid_data_iterator, test_data_iterator
306
-
307
-
308
  # optimizer utilities
309
  class MegatronLMOptimizerWrapper(AcceleratedOptimizer):
310
  def __init__(self, optimizer):
311
  super().__init__(optimizer, device_placement=False, scaler=None)
312
-
313
  def zero_grad(self, set_to_none=None):
314
  pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed
315
-
316
  def step(self):
317
  pass # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed
318
-
319
  @property
320
  def step_was_skipped(self):
321
  """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
322
  return self.optimizer.skipped_iter
323
-
324
-
325
  def prepare_optimizer(accelerator, model):
326
  accelerator.print("Preparing optimizer")
327
  args = get_args()
328
  optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)
329
  return optimizer
330
-
331
-
332
  # scheduler utilities
333
  class MegatronLMDummyScheduler:
334
  """
335
  Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training
336
  loop when scheduler config is specified in the deepspeed config file.
337
-
338
  Args:
339
  optimizer (`torch.optim.optimizer.Optimizer`):
340
  The optimizer to wrap.
@@ -350,43 +310,29 @@ class MegatronLMDummyScheduler:
350
  self.total_num_steps = total_num_steps
351
  self.warmup_num_steps = warmup_num_steps
352
  self.kwargs = kwargs
353
-
354
-
355
  class MegatronLMSchedulerWrapper(AcceleratedScheduler):
356
  def __init__(self, scheduler, optimizers):
357
  super().__init__(scheduler, optimizers)
358
-
359
  def step(self, *args, **kwargs):
360
  return # `model(**batch)` is doing that automatically. Therefore, it's implementation is not needed
361
-
362
-
363
  def prepare_scheduler(accelerator, optimizer, scheduler):
364
  accelerator.print("Preparing scheduler")
365
  scheduler = get_optimizer_param_scheduler(optimizer)
366
  return scheduler
367
-
368
-
369
  class AbstractTrainStep(ABC):
370
  """Abstract class for batching, forward pass and loss handler."""
371
-
372
  def __init__(self, name):
373
  super().__init__()
374
  self.name = name
375
-
376
  def get_batch_func(self):
377
  pass
378
-
379
  def get_forward_step_func(self):
380
  pass
381
-
382
  def get_loss_func(self):
383
  pass
384
-
385
-
386
  class BertTrainStep(AbstractTrainStep):
387
  """
388
  Bert train step class.
389
-
390
  Args:
391
  args (`argparse.Namespace`): Megatron-LM arguments.
392
  """
@@ -399,22 +345,18 @@ class BertTrainStep(AbstractTrainStep):
399
  self.model_output_class = None
400
  else:
401
  self.model_output_class = SequenceClassifierOutput
402
-
403
  def get_batch_func(self, megatron_dataset_flag):
404
  def get_batch_megatron(data_iterator):
405
  """Build the batch."""
406
-
407
  # Items and their type.
408
  keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"]
409
  datatype = torch.int64
410
-
411
  # Broadcast data.
412
  if data_iterator is not None:
413
  data = next(data_iterator)
414
  else:
415
  data = None
416
  data_b = mpu.broadcast_data(keys, data, datatype)
417
-
418
  # Unpack.
419
  tokens = data_b["text"].long()
420
  types = data_b["types"].long()
@@ -422,14 +364,11 @@ class BertTrainStep(AbstractTrainStep):
422
  loss_mask = data_b["loss_mask"].float()
423
  lm_labels = data_b["labels"].long()
424
  padding_mask = data_b["padding_mask"].long()
425
-
426
  return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
427
-
428
  def get_batch_transformer(data_iterator):
429
  """Build the batch."""
430
  data = next(data_iterator)
431
  data = send_to_device(data, torch.cuda.current_device())
432
-
433
  # Unpack.
434
  tokens = data["input_ids"].long()
435
  padding_mask = data["attention_mask"].long()
@@ -447,34 +386,27 @@ class BertTrainStep(AbstractTrainStep):
447
  sentence_order = data["next_sentence_label"].long()
448
  else:
449
  sentence_order = None
450
-
451
  return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
452
-
453
  if megatron_dataset_flag:
454
  return get_batch_megatron
455
  else:
456
  return get_batch_transformer
457
-
458
  def get_loss_func(self, pretraining_flag, num_labels):
459
  def loss_func_pretrain(loss_mask, sentence_order, output_tensor):
460
  lm_loss_, sop_logits = output_tensor
461
-
462
  lm_loss_ = lm_loss_.float()
463
  loss_mask = loss_mask.float()
464
  lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
465
-
466
  if sop_logits is not None:
467
  sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1)
468
  sop_loss = sop_loss.float()
469
  loss = lm_loss + sop_loss
470
  averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss])
471
  return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]}
472
-
473
  else:
474
  loss = lm_loss
475
  averaged_losses = average_losses_across_data_parallel_group([lm_loss])
476
  return loss, {"lm loss": averaged_losses[0]}
477
-
478
  def loss_func_finetune(labels, logits):
479
  if num_labels == 1:
480
  # We are doing regression
@@ -488,12 +420,10 @@ class BertTrainStep(AbstractTrainStep):
488
  loss = loss_fct(logits, labels)
489
  averaged_losses = average_losses_across_data_parallel_group([loss])
490
  return loss, {"loss": averaged_losses[0]}
491
-
492
  if pretraining_flag:
493
  return loss_func_pretrain
494
  else:
495
  return loss_func_finetune
496
-
497
  def get_forward_step_func(self, pretraining_flag, bert_binary_head):
498
  def forward_step(data_iterator, model):
499
  """Forward step."""
@@ -507,14 +437,10 @@ class BertTrainStep(AbstractTrainStep):
507
  else:
508
  logits = model(tokens, padding_mask, tokentype_ids=types)
509
  return logits, partial(self.loss_func, labels)
510
-
511
  return forward_step
512
-
513
-
514
  class GPTTrainStep(AbstractTrainStep):
515
  """
516
  GPT train step class.
517
-
518
  Args:
519
  args (`argparse.Namespace`): Megatron-LM arguments.
520
  """
@@ -534,38 +460,31 @@ class GPTTrainStep(AbstractTrainStep):
534
  self.model_output_class = None
535
  else:
536
  self.model_output_class = CausalLMOutputWithCrossAttentions
537
-
538
  def get_batch_func(self, megatron_dataset_flag):
539
  def get_batch_megatron(data_iterator):
540
  """Generate a batch"""
541
  # Items and their type.
542
  keys = ["text"]
543
  datatype = torch.int64
544
-
545
  # Broadcast data.
546
  if data_iterator is not None:
547
  data = next(data_iterator)
548
  else:
549
  data = None
550
  data_b = mpu.broadcast_data(keys, data, datatype)
551
-
552
  # Unpack.
553
  tokens_ = data_b["text"].long()
554
  labels = tokens_[:, 1:].contiguous()
555
  tokens = tokens_[:, :-1].contiguous()
556
-
557
  # Get the masks and position ids.
558
  attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
559
  tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss
560
  )
561
-
562
  return tokens, labels, loss_mask, attention_mask, position_ids
563
-
564
  def get_batch_transformer(data_iterator):
565
  data = next(data_iterator)
566
  data = {"input_ids": data["input_ids"]}
567
  data = send_to_device(data, torch.cuda.current_device())
568
-
569
  tokens_ = data["input_ids"].long()
570
  padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token
571
  tokens_ = torch.concat([tokens_, padding], dim=1)
@@ -576,15 +495,12 @@ class GPTTrainStep(AbstractTrainStep):
576
  tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True
577
  )
578
  return tokens, labels, loss_mask, attention_mask, position_ids
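# NOTE (added example, not part of the original sources): both batch builders above rely on the
# standard next-token shift for causal LM training: inputs are the sequence without its last token
# and labels are the sequence without its first token. A tiny illustration:
import torch

token_ids = torch.tensor([[11, 12, 13, 14]])
inputs = token_ids[:, :-1]   # the model sees   [11, 12, 13]
labels = token_ids[:, 1:]    # and must predict [12, 13, 14]
assert inputs.tolist() == [[11, 12, 13]] and labels.tolist() == [[12, 13, 14]]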
579
-
580
  if megatron_dataset_flag:
581
  return get_batch_megatron
582
  else:
583
  return get_batch_transformer
584
-
585
  def get_loss_func(self):
586
  args = get_args()
587
-
588
  def loss_func(loss_mask, output_tensor):
589
  if args.return_logits:
590
  losses, logits = output_tensor
@@ -593,33 +509,24 @@ class GPTTrainStep(AbstractTrainStep):
593
  losses = losses.float()
594
  loss_mask = loss_mask.view(-1).float()
595
  loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
596
-
597
  # Reduce loss for logging.
598
  averaged_loss = average_losses_across_data_parallel_group([loss])
599
-
600
  output_dict = {"lm loss": averaged_loss[0]}
601
  if args.return_logits:
602
  output_dict.update({"logits": logits})
603
  return loss, output_dict
604
-
605
  return loss_func
606
-
607
  def get_forward_step_func(self):
608
  def forward_step(data_iterator, model):
609
  """Forward step."""
610
  # Get the batch.
611
  tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)
612
  output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
613
-
614
  return output_tensor, partial(self.loss_func, loss_mask)
615
-
616
  return forward_step
617
-
618
-
619
  class T5TrainStep(AbstractTrainStep):
620
  """
621
  T5 train step class.
622
-
623
  Args:
624
  args (`argparse.Namespace`): Megatron-LM arguments.
625
  """
@@ -632,7 +539,6 @@ class T5TrainStep(AbstractTrainStep):
632
  self.model_output_class = None
633
  else:
634
  self.model_output_class = Seq2SeqLMOutput
635
-
636
  @staticmethod
637
  def attn_mask_postprocess(attention_mask):
638
  # We create a 3D attention mask from a 2D tensor mask.
@@ -645,13 +551,11 @@ class T5TrainStep(AbstractTrainStep):
645
  # Convert attention mask to binary:
646
  extended_attention_mask = attention_mask_bss < 0.5
647
  return extended_attention_mask
648
-
649
  @staticmethod
650
  def get_decoder_mask(seq_length, device):
651
  attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device))
652
  attention_mask = attention_mask < 0.5
653
  return attention_mask
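# NOTE (added example, not part of the original sources): the `< 0.5` comparison above turns the
# lower-triangular matrix into a boolean mask in Megatron's convention, where True marks positions
# to block, i.e. everything strictly above the diagonal. For seq_length=3:
import torch

mask = torch.tril(torch.ones((1, 3, 3))) < 0.5
assert mask.int().tolist() == [[[0, 1, 1],
                                [0, 0, 1],
                                [0, 0, 0]]]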
654
-
655
  @staticmethod
656
  def get_enc_dec_mask(attention_mask, dec_seq_length, device):
657
  batch_size, _ = attention_mask.shape
@@ -663,38 +567,30 @@ class T5TrainStep(AbstractTrainStep):
663
  attention_mask_bss = attention_mask_bs1 * attention_mask_b1s
664
  extended_attention_mask = attention_mask_bss < 0.5
665
  return extended_attention_mask
666
-
667
  def get_batch_func(self, megatron_dataset_flag):
668
  def get_batch_megatron(data_iterator):
669
  """Build the batch."""
670
-
671
  keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"]
672
  datatype = torch.int64
673
-
674
  # Broadcast data.
675
  if data_iterator is not None:
676
  data = next(data_iterator)
677
  else:
678
  data = None
679
  data_b = mpu.broadcast_data(keys, data, datatype)
680
-
681
  # Unpack.
682
  tokens_enc = data_b["text_enc"].long()
683
  tokens_dec = data_b["text_dec"].long()
684
  labels = data_b["labels"].long()
685
  loss_mask = data_b["loss_mask"].float()
686
-
687
  enc_mask = data_b["enc_mask"] < 0.5
688
  dec_mask = data_b["dec_mask"] < 0.5
689
  enc_dec_mask = data_b["enc_dec_mask"] < 0.5
690
-
691
  return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
692
-
693
  def get_batch_transformer(data_iterator):
694
  """Build the batch."""
695
  data = next(data_iterator)
696
  data = send_to_device(data, torch.cuda.current_device())
697
-
698
  tokens_enc = data["input_ids"].long()
699
  labels = data["labels"].long()
700
  loss_mask = (labels != -100).to(torch.float)
@@ -710,26 +606,19 @@ class T5TrainStep(AbstractTrainStep):
710
  enc_dec_mask = T5TrainStep.get_enc_dec_mask(
711
  data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device
712
  )
713
-
714
  return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
715
-
716
  if megatron_dataset_flag:
717
  return get_batch_megatron
718
  else:
719
  return get_batch_transformer
720
-
721
  def get_loss_func(self):
722
  def loss_func(loss_mask, output_tensor):
723
  lm_loss_ = output_tensor.float()
724
  lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
725
-
726
  loss = lm_loss
727
  averaged_losses = average_losses_across_data_parallel_group([lm_loss])
728
-
729
  return loss, {"lm loss": averaged_losses[0]}
730
-
731
  return loss_func
732
-
733
  def get_forward_step_func(self):
734
  def forward_step(data_iterator, model):
735
  """Forward step."""
@@ -741,20 +630,14 @@ class T5TrainStep(AbstractTrainStep):
741
  output_tensor = model(
742
  tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels
743
  )
744
-
745
  return output_tensor, partial(self.loss_func, loss_mask)
746
-
747
  return forward_step
748
-
749
-
750
  # initialize Megatron-LM setup
751
  def initialize(accelerator, extra_args_provider=None, args_defaults={}):
752
  accelerator.print("Initializing Megatron-LM")
753
  assert torch.cuda.is_available(), "Megatron requires CUDA."
754
-
755
  # Parse arguments
756
  args = parse_args(extra_args_provider, ignore_unknown_args=True)
757
-
758
  # Set defaults
759
  for key, value in args_defaults.items():
760
  if getattr(args, key, None) is not None:
@@ -767,17 +650,13 @@ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
767
  flush=True,
768
  )
769
  setattr(args, key, value)
770
-
771
  if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False):
772
  assert args.load is not None, "--use-checkpoints-args requires --load argument"
773
  load_args_from_checkpoint(args)
774
-
775
  validate_args(args)
776
-
777
  # set global args, build tokenizer, and set adlr-autoresume,
778
  # tensorboard-writer, and timers.
779
  set_global_variables(args)
780
-
781
  # torch.distributed initialization
782
  def finish_mpu_init():
783
  args = get_args()
@@ -791,7 +670,6 @@ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
791
  assert args.local_rank == device, "expected local-rank to be the same as rank % device-count."
792
  else:
793
  args.local_rank = device
794
-
795
  # Set the tensor model-parallel, pipeline model-parallel, and
796
  # data-parallel communicators.
797
  if mpu.model_parallel_is_initialized():
@@ -803,23 +681,17 @@ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
803
  args.virtual_pipeline_model_parallel_size,
804
  args.pipeline_model_parallel_split_rank,
805
  )
806
-
807
  # Random seeds for reproducibility.
808
  if args.rank == 0:
809
  print("> setting random seeds to {} ...".format(args.seed))
810
  _set_random_seed(args.seed, args.data_parallel_random_init)
811
-
812
  args = get_args()
813
-
814
  # Megatron's MPU is the master. Complete initialization right away.
815
  finish_mpu_init()
816
-
817
  # Autoresume.
818
  _init_autoresume()
819
-
820
  # Compile dependencies.
821
  _compile_dependencies()
822
-
823
  # Set pytorch JIT layer fusion options and warmup JIT functions.
824
  set_jit_fusion_options()
825
  args = get_args()
@@ -829,12 +701,9 @@ def initialize(accelerator, extra_args_provider=None, args_defaults={}):
829
  else:
830
  args.bert_binary_head = False
831
  args.iteration = 0
832
-
833
-
834
  class MegatronEngine(torch.nn.Module):
835
  """
836
  Megatron-LM model wrapper
837
-
838
  Args:
839
  accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use.
840
  model: Megatron-LM model
@@ -861,7 +730,6 @@ class MegatronEngine(torch.nn.Module):
861
  else:
862
  raise ValueError(f"Unsupported model type: {args.model_type_name}")
863
  self.optimizer.skipped_iter = False
864
-
865
  # Tracking loss.
866
  self.total_loss_dict = {}
867
  self.eval_total_loss_dict = {}
@@ -869,26 +737,21 @@ class MegatronEngine(torch.nn.Module):
869
  self.report_memory_flag = True
870
  if args.tensorboard_dir is not None:
871
  write_args_to_tensorboard()
872
-
873
  def train(self):
874
  for model_module in self.module:
875
  model_module.train()
876
  self.log_eval_results()
877
-
878
  def eval(self):
879
  for model_module in self.module:
880
  model_module.eval()
881
-
882
  def train_step(self, **batch_data):
883
  """
884
  Training step for Megatron-LM
885
-
886
  Args:
887
  batch_data (:obj:`dict`): The batch data to train on.
888
  """
889
  args = get_args()
890
  timers = get_timers()
891
-
892
  if len(batch_data) > 0:
893
  data_chunks = []
894
  if args.num_micro_batches > 1:
@@ -901,7 +764,6 @@ class MegatronEngine(torch.nn.Module):
901
  )
902
  else:
903
  data_chunks = [batch_data]
904
-
905
  if len(self.module) > 1:
906
  batch_data_iterator = (
907
  [iter(data_chunks) for _ in range(len(self.module))]
@@ -910,13 +772,11 @@ class MegatronEngine(torch.nn.Module):
910
  )
911
  else:
912
  batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None
913
-
914
  # Set grad to zero.
915
  if args.DDP_impl == "local" and args.use_contiguous_buffers_in_local_ddp:
916
  for partition in self.module:
917
  partition.zero_grad_buffer()
918
  self.optimizer.zero_grad()
919
-
920
  # Forward pass.
921
  forward_backward_func = get_forward_backward_func()
922
  losses_reduced = forward_backward_func(
@@ -927,27 +787,22 @@ class MegatronEngine(torch.nn.Module):
927
  None,
928
  forward_only=False,
929
  )
930
-
931
  # Empty unused memory.
932
  if args.empty_unused_memory_level >= 1:
933
  torch.cuda.empty_cache()
934
-
935
  # Reduce gradients.
936
  timers("backward-reduce-model-grads").start()
937
  self.optimizer.reduce_model_grads(args, timers)
938
  timers("backward-reduce-model-grads").stop()
939
-
940
  # Update parameters.
941
  timers("optimizer").start()
942
  update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers)
943
  timers("optimizer").stop()
944
-
945
  # Gather params.
946
  if update_successful:
947
  timers("backward-gather-model-params").start()
948
  self.optimizer.gather_model_params(args, timers)
949
  timers("backward-gather-model-params").stop()
950
-
951
  # Update learning rate.
952
  if update_successful:
953
  if self.scheduler is not None:
@@ -956,17 +811,13 @@ class MegatronEngine(torch.nn.Module):
956
  skipped_iter = 0
957
  else:
958
  skipped_iter = 1
959
-
960
  self.optimizer.skipped_iter = not update_successful
961
-
962
  # Empty unused memory.
963
  if args.empty_unused_memory_level >= 2:
964
  torch.cuda.empty_cache()
965
-
966
  args.consumed_train_samples += (
967
  mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
968
  )
969
-
970
  if mpu.is_pipeline_last_stage(ignore_virtual=True):
971
  # Average loss across microbatches.
972
  loss_reduced = {}
@@ -978,11 +829,9 @@ class MegatronEngine(torch.nn.Module):
978
  loss_reduced[key] = torch.concat(losses_reduced_for_key)
979
  return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
980
  return {}, skipped_iter, grad_norm, num_zeros_in_grad
981
-
982
  def eval_step(self, **batch_data):
983
  """
984
  Evaluation step for Megatron-LM
985
-
986
  Args:
987
  batch_data (:obj:`dict`): The batch data to evaluate on.
988
  """
@@ -995,7 +844,6 @@ class MegatronEngine(torch.nn.Module):
995
  )
996
  else:
997
  data_chunks = [batch_data]
998
-
999
  if len(self.module) > 1:
1000
  batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))]
1001
  else:
@@ -1012,11 +860,9 @@ class MegatronEngine(torch.nn.Module):
1012
  # Empty unused memory
1013
  if args.empty_unused_memory_level >= 1:
1014
  torch.cuda.empty_cache()
1015
-
1016
  args.consumed_valid_samples += (
1017
  mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
1018
  )
1019
-
1020
  if mpu.is_pipeline_last_stage(ignore_virtual=True):
1021
  # Average loss across microbatches.
1022
  loss_reduced = {}
@@ -1029,7 +875,6 @@ class MegatronEngine(torch.nn.Module):
1029
  return loss_reduced
1030
  else:
1031
  return {}
1032
-
1033
  def forward(self, **batch_data):
1034
  # During training, we use train_step()
1035
  # model(**batch_data) performs following operations by delegating it to `self.train_step`:
@@ -1077,12 +922,10 @@ class MegatronEngine(torch.nn.Module):
1077
  self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get(
1078
  key + "_num_iters", torch.cuda.FloatTensor([0.0])
1079
  ) + torch.cuda.FloatTensor([1.0])
1080
-
1081
  loss = torch.tensor(0.0, device=args.local_rank)
1082
  for key in loss_dict:
1083
  if len(loss_dict[key].shape) == 0:
1084
  loss += loss_dict[key]
1085
-
1086
  logits = None
1087
  if "logits" in loss_dict:
1088
  logits = loss_dict["logits"]
@@ -1090,7 +933,6 @@ class MegatronEngine(torch.nn.Module):
1090
  if self.train_step_handler.model_output_class is not None:
1091
  return self.train_step_handler.model_output_class(loss=loss, logits=logits)
1092
  return loss
1093
-
1094
  def log_eval_results(self):
1095
  args = get_args()
1096
  if args.tensorboard_dir is None or self.iteration == 0:
@@ -1110,13 +952,11 @@ class MegatronEngine(torch.nn.Module):
1110
  writer.add_scalar(f"{key} validation", value.item(), self.iteration)
1111
  if args.pretraining_flag:
1112
  writer.add_scalar(f"{key} validation ppl", ppl, self.iteration)
1113
-
1114
  length = len(string) + 1
1115
  print_rank_last("-" * length)
1116
  print_rank_last(string)
1117
  print_rank_last("-" * length)
1118
  self.eval_total_loss_dict = {}
1119
-
1120
  def save_checkpoint(self, output_dir):
1121
  self.log_eval_results()
1122
  args = get_args()
@@ -1124,7 +964,6 @@ class MegatronEngine(torch.nn.Module):
1124
  torch.distributed.barrier()
1125
  save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler)
1126
  torch.distributed.barrier()
1127
-
1128
  def load_checkpoint(self, input_dir):
1129
  args = get_args()
1130
  args.load = input_dir
@@ -1136,7 +975,6 @@ class MegatronEngine(torch.nn.Module):
1136
  self.iteration = iteration
1137
  if args.fp16 and self.iteration == 0:
1138
  self.optimizer.reload_model_params()
1139
-
1140
  def megatron_generate(
1141
  self,
1142
  inputs,
@@ -1153,7 +991,6 @@ class MegatronEngine(torch.nn.Module):
1153
  """
1154
  Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along
1155
  with sampling. Refer to the Megatron-LM repo for more details.
1156
-
1157
  Args:
1158
  inputs (torch.Tensor): input ids
1159
  attention_mask (torch.Tensor, optional): attention mask. Defaults to None.
@@ -1172,33 +1009,25 @@ class MegatronEngine(torch.nn.Module):
1172
  args = get_args()
1173
  if args.model_type_name != "gpt":
1174
  raise NotImplementedError("Generate method is not implemented for this model")
1175
-
1176
  if args.data_parallel_size > 1:
1177
  raise ValueError("Generate method requires data parallelism to be 1")
1178
-
1179
  if args.sequence_parallel:
1180
  raise ValueError("Generate method requires sequence parallelism to be False")
1181
-
1182
  if args.recompute_granularity is not None:
1183
  raise ValueError("Checkpoint activations cannot be set for inference")
1184
-
1185
  if args.vocab_file is None:
1186
  raise ValueError("Vocab file is required for inference")
1187
-
1188
  # Prepare inputs
1189
  if max_length is None and max_new_tokens is None:
1190
  raise ValueError("`max_length` or `max_new_tokens` are required for inference")
1191
-
1192
  if temperature is None:
1193
  temperature = 1.0
1194
  elif not (0.0 < temperature <= 100.0):
1195
  raise ValueError("temperature must be a positive number less than or equal to 100.0")
1196
-
1197
  if top_k is None:
1198
  top_k = 0
1199
  elif not (0 <= top_k <= 1000):
1200
  raise ValueError("top_k must be a positive number less than or equal to 1000")
1201
-
1202
  if top_p is None:
1203
  top_p = 0.0
1204
  elif top_p > 0.0 and top_k > 0.0:
@@ -1206,19 +1035,15 @@ class MegatronEngine(torch.nn.Module):
1206
  else:
1207
  if not (0.0 <= top_p <= 1.0):
1208
  raise ValueError("top_p must be less than or equal to 1.0")
1209
-
1210
  top_p_decay = kwargs.get("top_p_decay", 0.0)
1211
  if not (0.0 <= top_p_decay <= 1.0):
1212
  raise ValueError("top_p_decay must be less than or equal to 1.0")
1213
-
1214
  top_p_bound = kwargs.get("top_p_bound", 0.0)
1215
  if not (0.0 <= top_p_bound <= 1.0):
1216
  raise ValueError("top_p_bound must be less than or equal to 1.0")
1217
-
1218
  add_BOS = kwargs.get("add_BOS", False)
1219
  if not (isinstance(add_BOS, bool)):
1220
  raise ValueError("add_BOS must be a boolean")
1221
-
1222
  beam_width = num_beams
1223
  if beam_width is not None:
1224
  if not isinstance(beam_width, int):
@@ -1227,17 +1052,13 @@ class MegatronEngine(torch.nn.Module):
1227
  raise ValueError("beam_width must be greater than 0")
1228
  if inputs.shape[0] > 1:
1229
  return "When doing beam_search, batch size must be 1"
1230
-
1231
  tokenizer = get_tokenizer()
1232
-
1233
  stop_token = kwargs.get("stop_token", tokenizer.eod)
1234
  if stop_token is not None:
1235
  if not isinstance(stop_token, int):
1236
  raise ValueError("stop_token must be an integer")
1237
-
1238
  if length_penalty is None:
1239
  length_penalty = 1.0
1240
-
1241
  sizes_list = None
1242
  prompts_tokens_tensor = None
1243
  prompts_length_tensor = None
@@ -1247,12 +1068,10 @@ class MegatronEngine(torch.nn.Module):
1247
  prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0])
1248
  else:
1249
  prompts_length_tensor = attention_mask.sum(axis=-1).cuda()
1250
-
1251
  if max_new_tokens is None:
1252
  max_new_tokens = max_length - inputs.shape[1]
1253
  if max_new_tokens <= 0:
1254
  raise ValueError("max_new_tokens must be greater than 0")
1255
-
1256
  if add_BOS:
1257
  max_length = max_new_tokens + inputs.shape[1] + 1
1258
  # making sure that `max_length` is a multiple of 4 to leverage fused kernels
@@ -1269,22 +1088,18 @@ class MegatronEngine(torch.nn.Module):
1269
  max_new_tokens = max_length - inputs.shape[1]
1270
  padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
1271
  prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1)
1272
-
1273
  # We need the sizes of these tensors for the broadcast
1274
  sizes_list = [
1275
  prompts_tokens_tensor.size(0), # Batch size
1276
  prompts_tokens_tensor.size(1),
1277
  ] # Sequence length
1278
-
1279
  # First, broadcast the sizes.
1280
  sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0)
1281
-
1282
  # Now that we have the sizes, we can broadcast the tokens
1283
  # and length tensors.
1284
  sizes = sizes_tensor.tolist()
1285
  context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0)
1286
  context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0)
1287
-
1288
  # Run the inference
1289
  random_seed = kwargs.get("random_seed", 0)
1290
  torch.random.manual_seed(random_seed)
@@ -1313,27 +1128,20 @@ class MegatronEngine(torch.nn.Module):
1313
  use_eod_token_for_early_termination=True,
1314
  )
1315
  return tokens
1316
-
1317
-
1318
  # other utilities
1319
  def avg_losses_across_data_parallel_group(losses):
1320
  """
1321
  Average losses across data parallel group.
1322
-
1323
  Args:
1324
  losses (List[Tensor]): List of losses to average across data parallel group.
1325
  """
1326
  return average_losses_across_data_parallel_group(losses)
1327
-
1328
-
1329
  def gather_across_data_parallel_groups(tensor):
1330
  """
1331
  Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks.
1332
-
1333
  Args:
1334
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
1335
  The data to gather across data parallel ranks.
1336
-
1337
  """
1338
  def _gpu_gather_one(tensor):
1339
  if tensor.ndim == 0:
@@ -1344,5 +1152,4 @@ def gather_across_data_parallel_groups(tensor):
1344
  ]
1345
  torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group())
1346
  return torch.cat(output_tensors, dim=0)
1347
-
1348
  return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
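
The `train_step` hunks above always run the same sequence: zero the gradients, do the forward/backward passes, reduce and apply the gradients, and step the learning-rate scheduler only if the optimizer update succeeded. As a rough orientation aid, here is a minimal single-device sketch of that control flow in plain PyTorch — it assumes the model returns a scalar loss and replaces the Megatron-LM pipeline, timers, and `mpu` calls with trivial stand-ins, so it is not the wrapper's actual implementation:

```python
import torch


def simple_train_step(model, optimizer, scheduler, batch, empty_unused_memory=False):
    """Single-device analogue of the train_step control flow shown above."""
    # Set grads to zero (the Megatron version also zeroes contiguous DDP buffers).
    optimizer.zero_grad()

    # Forward + backward pass (done by `forward_backward_func` in the hunks above).
    loss = model(**batch)
    loss.backward()

    # Optionally release cached blocks, as `empty_unused_memory_level` does.
    if empty_unused_memory and torch.cuda.is_available():
        torch.cuda.empty_cache()

    # Megatron's optimizer can skip the update (e.g. on fp16 gradient overflow);
    # here we imitate that by checking the gradients for non-finite values.
    update_successful = all(
        torch.isfinite(p.grad).all() for p in model.parameters() if p.grad is not None
    )
    skipped_iter = 0
    if update_successful:
        optimizer.step()
        if scheduler is not None:
            scheduler.step()
    else:
        skipped_iter = 1
    return loss.detach(), skipped_iter
```

The `skipped_iter` value mirrors how the wrapper above reports overflow-skipped steps back to the caller.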
 
37
  else:
38
  raise ValueError(f"Unsupported model type: {args.model_type_name}")
39
  return model
 
 
40
  def prepare_model(accelerator):
41
  accelerator.print("Preparing model")
42
  args = get_args()
 
56
  args.pipeline_model_parallel_split_rank = args.pipeline_model_parallel_size // 2
57
  model = get_model(model_provider_func, model_type)
58
  return model
 
 
59
  # dataloader utilities
60
  class MegatronLMDummyDataLoader:
61
  """
62
  Dummy dataloader presents model parameters or param groups, this is primarily used to follow conventional training
 
63
  Args:
64
  **dataset_kwargs: Megatron data arguments.
65
  """
 
71
  self.dataset_args = vars(data_args[0])
72
  self.dataset_args.update(dataset_kwargs)
73
  self.dataset_args["megatron_dataset_flag"] = True
 
74
  def set_megatron_data_args(self):
75
  args = get_args()
76
  for key, value in self.dataset_args.items():
77
  setattr(args, key, value)
 
78
  def get_train_valid_test_datasets_provider(self):
79
  def train_valid_test_datasets_provider(train_val_test_num_samples):
80
  """Build train, valid, and test datasets."""
 
120
  from megatron.data.dataset_utils import build_train_valid_test_datasets
121
  train_ds, valid_ds, test_ds = build_train_valid_test_datasets(**dataset_args)
122
  return train_ds, valid_ds, test_ds
 
123
  return train_valid_test_datasets_provider
 
124
  def build_pretraining_data_loader(self, dataset, consumed_samples):
125
  if dataset is None:
126
  return None
127
  args = get_args()
128
  micro_batch_size = args.micro_batch_size * args.num_micro_batches
 
129
  # Megatron sampler
130
  if args.dataloader_type == "single":
131
  batch_sampler = MegatronPretrainingSampler(
 
147
  )
148
  else:
149
  raise Exception("{} dataloader type is not supported.".format(args.dataloader_type))
 
150
  # Torch dataloader.
151
  return torch.utils.data.DataLoader(
152
  dataset, batch_sampler=batch_sampler, num_workers=args.num_workers, pin_memory=True
153
  )
 
154
  def build_train_valid_test_data_iterators(self):
155
  def cyclic_iter(iter):
156
  while True:
157
  for x in iter:
158
  yield x
 
159
  args = get_args()
 
160
  (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None)
 
161
  print_rank_0("> building train, validation, and test datasets ...")
 
162
  # Backward compatibility, assume fixed batch size.
163
  if args.iteration > 0 and args.consumed_train_samples == 0:
164
  assert args.train_samples is None, "only backward compatibility support for iteration-based training"
 
168
  args.consumed_valid_samples = (
169
  (args.iteration // args.eval_interval) * args.eval_iters * args.global_batch_size
170
  )
 
171
  # Data loader only on rank 0 of each model parallel group.
172
  if mpu.get_tensor_model_parallel_rank() == 0:
173
  # Number of train/valid/test samples.
 
186
  print_rank_0(" train: {}".format(train_val_test_num_samples[0]))
187
  print_rank_0(" validation: {}".format(train_val_test_num_samples[1]))
188
  print_rank_0(" test: {}".format(train_val_test_num_samples[2]))
 
189
  # Build the datasets.
190
  train_valid_test_datasets_provider = self.get_train_valid_test_datasets_provider()
191
  train_ds, valid_ds, test_ds = train_valid_test_datasets_provider(train_val_test_num_samples)
 
192
  # Build dataloaders.
193
  train_dataloader = self.build_pretraining_data_loader(train_ds, args.consumed_train_samples)
194
  valid_dataloader = self.build_pretraining_data_loader(valid_ds, args.consumed_valid_samples)
195
  test_dataloader = self.build_pretraining_data_loader(test_ds, 0)
 
196
  # Flags to know if we need to do training/validation/testing.
197
  do_train = train_dataloader is not None and args.train_iters > 0
198
  do_valid = valid_dataloader is not None and args.eval_iters > 0
 
201
  flags = torch.cuda.LongTensor([int(do_train), int(do_valid), int(do_test)])
202
  else:
203
  flags = torch.cuda.LongTensor([0, 0, 0])
 
204
  # Broadcast num tokens.
205
  torch.distributed.broadcast(
206
  flags, mpu.get_tensor_model_parallel_src_rank(), group=mpu.get_tensor_model_parallel_group()
 
208
  args.do_train = flags[0].item()
209
  args.do_valid = flags[1].item()
210
  args.do_test = flags[2].item()
 
211
  # Build iterators.
212
  dl_type = args.dataloader_type
213
  assert dl_type in ["single", "cyclic"]
 
214
  if train_dataloader is not None:
215
  train_data_iterator = (
216
  iter(train_dataloader) if dl_type == "single" else iter(cyclic_iter(train_dataloader))
217
  )
218
  else:
219
  train_data_iterator = None
 
220
  if valid_dataloader is not None:
221
  valid_data_iterator = (
222
  iter(valid_dataloader) if dl_type == "single" else iter(cyclic_iter(valid_dataloader))
223
  )
224
  else:
225
  valid_data_iterator = None
 
226
  if test_dataloader is not None:
227
  test_data_iterator = iter(test_dataloader) if dl_type == "single" else iter(cyclic_iter(test_dataloader))
228
  else:
229
  test_data_iterator = None
 
230
  return train_data_iterator, valid_data_iterator, test_data_iterator
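
For the `cyclic` dataloader type above, the iterator simply restarts the finite dataloader whenever it is exhausted. A self-contained toy illustration of the same `cyclic_iter` pattern (the list-backed dataset here is made up for the example and has nothing to do with the Megatron sampler):

```python
from torch.utils.data import DataLoader


def cyclic_iter(iterable):
    # Same pattern as in build_train_valid_test_data_iterators above:
    # restart the underlying iterable every time it is exhausted.
    while True:
        for x in iterable:
            yield x


loader = DataLoader(list(range(6)), batch_size=2)   # toy dataset: 3 batches
endless = iter(cyclic_iter(loader))
batches = [next(endless) for _ in range(5)]          # keeps going past 3 batches
print(batches)  # wraps around to the first batch after the third one
```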
 
 
231
  def prepare_data_loader(accelerator, dataloader):
232
  accelerator.print("Preparing dataloader")
233
  args = get_args()
234
  if not args.megatron_dataset_flag:
235
  from ..data_loader import _PYTORCH_DATALOADER_KWARGS, prepare_data_loader
 
236
  args = get_args()
237
  micro_batch_size = args.micro_batch_size * args.num_micro_batches
238
  kwargs = {k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k]) for k in _PYTORCH_DATALOADER_KWARGS}
 
247
  else:
248
  del kwargs["batch_sampler"]
249
  kwargs["batch_size"] = micro_batch_size
 
250
  dataloader = torch.utils.data.DataLoader(dataloader.dataset, **kwargs)
251
  return prepare_data_loader(
252
  dataloader,
 
273
  test_data_iterator,
274
  ) = dataloader.build_train_valid_test_data_iterators()
275
  return train_data_iterator, valid_data_iterator, test_data_iterator
 
 
276
  # optimizer utilities
277
  class MegatronLMOptimizerWrapper(AcceleratedOptimizer):
278
  def __init__(self, optimizer):
279
  super().__init__(optimizer, device_placement=False, scaler=None)
 
280
  def zero_grad(self, set_to_none=None):
281
  pass # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed
 
282
  def step(self):
283
  pass # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed
 
284
  @property
285
  def step_was_skipped(self):
286
  """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
287
  return self.optimizer.skipped_iter
 
 
288
  def prepare_optimizer(accelerator, model):
289
  accelerator.print("Preparing optimizer")
290
  args = get_args()
291
  optimizer = get_megatron_optimizer(model, args.no_wd_decay_cond, args.scale_lr_cond, args.lr_mult)
292
  return optimizer
 
 
293
  # scheduler utilities
294
  class MegatronLMDummyScheduler:
295
  """
296
  Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training
297
  loop when scheduler config is specified in the deepspeed config file.
 
298
  Args:
299
  optimizer (`torch.optim.optimizer.Optimizer`):
300
  The optimizer to wrap.
 
310
  self.total_num_steps = total_num_steps
311
  self.warmup_num_steps = warmup_num_steps
312
  self.kwargs = kwargs
 
 
313
  class MegatronLMSchedulerWrapper(AcceleratedScheduler):
314
  def __init__(self, scheduler, optimizers):
315
  super().__init__(scheduler, optimizers)
 
316
  def step(self, *args, **kwargs):
317
  return # `model(**batch)` is doing that automatically. Therefore, its implementation is not needed
 
 
318
  def prepare_scheduler(accelerator, optimizer, scheduler):
319
  accelerator.print("Preparing scheduler")
320
  scheduler = get_optimizer_param_scheduler(optimizer)
321
  return scheduler
 
 
322
  class AbstractTrainStep(ABC):
323
  """Abstract class for batching, forward pass and loss handler."""
 
324
  def __init__(self, name):
325
  super().__init__()
326
  self.name = name
 
327
  def get_batch_func(self):
328
  pass
 
329
  def get_forward_step_func(self):
330
  pass
 
331
  def get_loss_func(self):
332
  pass
 
 
333
  class BertTrainStep(AbstractTrainStep):
334
  """
335
  Bert train step class.
 
336
  Args:
337
  args (`argparse.Namespace`): Megatron-LM arguments.
338
  """
 
345
  self.model_output_class = None
346
  else:
347
  self.model_output_class = SequenceClassifierOutput
 
348
  def get_batch_func(self, megatron_dataset_flag):
349
  def get_batch_megatron(data_iterator):
350
  """Build the batch."""
 
351
  # Items and their type.
352
  keys = ["text", "types", "labels", "is_random", "loss_mask", "padding_mask"]
353
  datatype = torch.int64
 
354
  # Broadcast data.
355
  if data_iterator is not None:
356
  data = next(data_iterator)
357
  else:
358
  data = None
359
  data_b = mpu.broadcast_data(keys, data, datatype)
 
360
  # Unpack.
361
  tokens = data_b["text"].long()
362
  types = data_b["types"].long()
 
364
  loss_mask = data_b["loss_mask"].float()
365
  lm_labels = data_b["labels"].long()
366
  padding_mask = data_b["padding_mask"].long()
 
367
  return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
 
368
  def get_batch_transformer(data_iterator):
369
  """Build the batch."""
370
  data = next(data_iterator)
371
  data = send_to_device(data, torch.cuda.current_device())
 
372
  # Unpack.
373
  tokens = data["input_ids"].long()
374
  padding_mask = data["attention_mask"].long()
 
386
  sentence_order = data["next_sentence_label"].long()
387
  else:
388
  sentence_order = None
 
389
  return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask
 
390
  if megatron_dataset_flag:
391
  return get_batch_megatron
392
  else:
393
  return get_batch_transformer
 
394
  def get_loss_func(self, pretraining_flag, num_labels):
395
  def loss_func_pretrain(loss_mask, sentence_order, output_tensor):
396
  lm_loss_, sop_logits = output_tensor
 
397
  lm_loss_ = lm_loss_.float()
398
  loss_mask = loss_mask.float()
399
  lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
 
400
  if sop_logits is not None:
401
  sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1)
402
  sop_loss = sop_loss.float()
403
  loss = lm_loss + sop_loss
404
  averaged_losses = average_losses_across_data_parallel_group([lm_loss, sop_loss])
405
  return loss, {"lm loss": averaged_losses[0], "sop loss": averaged_losses[1]}
 
406
  else:
407
  loss = lm_loss
408
  averaged_losses = average_losses_across_data_parallel_group([lm_loss])
409
  return loss, {"lm loss": averaged_losses[0]}
 
410
  def loss_func_finetune(labels, logits):
411
  if num_labels == 1:
412
  # We are doing regression
 
420
  loss = loss_fct(logits, labels)
421
  averaged_losses = average_losses_across_data_parallel_group([loss])
422
  return loss, {"loss": averaged_losses[0]}
 
423
  if pretraining_flag:
424
  return loss_func_pretrain
425
  else:
426
  return loss_func_finetune
 
427
  def get_forward_step_func(self, pretraining_flag, bert_binary_head):
428
  def forward_step(data_iterator, model):
429
  """Forward step."""
 
437
  else:
438
  logits = model(tokens, padding_mask, tokentype_ids=types)
439
  return logits, partial(self.loss_func, labels)
 
440
  return forward_step
 
 
441
  class GPTTrainStep(AbstractTrainStep):
442
  """
443
  GPT train step class.
 
444
  Args:
445
  args (`argparse.Namespace`): Megatron-LM arguments.
446
  """
 
460
  self.model_output_class = None
461
  else:
462
  self.model_output_class = CausalLMOutputWithCrossAttentions
 
463
  def get_batch_func(self, megatron_dataset_flag):
464
  def get_batch_megatron(data_iterator):
465
  """Generate a batch"""
466
  # Items and their type.
467
  keys = ["text"]
468
  datatype = torch.int64
 
469
  # Broadcast data.
470
  if data_iterator is not None:
471
  data = next(data_iterator)
472
  else:
473
  data = None
474
  data_b = mpu.broadcast_data(keys, data, datatype)
 
475
  # Unpack.
476
  tokens_ = data_b["text"].long()
477
  labels = tokens_[:, 1:].contiguous()
478
  tokens = tokens_[:, :-1].contiguous()
 
479
  # Get the masks and position ids.
480
  attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
481
  tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss
482
  )
 
483
  return tokens, labels, loss_mask, attention_mask, position_ids
 
484
  def get_batch_transformer(data_iterator):
485
  data = next(data_iterator)
486
  data = {"input_ids": data["input_ids"]}
487
  data = send_to_device(data, torch.cuda.current_device())
 
488
  tokens_ = data["input_ids"].long()
489
  padding = torch.zeros((tokens_.shape[0], 1), dtype=tokens_.dtype, device=tokens_.device) + self.eod_token
490
  tokens_ = torch.concat([tokens_, padding], dim=1)
 
495
  tokens, self.eod_token, self.reset_position_ids, self.reset_attention_mask, True
496
  )
497
  return tokens, labels, loss_mask, attention_mask, position_ids
 
498
  if megatron_dataset_flag:
499
  return get_batch_megatron
500
  else:
501
  return get_batch_transformer
 
502
  def get_loss_func(self):
503
  args = get_args()
 
504
  def loss_func(loss_mask, output_tensor):
505
  if args.return_logits:
506
  losses, logits = output_tensor
 
509
  losses = losses.float()
510
  loss_mask = loss_mask.view(-1).float()
511
  loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
 
512
  # Reduce loss for logging.
513
  averaged_loss = average_losses_across_data_parallel_group([loss])
 
514
  output_dict = {"lm loss": averaged_loss[0]}
515
  if args.return_logits:
516
  output_dict.update({"logits": logits})
517
  return loss, output_dict
 
518
  return loss_func
 
519
  def get_forward_step_func(self):
520
  def forward_step(data_iterator, model):
521
  """Forward step."""
522
  # Get the batch.
523
  tokens, labels, loss_mask, attention_mask, position_ids = self.get_batch(data_iterator)
524
  output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
 
525
  return output_tensor, partial(self.loss_func, loss_mask)
 
526
  return forward_step
 
 
527
  class T5TrainStep(AbstractTrainStep):
528
  """
529
  T5 train step class.
 
530
  Args:
531
  args (`argparse.Namespace`): Megatron-LM arguments.
532
  """
 
539
  self.model_output_class = None
540
  else:
541
  self.model_output_class = Seq2SeqLMOutput
 
542
  @staticmethod
543
  def attn_mask_postprocess(attention_mask):
544
  # We create a 3D attention mask from a 2D tensor mask.
 
551
  # Convert attention mask to binary:
552
  extended_attention_mask = attention_mask_bss < 0.5
553
  return extended_attention_mask
 
554
  @staticmethod
555
  def get_decoder_mask(seq_length, device):
556
  attention_mask = torch.tril(torch.ones((1, seq_length, seq_length), device=device))
557
  attention_mask = attention_mask < 0.5
558
  return attention_mask
 
559
  @staticmethod
560
  def get_enc_dec_mask(attention_mask, dec_seq_length, device):
561
  batch_size, _ = attention_mask.shape
 
567
  attention_mask_bss = attention_mask_bs1 * attention_mask_b1s
568
  extended_attention_mask = attention_mask_bss < 0.5
569
  return extended_attention_mask
 
570
  def get_batch_func(self, megatron_dataset_flag):
571
  def get_batch_megatron(data_iterator):
572
  """Build the batch."""
 
573
  keys = ["text_enc", "text_dec", "labels", "loss_mask", "enc_mask", "dec_mask", "enc_dec_mask"]
574
  datatype = torch.int64
 
575
  # Broadcast data.
576
  if data_iterator is not None:
577
  data = next(data_iterator)
578
  else:
579
  data = None
580
  data_b = mpu.broadcast_data(keys, data, datatype)
 
581
  # Unpack.
582
  tokens_enc = data_b["text_enc"].long()
583
  tokens_dec = data_b["text_dec"].long()
584
  labels = data_b["labels"].long()
585
  loss_mask = data_b["loss_mask"].float()
 
586
  enc_mask = data_b["enc_mask"] < 0.5
587
  dec_mask = data_b["dec_mask"] < 0.5
588
  enc_dec_mask = data_b["enc_dec_mask"] < 0.5
 
589
  return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
 
590
  def get_batch_transformer(data_iterator):
591
  """Build the batch."""
592
  data = next(data_iterator)
593
  data = send_to_device(data, torch.cuda.current_device())
 
594
  tokens_enc = data["input_ids"].long()
595
  labels = data["labels"].long()
596
  loss_mask = (labels != -100).to(torch.float)
 
606
  enc_dec_mask = T5TrainStep.get_enc_dec_mask(
607
  data["attention_mask"].long(), tokens_dec.shape[1], tokens_dec.device
608
  )
 
609
  return tokens_enc, tokens_dec, loss_mask, labels, enc_mask, dec_mask, enc_dec_mask
 
610
  if megatron_dataset_flag:
611
  return get_batch_megatron
612
  else:
613
  return get_batch_transformer
 
614
  def get_loss_func(self):
615
  def loss_func(loss_mask, output_tensor):
616
  lm_loss_ = output_tensor.float()
617
  lm_loss = torch.sum(lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
 
618
  loss = lm_loss
619
  averaged_losses = average_losses_across_data_parallel_group([lm_loss])
 
620
  return loss, {"lm loss": averaged_losses[0]}
 
621
  return loss_func
 
622
  def get_forward_step_func(self):
623
  def forward_step(data_iterator, model):
624
  """Forward step."""
 
630
  output_tensor = model(
631
  tokens_enc, tokens_dec, enc_mask, dec_mask, enc_dec_mask, tokentype_ids=None, lm_labels=lm_labels
632
  )
 
633
  return output_tensor, partial(self.loss_func, loss_mask)
 
634
  return forward_step
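
All three train-step classes above reduce per-token losses the same way: flatten them, weight by the loss mask, and divide by the number of unmasked tokens, i.e. `loss = sum(losses * loss_mask) / loss_mask.sum()`. A tiny numeric check of that reduction with made-up values:

```python
import torch

# Per-token losses for a batch of 2 sequences of length 3 (made-up numbers).
losses = torch.tensor([[0.5, 1.0, 2.0],
                       [1.5, 0.0, 0.0]])
# Mask out padding: only 4 of the 6 positions contribute.
loss_mask = torch.tensor([[1.0, 1.0, 1.0],
                          [1.0, 0.0, 0.0]])

# Same reduction as in the loss_func implementations above.
lm_loss = torch.sum(losses.view(-1) * loss_mask.view(-1)) / loss_mask.sum()
print(lm_loss)  # (0.5 + 1.0 + 2.0 + 1.5) / 4 = 1.25
```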
 
 
635
  # initialize megatron setup
636
  def initialize(accelerator, extra_args_provider=None, args_defaults={}):
637
  accelerator.print("Initializing Megatron-LM")
638
  assert torch.cuda.is_available(), "Megatron requires CUDA."
 
639
  # Parse arguments
640
  args = parse_args(extra_args_provider, ignore_unknown_args=True)
 
641
  # Set defaults
642
  for key, value in args_defaults.items():
643
  if getattr(args, key, None) is not None:
 
650
  flush=True,
651
  )
652
  setattr(args, key, value)
 
653
  if args.use_checkpoint_args or args_defaults.get("use_checkpoint_args", False):
654
  assert args.load is not None, "--use-checkpoints-args requires --load argument"
655
  load_args_from_checkpoint(args)
 
656
  validate_args(args)
 
657
  # set global args, build tokenizer, and set adlr-autoresume,
658
  # tensorboard-writer, and timers.
659
  set_global_variables(args)
 
660
  # torch.distributed initialization
661
  def finish_mpu_init():
662
  args = get_args()
 
670
  assert args.local_rank == device, "expected local-rank to be the same as rank % device-count."
671
  else:
672
  args.local_rank = device
 
673
  # Set the tensor model-parallel, pipeline model-parallel, and
674
  # data-parallel communicators.
675
  if mpu.model_parallel_is_initialized():
 
681
  args.virtual_pipeline_model_parallel_size,
682
  args.pipeline_model_parallel_split_rank,
683
  )
 
684
  # Random seeds for reproducibility.
685
  if args.rank == 0:
686
  print("> setting random seeds to {} ...".format(args.seed))
687
  _set_random_seed(args.seed, args.data_parallel_random_init)
 
688
  args = get_args()
 
689
  # Megatron's MPU is the master. Complete initialization right away.
690
  finish_mpu_init()
 
691
  # Autoresume.
692
  _init_autoresume()
 
693
  # Compile dependencies.
694
  _compile_dependencies()
 
695
  # Set pytorch JIT layer fusion options and warmup JIT functions.
696
  set_jit_fusion_options()
697
  args = get_args()
 
701
  else:
702
  args.bert_binary_head = False
703
  args.iteration = 0
 
 
704
  class MegatronEngine(torch.nn.Module):
705
  """
706
  Megatron-LM model wrapper
 
707
  Args:
708
  accelerator (:class:`~accelerate.Accelerator`): The accelerator object to use.
709
  model: Megatron-LM model
 
730
  else:
731
  raise ValueError(f"Unsupported model type: {args.model_type_name}")
732
  self.optimizer.skipped_iter = False
 
733
  # Tracking loss.
734
  self.total_loss_dict = {}
735
  self.eval_total_loss_dict = {}
 
737
  self.report_memory_flag = True
738
  if args.tensorboard_dir is not None:
739
  write_args_to_tensorboard()
 
740
  def train(self):
741
  for model_module in self.module:
742
  model_module.train()
743
  self.log_eval_results()
 
744
  def eval(self):
745
  for model_module in self.module:
746
  model_module.eval()
 
747
  def train_step(self, **batch_data):
748
  """
749
  Training step for Megatron-LM
 
750
  Args:
751
  batch_data (:obj:`dict`): The batch data to train on.
752
  """
753
  args = get_args()
754
  timers = get_timers()
 
755
  if len(batch_data) > 0:
756
  data_chunks = []
757
  if args.num_micro_batches > 1:
 
764
  )
765
  else:
766
  data_chunks = [batch_data]
 
767
  if len(self.module) > 1:
768
  batch_data_iterator = (
769
  [iter(data_chunks) for _ in range(len(self.module))]
 
772
  )
773
  else:
774
  batch_data_iterator = iter(data_chunks) if len(batch_data) > 0 else None
 
775
  # Set grad to zero.
776
  if args.DDP_impl == "local" and args.use_contiguous_buffers_in_local_ddp:
777
  for partition in self.module:
778
  partition.zero_grad_buffer()
779
  self.optimizer.zero_grad()
 
780
  # Forward pass.
781
  forward_backward_func = get_forward_backward_func()
782
  losses_reduced = forward_backward_func(
 
787
  None,
788
  forward_only=False,
789
  )
 
790
  # Empty unused memory.
791
  if args.empty_unused_memory_level >= 1:
792
  torch.cuda.empty_cache()
 
793
  # Reduce gradients.
794
  timers("backward-reduce-model-grads").start()
795
  self.optimizer.reduce_model_grads(args, timers)
796
  timers("backward-reduce-model-grads").stop()
 
797
  # Update parameters.
798
  timers("optimizer").start()
799
  update_successful, grad_norm, num_zeros_in_grad = self.optimizer.step(args, timers)
800
  timers("optimizer").stop()
 
801
  # Gather params.
802
  if update_successful:
803
  timers("backward-gather-model-params").start()
804
  self.optimizer.gather_model_params(args, timers)
805
  timers("backward-gather-model-params").stop()
 
806
  # Update learning rate.
807
  if update_successful:
808
  if self.scheduler is not None:
 
811
  skipped_iter = 0
812
  else:
813
  skipped_iter = 1
 
814
  self.optimizer.skipped_iter = not update_successful
 
815
  # Empty unused memory.
816
  if args.empty_unused_memory_level >= 2:
817
  torch.cuda.empty_cache()
 
818
  args.consumed_train_samples += (
819
  mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
820
  )
 
821
  if mpu.is_pipeline_last_stage(ignore_virtual=True):
822
  # Average loss across microbatches.
823
  loss_reduced = {}
 
829
  loss_reduced[key] = torch.concat(losses_reduced_for_key)
830
  return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad
831
  return {}, skipped_iter, grad_norm, num_zeros_in_grad
 
832
  def eval_step(self, **batch_data):
833
  """
834
  Evaluation step for Megatron-LM
 
835
  Args:
836
  batch_data (:obj:`dict`): The batch data to evaluate on.
837
  """
 
844
  )
845
  else:
846
  data_chunks = [batch_data]
 
847
  if len(self.module) > 1:
848
  batch_data_iterator = [iter(data_chunks) for _ in range(len(self.module))]
849
  else:
 
860
  # Empty unused memory
861
  if args.empty_unused_memory_level >= 1:
862
  torch.cuda.empty_cache()
 
863
  args.consumed_valid_samples += (
864
  mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches()
865
  )
 
866
  if mpu.is_pipeline_last_stage(ignore_virtual=True):
867
  # Average loss across microbatches.
868
  loss_reduced = {}
 
875
  return loss_reduced
876
  else:
877
  return {}
 
878
  def forward(self, **batch_data):
879
  # During training, we use train_step()
880
  # model(**batch_data) performs following operations by delegating it to `self.train_step`:
 
922
  self.eval_total_loss_dict[key + "_num_iters"] = self.eval_total_loss_dict.get(
923
  key + "_num_iters", torch.cuda.FloatTensor([0.0])
924
  ) + torch.cuda.FloatTensor([1.0])
 
925
  loss = torch.tensor(0.0, device=args.local_rank)
926
  for key in loss_dict:
927
  if len(loss_dict[key].shape) == 0:
928
  loss += loss_dict[key]
 
929
  logits = None
930
  if "logits" in loss_dict:
931
  logits = loss_dict["logits"]
 
933
  if self.train_step_handler.model_output_class is not None:
934
  return self.train_step_handler.model_output_class(loss=loss, logits=logits)
935
  return loss
 
936
  def log_eval_results(self):
937
  args = get_args()
938
  if args.tensorboard_dir is None or self.iteration == 0:
 
952
  writer.add_scalar(f"{key} validation", value.item(), self.iteration)
953
  if args.pretraining_flag:
954
  writer.add_scalar(f"{key} validation ppl", ppl, self.iteration)
 
955
  length = len(string) + 1
956
  print_rank_last("-" * length)
957
  print_rank_last(string)
958
  print_rank_last("-" * length)
959
  self.eval_total_loss_dict = {}
 
960
  def save_checkpoint(self, output_dir):
961
  self.log_eval_results()
962
  args = get_args()
 
964
  torch.distributed.barrier()
965
  save_checkpoint(self.iteration, self.module, self.optimizer, self.scheduler)
966
  torch.distributed.barrier()
 
967
  def load_checkpoint(self, input_dir):
968
  args = get_args()
969
  args.load = input_dir
 
975
  self.iteration = iteration
976
  if args.fp16 and self.iteration == 0:
977
  self.optimizer.reload_model_params()
 
978
  def megatron_generate(
979
  self,
980
  inputs,
 
991
  """
992
  Generate method for GPT2 model. This method is used for inference. Supports both greedy and beam search along
993
  with sampling. Refer to the Megatron-LM repo for more details.
 
994
  Args:
995
  inputs (torch.Tensor): input ids
996
  attention_mask (torch.Tensor, optional): attention mask. Defaults to None.
 
1009
  args = get_args()
1010
  if args.model_type_name != "gpt":
1011
  raise NotImplementedError("Generate method is not implemented for this model")
 
1012
  if args.data_parallel_size > 1:
1013
  raise ValueError("Generate method requires data parallelism to be 1")
 
1014
  if args.sequence_parallel:
1015
  raise ValueError("Generate method requires sequence parallelism to be False")
 
1016
  if args.recompute_granularity is not None:
1017
  raise ValueError("Checkpoint activations cannot be set for inference")
 
1018
  if args.vocab_file is None:
1019
  raise ValueError("Vocab file is required for inference")
 
1020
  # Prepare inputs
1021
  if max_length is None and max_new_tokens is None:
1022
  raise ValueError("`max_length` or `max_new_tokens` are required for inference")
 
1023
  if temperature is None:
1024
  temperature = 1.0
1025
  elif not (0.0 < temperature <= 100.0):
1026
  raise ValueError("temperature must be a positive number less than or equal to 100.0")
 
1027
  if top_k is None:
1028
  top_k = 0
1029
  elif not (0 <= top_k <= 1000):
1030
  raise ValueError("top_k must be a positive number less than or equal to 1000")
 
1031
  if top_p is None:
1032
  top_p = 0.0
1033
  elif top_p > 0.0 and top_k > 0.0:
 
1035
  else:
1036
  if not (0.0 <= top_p <= 1.0):
1037
  raise ValueError("top_p must be less than or equal to 1.0")
 
1038
  top_p_decay = kwargs.get("top_p_decay", 0.0)
1039
  if not (0.0 <= top_p_decay <= 1.0):
1040
  raise ValueError("top_p_decay must be less than or equal to 1.0")
 
1041
  top_p_bound = kwargs.get("top_p_bound", 0.0)
1042
  if not (0.0 <= top_p_bound <= 1.0):
1043
  raise ValueError("top_p_bound must be less than or equal to 1.0")
 
1044
  add_BOS = kwargs.get("add_BOS", False)
1045
  if not (isinstance(add_BOS, bool)):
1046
  raise ValueError("add_BOS must be a boolean")
 
1047
  beam_width = num_beams
1048
  if beam_width is not None:
1049
  if not isinstance(beam_width, int):
 
1052
  raise ValueError("beam_width must be greater than 0")
1053
  if inputs.shape[0] > 1:
1054
  return "When doing beam_search, batch size must be 1"
 
1055
  tokenizer = get_tokenizer()
 
1056
  stop_token = kwargs.get("stop_token", tokenizer.eod)
1057
  if stop_token is not None:
1058
  if not isinstance(stop_token, int):
1059
  raise ValueError("stop_token must be an integer")
 
1060
  if length_penalty is None:
1061
  length_penalty = 1.0
 
1062
  sizes_list = None
1063
  prompts_tokens_tensor = None
1064
  prompts_length_tensor = None
 
1068
  prompts_length_tensor = torch.cuda.LongTensor([inputs.shape[1]] * inputs.shape[0])
1069
  else:
1070
  prompts_length_tensor = attention_mask.sum(axis=-1).cuda()
 
1071
  if max_new_tokens is None:
1072
  max_new_tokens = max_length - inputs.shape[1]
1073
  if max_new_tokens <= 0:
1074
  raise ValueError("max_new_tokens must be greater than 0")
 
1075
  if add_BOS:
1076
  max_length = max_new_tokens + inputs.shape[1] + 1
1077
  # making sure that `max_length` is a multiple of 4 to leverage fused kernels
 
1088
  max_new_tokens = max_length - inputs.shape[1]
1089
  padding = torch.cuda.LongTensor([[tokenizer.eod] * max_new_tokens] * inputs.shape[0])
1090
  prompts_tokens_tensor = torch.concat([inputs.cuda(), padding], axis=-1)
 
1091
  # We need the sizes of these tensors for the broadcast
1092
  sizes_list = [
1093
  prompts_tokens_tensor.size(0), # Batch size
1094
  prompts_tokens_tensor.size(1),
1095
  ] # Sequence length
 
1096
  # First, broadcast the sizes.
1097
  sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=0)
 
1098
  # Now that we have the sizes, we can broadcast the tokens
1099
  # and length tensors.
1100
  sizes = sizes_tensor.tolist()
1101
  context_tokens_tensor = broadcast_tensor(sizes, torch.int64, tensor=prompts_tokens_tensor, rank=0)
1102
  context_length_tensor = broadcast_tensor(sizes[0], torch.int64, tensor=prompts_length_tensor, rank=0)
 
1103
  # Run the inference
1104
  random_seed = kwargs.get("random_seed", 0)
1105
  torch.random.manual_seed(random_seed)
 
1128
  use_eod_token_for_early_termination=True,
1129
  )
1130
  return tokens
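
In `megatron_generate` above, only the first rank builds the padded prompt tensors, so their sizes are broadcast first and the tensors themselves second, letting the other ranks allocate matching buffers. A stripped-down sketch of that two-step broadcast using plain `torch.distributed` (it assumes an already-initialised process group and CUDA devices, and stands in for the `broadcast_int_list`/`broadcast_tensor` helpers rather than reproducing them):

```python
import torch
import torch.distributed as dist


def broadcast_2d_int64(tensor=None, src=0):
    """Broadcast a 2-D int64 tensor whose shape is only known on rank `src`."""
    device = torch.device("cuda", torch.cuda.current_device())
    if dist.get_rank() == src:
        sizes = torch.tensor(list(tensor.shape), dtype=torch.long, device=device)
    else:
        sizes = torch.empty(2, dtype=torch.long, device=device)
    # Step 1: broadcast the sizes (batch size, sequence length).
    dist.broadcast(sizes, src)
    # Step 2: non-source ranks allocate a buffer of that shape, then receive the data.
    if dist.get_rank() != src:
        tensor = torch.empty(tuple(sizes.tolist()), dtype=torch.int64, device=device)
    dist.broadcast(tensor, src)
    return tensor
```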
 
 
1131
  # other utilities
1132
  def avg_losses_across_data_parallel_group(losses):
1133
  """
1134
  Average losses across data parallel group.
 
1135
  Args:
1136
  losses (List[Tensor]): List of losses to average across data parallel group.
1137
  """
1138
  return average_losses_across_data_parallel_group(losses)
 
 
1139
  def gather_across_data_parallel_groups(tensor):
1140
  """
1141
  Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks.
 
1142
  Args:
1143
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
1144
  The data to gather across data parallel ranks.
 
1145
  """
1146
  def _gpu_gather_one(tensor):
1147
  if tensor.ndim == 0:
 
1152
  ]
1153
  torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group())
1154
  return torch.cat(output_tensors, dim=0)
 
1155
  return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
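
`gather_across_data_parallel_groups` applies the `_gpu_gather_one` pattern to every tensor inside a nested structure. Reduced to a single tensor and a generic `torch.distributed` group (assumed to be initialised, with identically shaped tensors on every rank), the core of that pattern looks like this sketch:

```python
import torch
import torch.distributed as dist


def gather_and_concat(tensor, group=None):
    # 0-d tensors get a leading batch dimension first, as in `_gpu_gather_one`.
    if tensor.ndim == 0:
        tensor = tensor[None].clone()
    world_size = dist.get_world_size(group)
    output_tensors = [torch.empty_like(tensor) for _ in range(world_size)]
    # Every rank contributes its tensor and receives all of them...
    dist.all_gather(output_tensors, tensor, group=group)
    # ...which are then concatenated along the batch dimension.
    return torch.cat(output_tensors, dim=0)
```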
src/utils/memory.py CHANGED
@@ -6,19 +6,15 @@ def release_memory(*objects):
6
  """
7
  Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`.
8
  Returned objects should be reassigned to the same variables.
9
-
10
  Args:
11
  objects (`Iterable`):
12
  An iterable of objects
13
  Returns:
14
  A list of `None` objects to replace `objects`
15
-
16
  Example:
17
-
18
  ```python
19
  >>> import torch
20
  >>> from accelerate.utils import release_memory
21
-
22
  >>> a = torch.ones(1000, 1000).cuda()
23
  >>> b = torch.ones(1000, 1000).cuda()
24
  >>> a, b = release_memory(a, b)
@@ -36,12 +32,9 @@ def release_memory(*objects):
36
  else:
37
  torch.cuda.empty_cache()
38
  return objects
39
-
40
-
41
  def should_reduce_batch_size(exception: Exception) -> bool:
42
  """
43
  Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory
44
-
45
  Args:
46
  exception (`Exception`):
47
  An exception
@@ -54,40 +47,28 @@ def should_reduce_batch_size(exception: Exception) -> bool:
54
  if isinstance(exception, RuntimeError) and len(exception.args) == 1:
55
  return any(err in exception.args[0] for err in _statements)
56
  return False
57
-
58
-
59
  def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
60
  """
61
  A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or
62
  CUDNN, the batch size is cut in half and passed to `function`
63
-
64
  `function` must take in a `batch_size` parameter as its first argument.
65
-
66
  Args:
67
  function (`callable`, *optional*):
68
  A function to wrap
69
  starting_batch_size (`int`, *optional*):
70
  The batch size to try and fit into memory
71
-
72
  Example:
73
-
74
  ```python
75
  >>> from accelerate.utils import find_executable_batch_size
76
-
77
-
78
  >>> @find_executable_batch_size(starting_batch_size=128)
79
  ... def train(batch_size, model, optimizer):
80
  ... ...
81
-
82
-
83
  >>> train(model, optimizer)
84
  ```
85
  """
86
  if function is None:
87
  return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
88
-
89
  batch_size = starting_batch_size
90
-
91
  def decorator(*args, **kwargs):
92
  nonlocal batch_size
93
  gc.collect()
@@ -122,5 +103,4 @@ def find_executable_batch_size(function: callable = None, starting_batch_size: i
122
  batch_size //= 2
123
  else:
124
  raise
125
-
126
  return decorator
 
6
  """
7
  Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`.
8
  Returned objects should be reassigned to the same variables.
 
9
  Args:
10
  objects (`Iterable`):
11
  An iterable of objects
12
  Returns:
13
  A list of `None` objects to replace `objects`
 
14
  Example:
 
15
  ```python
16
  >>> import torch
17
  >>> from accelerate.utils import release_memory
 
18
  >>> a = torch.ones(1000, 1000).cuda()
19
  >>> b = torch.ones(1000, 1000).cuda()
20
  >>> a, b = release_memory(a, b)
 
32
  else:
33
  torch.cuda.empty_cache()
34
  return objects
 
 
35
  def should_reduce_batch_size(exception: Exception) -> bool:
36
  """
37
  Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory
 
38
  Args:
39
  exception (`Exception`):
40
  An exception
 
47
  if isinstance(exception, RuntimeError) and len(exception.args) == 1:
48
  return any(err in exception.args[0] for err in _statements)
49
  return False
 
 
50
  def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
51
  """
52
  A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or
53
  CUDNN, the batch size is cut in half and passed to `function`
 
54
  `function` must take in a `batch_size` parameter as its first argument.
 
55
  Args:
56
  function (`callable`, *optional*):
57
  A function to wrap
58
  starting_batch_size (`int`, *optional*):
59
  The batch size to try and fit into memory
 
60
  Example:
 
61
  ```python
62
  >>> from accelerate.utils import find_executable_batch_size
 
 
63
  >>> @find_executable_batch_size(starting_batch_size=128)
64
  ... def train(batch_size, model, optimizer):
65
  ... ...
 
 
66
  >>> train(model, optimizer)
67
  ```
68
  """
69
  if function is None:
70
  return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
 
71
  batch_size = starting_batch_size
 
72
  def decorator(*args, **kwargs):
73
  nonlocal batch_size
74
  gc.collect()
 
103
  batch_size //= 2
104
  else:
105
  raise
 
106
  return decorator
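
`find_executable_batch_size` above keeps halving the batch size while the wrapped function raises memory-related errors. The following standalone sketch mimics that loop outside the decorator; the error substrings are only representative, since the exact list checked by `should_reduce_batch_size` is elided in the hunks above:

```python
import gc

import torch


def _looks_like_oom(exc: Exception) -> bool:
    # Representative of the checks in `should_reduce_batch_size`; the exact
    # matched messages are not shown in the hunks above.
    patterns = ("CUDA out of memory.", "cuDNN error", "can't allocate memory")
    return isinstance(exc, RuntimeError) and len(exc.args) == 1 and any(
        p in exc.args[0] for p in patterns
    )


def run_with_shrinking_batch(train_fn, starting_batch_size=128):
    """Standalone mimic of the decorator: halve the batch size until it fits."""
    batch_size = starting_batch_size
    while batch_size > 0:
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        try:
            return train_fn(batch_size)
        except Exception as exc:
            if _looks_like_oom(exc):
                batch_size //= 2  # same halving rule as `find_executable_batch_size`
            else:
                raise
    raise RuntimeError("No executable batch size found, reached zero.")
```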
src/utils/modeling.py CHANGED
@@ -1,13 +1,9 @@
1
  WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
2
-
3
  logger = logging.getLogger(__name__)
4
-
5
-
6
  def check_device_same(first_device, second_device):
7
  """
8
  Utility method to check if two `torch` devices are similar. When dealing with CUDA devices, torch throws `False`
9
  for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same
10
-
11
  Args:
12
  first_device (`torch.device`):
13
  First device to check
@@ -16,29 +12,21 @@ def check_device_same(first_device, second_device):
16
  """
17
  if first_device.type != second_device.type:
18
  return False
19
-
20
  if first_device.type == "cuda" and first_device.index is None:
21
  # In case the first_device is a cuda device and has
22
  # the index attribute set to `None`, default it to `0`
23
  first_device = torch.device("cuda", index=0)
24
-
25
  if second_device.type == "cuda" and second_device.index is None:
26
  # In case the second_device is a cuda device and has
27
  # the index attribute set to `None`, default it to `0`
28
  second_device = torch.device("cuda", index=0)
29
-
30
  return first_device == second_device
31
-
32
-
33
  def convert_file_size_to_int(size: Union[int, str]):
34
  """
35
  Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
36
-
37
  Args:
38
  size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
39
-
40
  Example:
41
-
42
  ```py
43
  >>> convert_file_size_to_int("1MiB")
44
  1048576
@@ -68,18 +56,13 @@ def convert_file_size_to_int(size: Union[int, str]):
68
  mem_size = int_size // 8 if size.endswith("b") else int_size
69
  except ValueError:
70
  raise ValueError(err_msg)
71
-
72
  if mem_size <= 0:
73
  raise ValueError(err_msg)
74
  return mem_size
75
-
76
-
77
  def dtype_byte_size(dtype: torch.dtype):
78
  """
79
  Returns the size (in bytes) occupied by one parameter of type `dtype`.
80
-
81
  Example:
82
-
83
  ```py
84
  >>> dtype_byte_size(torch.float32)
85
  4
@@ -96,8 +79,6 @@ def dtype_byte_size(dtype: torch.dtype):
96
  raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
97
  bit_size = int(bit_search.groups()[0])
98
  return bit_size // 8
99
-
100
-
101
  def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
102
  """
103
  Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
@@ -130,29 +111,21 @@ def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
130
  storage_ptr = 0
131
  # On torch >=2.0 this is the tensor size
132
  storage_size = tensor.nelement() * _SIZE[tensor.dtype]
133
-
134
  return tensor.device, storage_ptr, storage_size
135
-
136
-
137
  def shard_checkpoint(
138
  state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME
139
  ):
140
  """
141
  Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
142
  given size.
143
-
144
  The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
145
  optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
146
  limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
147
  [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
148
-
149
  <Tip warning={true}>
150
-
151
  If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
152
  have a size greater than `max_shard_size`.
153
-
154
  </Tip>
155
-
156
  Args:
157
  state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
158
  max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
@@ -162,12 +135,10 @@ def shard_checkpoint(
162
  The name of the model save file.
163
  """
164
  max_shard_size = convert_file_size_to_int(max_shard_size)
165
-
166
  sharded_state_dicts = [{}]
167
  last_block_size = 0
168
  total_size = 0
169
  storage_id_to_block = {}
170
-
171
  for key, weight in state_dict.items():
172
  # when bnb serialization is used the weights in the state dict can be strings
173
  # check: https://github.com/huggingface/transformers/pull/24416 for more details
@@ -175,29 +146,23 @@ def shard_checkpoint(
175
  continue
176
  else:
177
  storage_id = id_tensor_storage(weight)
178
-
179
  # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block`
180
  if storage_id in storage_id_to_block:
181
  block_id = storage_id_to_block[storage_id]
182
  sharded_state_dicts[block_id][key] = weight
183
  continue
184
-
185
  weight_size = weight.numel() * dtype_byte_size(weight.dtype)
186
-
187
  # If this weight is going to tip over the maximal size, we split.
188
  if last_block_size + weight_size > max_shard_size:
189
  sharded_state_dicts.append({})
190
  last_block_size = 0
191
-
192
  sharded_state_dicts[-1][key] = weight
193
  last_block_size += weight_size
194
  total_size += weight_size
195
  storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1
196
-
197
  # If we only have one shard, we return it
198
  if len(sharded_state_dicts) == 1:
199
  return {weights_name: sharded_state_dicts[0]}, None
200
-
201
  # Otherwise, let's build the index
202
  weight_map = {}
203
  shards = {}
@@ -209,13 +174,10 @@ def shard_checkpoint(
209
  shards[shard_file] = shard
210
  for key in shard.keys():
211
  weight_map[key] = shard_file
212
-
213
  # Add the metadata
214
  metadata = {"total_size": total_size}
215
  index = {"metadata": metadata, "weight_map": weight_map}
216
  return shards, index
217
-
218
-
219
  def set_module_tensor_to_device(
220
  module: nn.Module,
221
  tensor_name: str,
@@ -227,7 +189,6 @@ def set_module_tensor_to_device(
227
  """
228
  A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
229
  `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).
230
-
231
  Args:
232
  module (`torch.nn.Module`):
233
  The module in which the tensor we want to move lives.
@@ -252,30 +213,24 @@ def set_module_tensor_to_device(
252
  raise ValueError(f"{module} has no attribute {split}.")
253
  module = new_module
254
  tensor_name = splits[-1]
255
-
256
  if tensor_name not in module._parameters and tensor_name not in module._buffers:
257
  raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
258
  is_buffer = tensor_name in module._buffers
259
  old_value = getattr(module, tensor_name)
260
-
261
  if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
262
  raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
263
-
264
  if value is not None:
265
  if old_value.shape != value.shape:
266
  raise ValueError(
267
  f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this looks incorrect.'
268
  )
269
-
270
  if dtype is None:
271
  # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model
272
  value = value.to(old_value.dtype)
273
  elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
274
  value = value.to(dtype)
275
-
276
  param = module._parameters[tensor_name] if tensor_name in module._parameters else None
277
  param_cls = type(param)
278
-
279
  device_quantization = None
280
  with torch.no_grad():
281
  # leave it on cpu first before moving them to cuda
@@ -296,7 +251,6 @@ def set_module_tensor_to_device(
296
  if dtype is not None and device in ["meta", torch.device("meta")]:
297
  if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
298
  new_value = new_value.to(dtype)
299
-
300
  if not is_buffer:
301
  module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad)
302
  elif isinstance(value, torch.Tensor):
@@ -352,15 +306,12 @@ def set_module_tensor_to_device(
352
  torch.npu.empty_cache()
353
  else:
354
  torch.cuda.empty_cache()
355
-
356
-
357
  def named_module_tensors(
358
  module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False
359
  ):
360
  """
361
  A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
362
  it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.
363
-
364
  Args:
365
  module (`torch.nn.Module`):
366
  The module we want the tensors on.
@@ -374,7 +325,6 @@ def named_module_tensors(
374
  """
375
  for named_parameter in module.named_parameters(recurse=recurse):
376
  yield named_parameter
377
-
378
  if include_buffers:
379
  non_persistent_buffers = set()
380
  if remove_non_persistent:
@@ -383,12 +333,9 @@ def named_module_tensors(
383
  name, _ = named_buffer
384
  if name not in non_persistent_buffers:
385
  yield named_buffer
386
-
387
-
388
  def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
389
  """
390
  Gather all non persistent buffers of a given module into a set
391
-
392
  Args:
393
  module (`nn.Module`):
394
  The module we want the non persistent buffers on.
@@ -399,10 +346,7 @@ def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
399
  if recurse:
400
  for _, m in module.named_modules():
401
  non_persistent_buffers_set |= m._non_persistent_buffers_set
402
-
403
  return non_persistent_buffers_set
404
-
405
-
406
  class FindTiedParametersResult(list):
407
  """
408
  This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not
@@ -410,19 +354,14 @@ class FindTiedParametersResult(list):
410
  """
411
  def __init__(self, *args, **kwargs):
412
  super().__init__(*args, **kwargs)
413
-
414
  def values(self):
415
  # TODO: at the next Transformers release (4.28.0) issue a deprecation warning here.
416
  return sum([x[1:] for x in self], [])
417
-
418
-
419
  def check_tied_parameters_in_config(model: nn.Module):
420
  """
421
  Check if there is any indication in the given model that some weights should be tied.
422
-
423
  Args:
424
  model (`torch.nn.Module`): The model to inspect
425
-
426
  Returns:
427
  bool: True if the model needs to have tied weights
428
  """
@@ -430,7 +369,6 @@ def check_tied_parameters_in_config(model: nn.Module):
430
  has_tied_word_embedding = False
431
  has_tied_encoder_decoder = False
432
  has_tied_module = False
433
-
434
  if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]:
435
  has_tied_word_embedding = (
436
  hasattr(model, "config")
@@ -443,10 +381,7 @@ def check_tied_parameters_in_config(model: nn.Module):
443
  and getattr(model.config, "tie_encoder_decoder", False)
444
  )
445
  has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules())
446
-
447
  return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module])
448
-
449
-
450
  def _get_param_device(param, device_map):
451
  if param in device_map:
452
  return device_map[param]
@@ -455,19 +390,14 @@ def _get_param_device(param, device_map):
455
  raise ValueError(f"The `device_map` does not contain the module {param}.")
456
  else:
457
  return _get_param_device(parent_param, device_map)
458
-
459
-
460
  def check_tied_parameters_on_same_device(tied_params, device_map):
461
  """
462
  Check if tied parameters are on the same device
463
-
464
  Args:
465
  tied_params (`List[List[str]]`):
466
  A list of lists of parameter names being all tied together.
467
-
468
  device_map (`Dict[str, Union[int, str, torch.device]]`):
469
  A map that specifies where each submodule should go.
470
-
471
  """
472
  for tie_param in tied_params:
473
  tie_param_devices = {}
@@ -478,31 +408,21 @@ def check_tied_parameters_on_same_device(tied_params, device_map):
478
  f"Tied parameters are on different devices: {tie_param_devices}. "
479
  "Please modify your custom device map or set `device_map='auto'`. "
480
  )
481
-
482
-
483
  def find_tied_parameters(model: nn.Module, **kwargs):
484
  """
485
  Find the tied parameters in a given model.
486
-
487
  <Tip warning={true}>
488
-
489
  The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore
490
  them.
491
-
492
  </Tip>
493
-
494
  Args:
495
  model (`torch.nn.Module`): The model to inspect.
496
-
497
  Returns:
498
  List[List[str]]: A list of lists of parameter names being all tied together.
499
-
500
  Example:
501
-
502
  ```py
503
  >>> from collections import OrderedDict
504
  >>> import torch.nn as nn
505
-
506
  >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
507
  >>> model.linear2.weight = model.linear1.weight
508
  >>> find_tied_parameters(model)
@@ -513,7 +433,6 @@ def find_tied_parameters(model: nn.Module, **kwargs):
513
  named_parameters = kwargs.get("named_parameters", None)
514
  prefix = kwargs.get("prefix", "")
515
  result = kwargs.get("result", {})
516
-
517
  if named_parameters is None:
518
  named_parameters = {n: p for n, p in model.named_parameters()}
519
  else:
@@ -529,19 +448,14 @@ def find_tied_parameters(model: nn.Module, **kwargs):
529
  if new_name not in result:
530
  result[new_name] = []
531
  result[new_name].append(full_name)
532
-
533
  # Once we have treated direct parameters, we move to the child modules.
534
  for name, child in model.named_children():
535
  child_name = name if prefix == "" else f"{prefix}.{name}"
536
  find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)
537
-
538
  return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()])
539
-
540
-
541
  def retie_parameters(model, tied_params):
542
  """
543
  Reties tied parameters in a given model if the link was broken (for instance when adding hooks).
544
-
545
  Args:
546
  model (`torch.nn.Module`):
547
  The model in which to retie parameters.
@@ -567,8 +481,6 @@ def retie_parameters(model, tied_params):
567
  for split in splits[:-1]:
568
  module = getattr(module, split)
569
  setattr(module, splits[-1], param_to_tie)
570
-
571
-
572
  def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype:
573
  """
574
  Just does torch.dtype(dtype) if necessary.
@@ -578,8 +490,6 @@ def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype:
578
  dtype = dtype.replace("torch.", "")
579
  dtype = getattr(torch, dtype)
580
  return dtype
581
-
582
-
583
  def compute_module_sizes(
584
  model: nn.Module,
585
  dtype: Optional[Union[str, torch.device]] = None,
@@ -605,10 +515,7 @@ def compute_module_sizes(
605
  name_parts = name.split(".")
606
  for idx in range(len(name_parts) + 1):
607
  module_sizes[".".join(name_parts[:idx])] += size
608
-
609
  return module_sizes
610
-
611
-
612
  def get_max_layer_size(
613
  modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]
614
  ):
@@ -617,7 +524,6 @@ def get_max_layer_size(
617
  definition of a layer being:
618
  - a module with no direct children (just parameters and buffers)
619
  - a module whose class name is in the list `no_split_module_classes`
620
-
621
  Args:
622
  modules (`List[Tuple[str, torch.nn.Module]]`):
623
  The list of named modules where we want to determine the maximum layer size.
@@ -625,7 +531,6 @@ def get_max_layer_size(
625
  A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).
626
  no_split_module_classes (`List[str]`):
627
  A list of class names for layers we don't want to be split.
628
-
629
  Returns:
630
  `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.
631
  """
@@ -646,18 +551,14 @@ def get_max_layer_size(
646
  else:
647
  modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat
648
  return max_size, layer_names
649
-
650
-
651
  def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):
652
  """
653
  Get the maximum memory available if nothing is passed; otherwise, converts the string values to ints.
654
  """
655
  import psutil
656
-
657
  if max_memory is None:
658
  if not (torch.cuda.is_available() or is_npu_available() or is_xpu_available()):
659
  max_memory = {}
660
-
661
  else:
662
  # Make sure CUDA is initialized on each GPU to have the right memory info.
663
  if is_npu_available():
@@ -678,11 +579,9 @@ def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]]
678
  else:
679
  max_memory["cpu"] = psutil.virtual_memory().available
680
  return max_memory
681
-
682
  for key in max_memory:
683
  if isinstance(max_memory[key], str):
684
  max_memory[key] = convert_file_size_to_int(max_memory[key])
685
-
686
  # Need to sort the devices by type to make sure that we allocate the gpus first.
687
  # As gpu/npu/xpu are represented by int, we need to sort them first.
688
  gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)]
@@ -706,10 +605,7 @@ def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]]
706
  f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'"
707
  )
708
  max_memory = {k: max_memory[k] for k in all_devices}
709
-
710
  return max_memory
711
-
712
-
713
  def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""):
714
  """
715
  Cleans a device_map by grouping all submodules that go on the same device together.
@@ -721,21 +617,16 @@ def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], modul
721
  for k in [k for k in device_map if k.startswith(prefix)]:
722
  del device_map[k]
723
  device_map[module_name] = values[0]
724
-
725
  # Recurse over the children
726
  children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)]
727
  idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
728
  children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
729
  for child in children_modules:
730
  clean_device_map(device_map, module_name=child)
731
-
732
  return device_map
733
-
734
-
735
  def load_offloaded_weights(model, index, offload_folder):
736
  """
737
  Loads the weights from the offload folder into the model.
738
-
739
  Args:
740
  model (`torch.nn.Module`):
741
  The model to load the weights into.
@@ -760,8 +651,6 @@ def load_offloaded_weights(model, index, offload_folder):
760
  tensor_file = os.path.join(offload_folder, f"{param_name}.dat")
761
  weight = load_offloaded_weight(tensor_file, metadata)
762
  set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics)
763
-
764
-
765
  def get_balanced_memory(
766
  model: nn.Module,
767
  max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
@@ -772,14 +661,10 @@ def get_balanced_memory(
772
  ):
773
  """
774
  Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.
775
-
776
  <Tip>
777
-
778
  All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
779
  meta device (as it would if initialized within the `init_empty_weights` context manager).
780
-
781
  </Tip>
782
-
783
  Args:
784
  model (`torch.nn.Module`):
785
  The model to analyze.
@@ -800,7 +685,6 @@ def get_balanced_memory(
800
  # Get default / clean up max_memory
801
  user_not_set_max_memory = max_memory is None
802
  max_memory = get_max_memory(max_memory)
803
-
804
  if is_npu_available():
805
  num_devices = len([d for d in max_memory if torch.device(d).type == "npu" and max_memory[d] > 0])
806
  elif is_xpu_available():
@@ -817,10 +701,8 @@ def get_balanced_memory(
817
  )
818
  else:
819
  num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0])
820
-
821
  if num_devices == 0:
822
  return max_memory
823
-
824
  if num_devices == 1:
825
  # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
826
  low_zero = False
@@ -834,10 +716,8 @@ def get_balanced_memory(
834
  "You can set `max_memory` in to a higher value to use more memory (at your own risk)."
835
  )
836
  break # only one device
837
-
838
  module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
839
  per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices)
840
-
841
  # We can't just set the memory to model_size // num_devices as it will end up being too small: each GPU will get
842
  # slightly fewer layers and some layers will end up offloaded at the end. So this function computes a buffer size to
843
  # add which is the biggest of:
@@ -847,7 +727,6 @@ def get_balanced_memory(
847
  no_split_module_classes = []
848
  elif not isinstance(no_split_module_classes, (list, tuple)):
849
  no_split_module_classes = [no_split_module_classes]
850
-
851
  # Identify the size of the no_split_block modules
852
  if len(no_split_module_classes) > 0:
853
  no_split_children = {}
@@ -860,13 +739,11 @@ def get_balanced_memory(
860
  class_name = submodule.__class__.__name__
861
  if class_name in no_split_module_classes and class_name not in no_split_children:
862
  no_split_children[class_name] = size
863
-
864
  if set(no_split_children.keys()) == set(no_split_module_classes):
865
  break
866
  buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
867
  else:
868
  buffer = 0
869
-
870
  # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters
871
  leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
872
  module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}
@@ -875,7 +752,6 @@ def get_balanced_memory(
875
  mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1))
876
  buffer = int(1.25 * max(buffer, mean_leaves))
877
  per_gpu += buffer
878
-
879
  # Sorted list of GPU ids (we may have some gpu ids not included in our max_memory list - let's ignore them)
880
  gpus_idx_list = list(
881
  sorted(
@@ -885,14 +761,10 @@ def get_balanced_memory(
885
  # The last device is left with max_memory just in case the buffer is not enough.
886
  for idx in gpus_idx_list[:-1]:
887
  max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx])
888
-
889
  if low_zero:
890
  min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
891
  max_memory[0] = min(min_zero, max_memory[0])
892
-
893
  return max_memory
894
-
895
-
896
  def calculate_maximum_sizes(model: torch.nn.Module):
897
  "Computes the total size of the model and its largest layer"
898
  sizes = compute_module_sizes(model)
@@ -900,7 +772,6 @@ def calculate_maximum_sizes(model: torch.nn.Module):
900
  no_split_modules = getattr(model, "_no_split_modules", None)
901
  if no_split_modules is None:
902
  no_split_modules = []
903
-
904
  modules_to_treat = (
905
  list(model.named_parameters(recurse=False))
906
  + list(model.named_children())
@@ -909,8 +780,6 @@ def calculate_maximum_sizes(model: torch.nn.Module):
909
  largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules)
910
  total_size = sizes[""]
911
  return total_size, largest_layer
912
-
913
-
914
  def infer_auto_device_map(
915
  model: nn.Module,
916
  max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
@@ -929,14 +798,10 @@ def infer_auto_device_map(
929
  - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
930
  - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
931
  that has the largest size.
932
-
933
  <Tip>
934
-
935
  All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
936
  meta device (as it would if initialized within the `init_empty_weights` context manager).
937
-
938
  </Tip>
939
-
940
  Args:
941
  model (`torch.nn.Module`):
942
  The model to analyze.
@@ -961,12 +826,10 @@ def infer_auto_device_map(
961
  no_split_module_classes = []
962
  elif not isinstance(no_split_module_classes, (list, tuple)):
963
  no_split_module_classes = [no_split_module_classes]
964
-
965
  devices = list(max_memory.keys())
966
  if "disk" not in devices:
967
  devices.append("disk")
968
  gpus = [device for device in devices if device not in ["cpu", "disk"]]
969
-
970
  # Devices that need to keep space for a potential offloaded layer.
971
  if "mps" in gpus:
972
  main_devices = ["mps"]
@@ -974,19 +837,15 @@ def infer_auto_device_map(
974
  main_devices = [gpus[0], "cpu"]
975
  else:
976
  main_devices = ["cpu"]
977
-
978
  module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
979
  tied_parameters = find_tied_parameters(model)
980
-
981
  if check_tied_parameters_in_config(model) and len(tied_parameters) == 0:
982
  logger.warn(
983
  "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function."
984
  )
985
-
986
  device_map = OrderedDict()
987
  current_device = 0
988
  current_memory_used = 0
989
-
990
  # Direct submodules and parameters
991
  modules_to_treat = (
992
  list(model.named_parameters(recurse=False))
@@ -995,7 +854,6 @@ def infer_auto_device_map(
995
  )
996
  # Initialize the maximum layer size, to know how much space to keep in memory
997
  max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)
998
-
999
  # Ready? This is going to be a bit messy.
1000
  while len(modules_to_treat) > 0:
1001
  name, module = modules_to_treat.pop(0)
@@ -1011,7 +869,6 @@ def infer_auto_device_map(
1011
  )
1012
  # Assess size needed
1013
  module_size = module_sizes[name]
1014
-
1015
  # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module
1016
  # and the other is not.
1017
  tied_param_goups = [
@@ -1025,7 +882,6 @@ def infer_auto_device_map(
1025
  tied_params = sum([[p for p in tied_group if name not in p] for tied_group in tied_param_goups], [])
1026
  if verbose and len(tied_params) > 0:
1027
  print(f" So those parameters need to be taken into account {tied_params}")
1028
-
1029
  device = devices[current_device]
1030
  current_max_size = max_memory[device] if device != "disk" else None
1031
  # Reduce max size available by the largest layer.
@@ -1059,7 +915,6 @@ def infer_auto_device_map(
1059
  module_sizes,
1060
  no_split_module_classes,
1061
  )
1062
-
1063
  # Case 2, it fits! We're not entirely out of the woods though, because we may have some tied parameters.
1064
  elif len(tied_params) > 0:
1065
  # First locate all tied modules
@@ -1074,12 +929,10 @@ def infer_auto_device_map(
1074
  f" It looks like {name} is going to fit on {devices[current_device]} but we have tied "
1075
  f"parameters to account for.\n - Names {tied_params}\n - Module names {tied_module_names}"
1076
  )
1077
-
1078
  # Let's see if it all fits first
1079
  module_size_with_ties = module_size
1080
  for tied_param, tied_module_name in zip(tied_params, tied_module_names):
1081
  module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param]
1082
-
1083
  if current_max_size is None or current_memory_used + module_size_with_ties <= current_max_size:
1084
  # We really really fit!
1085
  if verbose:
@@ -1094,7 +947,6 @@ def infer_auto_device_map(
1094
  ]
1095
  modules_to_treat.pop(tied_module_index)
1096
  device_map[tied_module_name] = devices[current_device]
1097
-
1098
  else:
1099
  # We don't fit with the tied modules. Next question is: can we split one of the tied modules to make it
1100
  # smaller or do we need to go on the next device?
@@ -1109,13 +961,11 @@ def infer_auto_device_map(
1109
  if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:
1110
  # can't break this one.
1111
  continue
1112
-
1113
  if verbose:
1114
  print(f"Splitting {tied_module_name}.")
1115
  tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children
1116
  tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children]
1117
  tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0]
1118
-
1119
  modules_to_treat = (
1120
  [(name, module)]
1121
  + modules_to_treat[:tied_module_index]
@@ -1130,7 +980,6 @@ def infer_auto_device_map(
1130
  )
1131
  split_happened = True
1132
  break
1133
-
1134
  if not split_happened:
1135
  # If the tied module is not split, we go to the next device
1136
  if verbose:
@@ -1138,7 +987,6 @@ def infer_auto_device_map(
1138
  current_device += 1
1139
  modules_to_treat = [(name, module)] + modules_to_treat
1140
  current_memory_used = 0
1141
-
1142
  else:
1143
  if verbose:
1144
  if current_max_size is None:
@@ -1150,16 +998,12 @@ def infer_auto_device_map(
1150
  )
1151
  current_memory_used += module_size
1152
  device_map[name] = devices[current_device]
1153
-
1154
  if clean_result:
1155
  device_map = clean_device_map(device_map)
1156
  return device_map
1157
-
1158
-
1159
  def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):
1160
  """
1161
  Checks that a device map covers everything in a given model.
1162
-
1163
  Args:
1164
  model (`torch.nn.Module`): The model to check the device map against.
1165
  device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
@@ -1180,13 +1024,10 @@ def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, tor
1180
  raise ValueError(
1181
  f"The device_map provided does not give any device for the following parameters: {non_covered_params}"
1182
  )
1183
-
1184
-
1185
  def load_state_dict(checkpoint_file, device_map=None):
1186
  """
1187
  Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the
1188
  weights can be fast-loaded directly on the GPU.
1189
-
1190
  Args:
1191
  checkpoint_file (`str`): The path to the checkpoint to load.
1192
  device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
@@ -1197,14 +1038,12 @@ def load_state_dict(checkpoint_file, device_map=None):
1197
  with safe_open(checkpoint_file, framework="pt") as f:
1198
  metadata = f.metadata()
1199
  weight_names = f.keys()
1200
-
1201
  if metadata is None:
1202
  logger.warn(
1203
  f"The safetensors archive passed at {checkpoint_file} does not contain metadata. "
1204
  "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata."
1205
  )
1206
  metadata = {"format": "pt"}
1207
-
1208
  if metadata.get("format") not in ["pt", "tf", "flax"]:
1209
  raise OSError(
1210
  f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure "
@@ -1218,12 +1057,10 @@ def load_state_dict(checkpoint_file, device_map=None):
1218
  # if we only have one device we can load everything directly
1219
  if len(set(device_map.values())) == 1:
1220
  return safe_load_file(checkpoint_file, device=list(device_map.values())[0])
1221
-
1222
  devices = list(set(device_map.values()) - {"disk"})
1223
  # cpu device should always exist as fallback option
1224
  if "cpu" not in devices:
1225
  devices.append("cpu")
1226
-
1227
  # For each device, get the weights that go there
1228
  device_weights = {device: [] for device in devices}
1229
  for module_name, device in device_map.items():
@@ -1231,7 +1068,6 @@ def load_state_dict(checkpoint_file, device_map=None):
1231
  device_weights[device].extend(
1232
  [k for k in weight_names if k == module_name or k.startswith(module_name + ".")]
1233
  )
1234
-
1235
  # all weights that haven't defined a device should be loaded on CPU
1236
  device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])])
1237
  tensors = {}
@@ -1256,22 +1092,17 @@ def load_state_dict(checkpoint_file, device_map=None):
1256
  progress_bar.update()
1257
  if progress_bar is not None:
1258
  progress_bar.close()
1259
-
1260
  return tensors
1261
  else:
1262
  return torch.load(checkpoint_file, map_location=torch.device("cpu"))
1263
-
1264
-
1265
  def get_state_dict_offloaded_model(model: nn.Module):
1266
  """
1267
  Returns the state dictionary for an offloaded model via iterative onloading
1268
-
1269
  Args:
1270
  model (`torch.nn.Module`):
1271
  The offloaded model we want to save
1272
  """
1273
  from ..hooks import AlignDevicesHook
1274
-
1275
  state_dict = {}
1276
  placeholders = set()
1277
  for name, module in model.named_modules():
@@ -1293,7 +1124,6 @@ def get_state_dict_offloaded_model(model: nn.Module):
1293
  module._hf_hook.execution_device = original_device
1294
  else:
1295
  module_state_dict = module.state_dict()
1296
-
1297
  for key in module_state_dict:
1298
  # ignore placeholder parameters that are still on the meta device
1299
  if module_state_dict[key].device == torch.device("meta"):
@@ -1306,10 +1136,7 @@ def get_state_dict_offloaded_model(model: nn.Module):
1306
  placeholders.remove(key)
1307
  if placeholders:
1308
  logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}")
1309
-
1310
  return state_dict
1311
-
1312
-
1313
  def load_checkpoint_in_model(
1314
  model: nn.Module,
1315
  checkpoint: Union[str, os.PathLike],
@@ -1324,14 +1151,10 @@ def load_checkpoint_in_model(
1324
  """
1325
  Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
1326
  loaded.
1327
-
1328
  <Tip warning={true}>
1329
-
1330
  Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To
1331
  group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].
1332
-
1333
  </Tip>
1334
-
1335
  Args:
1336
  model (`torch.nn.Module`):
1337
  The model in which we want to load a checkpoint.
@@ -1357,32 +1180,26 @@ def load_checkpoint_in_model(
1357
  A list of the modules that we keep in `torch.float32` dtype.
1358
  offload_8bit_bnb (`bool`, *optional*):
1359
  Whether or not to enable offload of 8-bit modules on cpu/disk.
1360
-
1361
  """
1362
  if offload_8bit_bnb:
1363
  from .bnb import quantize_and_offload_8bit
1364
-
1365
  tied_params = find_tied_parameters(model)
1366
-
1367
  if check_tied_parameters_in_config(model) and len(tied_params) == 0:
1368
  logger.warn(
1369
  "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function."
1370
  )
1371
  if device_map is not None:
1372
  check_tied_parameters_on_same_device(tied_params, device_map)
1373
-
1374
  if offload_folder is None and device_map is not None and "disk" in device_map.values():
1375
  raise ValueError(
1376
  "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`."
1377
  )
1378
  elif offload_folder is not None and device_map is not None and "disk" in device_map.values():
1379
  os.makedirs(offload_folder, exist_ok=True)
1380
-
1381
  if isinstance(dtype, str):
1382
  # We accept "torch.float16" or just "float16"
1383
  dtype = dtype.replace("torch.", "")
1384
  dtype = getattr(torch, dtype)
1385
-
1386
  checkpoint_files = None
1387
  index_filename = None
1388
  if os.path.isfile(checkpoint):
@@ -1416,24 +1233,19 @@ def load_checkpoint_in_model(
1416
  "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded "
1417
  f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}."
1418
  )
1419
-
1420
  if index_filename is not None:
1421
  checkpoint_folder = os.path.split(index_filename)[0]
1422
  with open(index_filename, "r") as f:
1423
  index = json.loads(f.read())
1424
-
1425
  if "weight_map" in index:
1426
  index = index["weight_map"]
1427
  checkpoint_files = sorted(list(set(index.values())))
1428
  checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]
1429
-
1430
  # Logic for missing/unexpected keys goes here.
1431
-
1432
  offload_index = {}
1433
  if offload_state_dict:
1434
  state_dict_folder = tempfile.mkdtemp()
1435
  state_dict_index = {}
1436
-
1437
  buffer_names = [name for name, _ in model.named_buffers()]
1438
  for checkpoint_file in checkpoint_files:
1439
  checkpoint = load_state_dict(checkpoint_file, device_map=device_map)
@@ -1444,9 +1256,7 @@ def load_checkpoint_in_model(
1444
  # skip SCB parameter (for 8-bit serialization)
1445
  if "SCB" in param_name:
1446
  continue
1447
-
1448
  module_name = param_name
1449
-
1450
  while len(module_name) > 0 and module_name not in device_map:
1451
  module_name = ".".join(module_name.split(".")[:-1])
1452
  if module_name == "" and "" not in device_map:
@@ -1463,13 +1273,11 @@ def load_checkpoint_in_model(
1463
  break
1464
  if proceed:
1465
  new_dtype = torch.float32
1466
-
1467
  if "weight" in param_name and param_name.replace("weight", "SCB") in checkpoint.keys():
1468
  if param.dtype == torch.int8:
1469
  fp16_statistics = checkpoint[param_name.replace("weight", "SCB")]
1470
  else:
1471
  fp16_statistics = None
1472
-
1473
  if param_device == "disk":
1474
  if offload_buffers or param_name not in buffer_names:
1475
  if new_dtype is None:
@@ -1501,25 +1309,18 @@ def load_checkpoint_in_model(
1501
  dtype=new_dtype,
1502
  fp16_statistics=fp16_statistics,
1503
  )
1504
-
1505
  # Force Python to clean up.
1506
  del checkpoint
1507
  gc.collect()
1508
-
1509
  save_offload_index(offload_index, offload_folder)
1510
-
1511
  # Load back offloaded state dict on CPU
1512
  if offload_state_dict:
1513
  load_offloaded_weights(model, state_dict_index, state_dict_folder)
1514
  shutil.rmtree(state_dict_folder)
1515
-
1516
  retie_parameters(model, tied_params)
1517
-
1518
-
1519
  def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None):
1520
  """
1521
  Return a context manager for autocasting mixed precision
1522
-
1523
  Args:
1524
  native_amp (`bool`, *optional*, defaults to False):
1525
  Whether mixed precision is actually enabled.
 
1
  WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
 
2
  logger = logging.getLogger(__name__)
 
 
3
  def check_device_same(first_device, second_device):
4
  """
5
  Utility method to check if two `torch` devices are similar. When dealing with CUDA devices, torch returns `False`
6
  for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same
 
7
  Args:
8
  first_device (`torch.device`):
9
  First device to check
 
12
  """
13
  if first_device.type != second_device.type:
14
  return False
 
15
  if first_device.type == "cuda" and first_device.index is None:
16
  # In case the first_device is a cuda device and has
17
  # the index attribute set to `None`, default it to `0`
18
  first_device = torch.device("cuda", index=0)
 
19
  if second_device.type == "cuda" and second_device.index is None:
20
  # In case the second_device is a cuda device and has
21
  # the index attribute set to `None`, default it to `0`
22
  second_device = torch.device("cuda", index=0)
 
23
  return first_device == second_device
 
 
24
  def convert_file_size_to_int(size: Union[int, str]):
25
  """
26
  Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
 
27
  Args:
28
  size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
 
29
  Example:
 
30
  ```py
31
  >>> convert_file_size_to_int("1MiB")
32
  1048576
 
56
  mem_size = int_size // 8 if size.endswith("b") else int_size
57
  except ValueError:
58
  raise ValueError(err_msg)
 
59
  if mem_size <= 0:
60
  raise ValueError(err_msg)
61
  return mem_size
 
 
62
  def dtype_byte_size(dtype: torch.dtype):
63
  """
64
  Returns the size (in bytes) occupied by one parameter of type `dtype`.
 
65
  Example:
 
66
  ```py
67
  >>> dtype_byte_size(torch.float32)
68
  4
 
79
  raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
80
  bit_size = int(bit_search.groups()[0])
81
  return bit_size // 8
 
 
82
  def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]:
83
  """
84
  Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
 
111
  storage_ptr = 0
112
  # On torch >=2.0 this is the tensor size
113
  storage_size = tensor.nelement() * _SIZE[tensor.dtype]
 
114
  return tensor.device, storage_ptr, storage_size
 
 
115
  def shard_checkpoint(
116
  state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME
117
  ):
118
  """
119
  Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
120
  given size.
 
121
  The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
122
  optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
123
  limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
124
  [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
 
125
  <Tip warning={true}>
 
126
  If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
127
  have a size greater than `max_shard_size`.
 
128
  </Tip>
 
129
  Args:
130
  state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
131
  max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
 
135
  The name of the model save file.
136
  """
137
  max_shard_size = convert_file_size_to_int(max_shard_size)
 
138
  sharded_state_dicts = [{}]
139
  last_block_size = 0
140
  total_size = 0
141
  storage_id_to_block = {}
 
142
  for key, weight in state_dict.items():
143
  # when bnb serialization is used the weights in the state dict can be strings
144
  # check: https://github.com/huggingface/transformers/pull/24416 for more details
 
146
  continue
147
  else:
148
  storage_id = id_tensor_storage(weight)
 
149
  # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block`
150
  if storage_id in storage_id_to_block:
151
  block_id = storage_id_to_block[storage_id]
152
  sharded_state_dicts[block_id][key] = weight
153
  continue
 
154
  weight_size = weight.numel() * dtype_byte_size(weight.dtype)
 
155
  # If this weight is going to tip over the maximum size, we split.
156
  if last_block_size + weight_size > max_shard_size:
157
  sharded_state_dicts.append({})
158
  last_block_size = 0
 
159
  sharded_state_dicts[-1][key] = weight
160
  last_block_size += weight_size
161
  total_size += weight_size
162
  storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1
 
163
  # If we only have one shard, we return it
164
  if len(sharded_state_dicts) == 1:
165
  return {weights_name: sharded_state_dicts[0]}, None
 
166
  # Otherwise, let's build the index
167
  weight_map = {}
168
  shards = {}
 
174
  shards[shard_file] = shard
175
  for key in shard.keys():
176
  weight_map[key] = shard_file
 
177
  # Add the metadata
178
  metadata = {"total_size": total_size}
179
  index = {"metadata": metadata, "weight_map": weight_map}
180
  return shards, index
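A minimal sketch of how `shard_checkpoint` might be called, assuming it is importable from `accelerate.utils`; the toy model and the `"200MB"` limit are made up for illustration:

```py
import torch.nn as nn

from accelerate.utils import shard_checkpoint

# Toy model (~8MB of weights), so everything fits in a single shard.
model = nn.Sequential(nn.Linear(1024, 1024), nn.Linear(1024, 1024))
shards, index = shard_checkpoint(model.state_dict(), max_shard_size="200MB")

# With a single shard, `index` is None and the only key is the default weights name.
print(list(shards.keys()), index)  # ['pytorch_model.bin'] None
```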
 
 
181
  def set_module_tensor_to_device(
182
  module: nn.Module,
183
  tensor_name: str,
 
189
  """
190
  A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
191
  `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).
 
192
  Args:
193
  module (`torch.nn.Module`):
194
  The module in which the tensor we want to move lives.
 
213
  raise ValueError(f"{module} has no attribute {split}.")
214
  module = new_module
215
  tensor_name = splits[-1]
 
216
  if tensor_name not in module._parameters and tensor_name not in module._buffers:
217
  raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
218
  is_buffer = tensor_name in module._buffers
219
  old_value = getattr(module, tensor_name)
 
220
  if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
221
  raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")
 
222
  if value is not None:
223
  if old_value.shape != value.shape:
224
  raise ValueError(
225
  f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this looks incorrect.'
226
  )
 
227
  if dtype is None:
228
  # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model
229
  value = value.to(old_value.dtype)
230
  elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
231
  value = value.to(dtype)
 
232
  param = module._parameters[tensor_name] if tensor_name in module._parameters else None
233
  param_cls = type(param)
 
234
  device_quantization = None
235
  with torch.no_grad():
236
  # leave it on cpu first before moving it to cuda
 
251
  if dtype is not None and device in ["meta", torch.device("meta")]:
252
  if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
253
  new_value = new_value.to(dtype)
 
254
  if not is_buffer:
255
  module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad)
256
  elif isinstance(value, torch.Tensor):
 
306
  torch.npu.empty_cache()
307
  else:
308
  torch.cuda.empty_cache()
 
 
309
  def named_module_tensors(
310
  module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False
311
  ):
312
  """
313
  A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
314
  it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.
 
315
  Args:
316
  module (`torch.nn.Module`):
317
  The module we want the tensors on.
 
325
  """
326
  for named_parameter in module.named_parameters(recurse=recurse):
327
  yield named_parameter
 
328
  if include_buffers:
329
  non_persistent_buffers = set()
330
  if remove_non_persistent:
 
333
  name, _ = named_buffer
334
  if name not in non_persistent_buffers:
335
  yield named_buffer
 
 
336
  def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
337
  """
338
  Gather all non-persistent buffers of a given module into a set
 
339
  Args:
340
  module (`nn.Module`):
341
  The module we want the non persistent buffers on.
 
346
  if recurse:
347
  for _, m in module.named_modules():
348
  non_persistent_buffers_set |= m._non_persistent_buffers_set
 
349
  return non_persistent_buffers_set
 
 
350
  class FindTiedParametersResult(list):
351
  """
352
  This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not
 
354
  """
355
  def __init__(self, *args, **kwargs):
356
  super().__init__(*args, **kwargs)
 
357
  def values(self):
358
  # TODO: at the next Transformers release (4.28.0) issue a deprecation warning here.
359
  return sum([x[1:] for x in self], [])
 
 
360
  def check_tied_parameters_in_config(model: nn.Module):
361
  """
362
  Check if there is any indication in the given model that some weights should be tied.
 
363
  Args:
364
  model (`torch.nn.Module`): The model to inspect
 
365
  Returns:
366
  bool: True if the model needs to have tied weights
367
  """
 
369
  has_tied_word_embedding = False
370
  has_tied_encoder_decoder = False
371
  has_tied_module = False
 
372
  if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]:
373
  has_tied_word_embedding = (
374
  hasattr(model, "config")
 
381
  and getattr(model.config, "tie_encoder_decoder", False)
382
  )
383
  has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules())
 
384
  return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module])
 
 
385
  def _get_param_device(param, device_map):
386
  if param in device_map:
387
  return device_map[param]
 
390
  raise ValueError(f"The `device_map` does not contain the module {param}.")
391
  else:
392
  return _get_param_device(parent_param, device_map)
 
 
393
  def check_tied_parameters_on_same_device(tied_params, device_map):
394
  """
395
  Check if tied parameters are on the same device
 
396
  Args:
397
  tied_params (`List[List[str]]`):
398
  A list of lists of parameter names being all tied together.
 
399
  device_map (`Dict[str, Union[int, str, torch.device]]`):
400
  A map that specifies where each submodule should go.
 
401
  """
402
  for tie_param in tied_params:
403
  tie_param_devices = {}
 
408
  f"Tied parameters are on different devices: {tie_param_devices}. "
409
  "Please modify your custom device map or set `device_map='auto'`. "
410
  )
 
 
411
  def find_tied_parameters(model: nn.Module, **kwargs):
412
  """
413
  Find the tied parameters in a given model.
 
414
  <Tip warning={true}>
 
415
  The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore
416
  them.
 
417
  </Tip>
 
418
  Args:
419
  model (`torch.nn.Module`): The model to inspect.
 
420
  Returns:
421
  List[List[str]]: A list of lists of parameter names being all tied together.
 
422
  Example:
 
423
  ```py
424
  >>> from collections import OrderedDict
425
  >>> import torch.nn as nn
 
426
  >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
427
  >>> model.linear2.weight = model.linear1.weight
428
  >>> find_tied_parameters(model)
 
433
  named_parameters = kwargs.get("named_parameters", None)
434
  prefix = kwargs.get("prefix", "")
435
  result = kwargs.get("result", {})
 
436
  if named_parameters is None:
437
  named_parameters = {n: p for n, p in model.named_parameters()}
438
  else:
 
448
  if new_name not in result:
449
  result[new_name] = []
450
  result[new_name].append(full_name)
 
451
  # Once we have treated direct parameters, we move to the child modules.
452
  for name, child in model.named_children():
453
  child_name = name if prefix == "" else f"{prefix}.{name}"
454
  find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result)
 
455
  return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()])
 
 
456
  def retie_parameters(model, tied_params):
457
  """
458
  Reties tied parameters in a given model if the link was broken (for instance when adding hooks).
 
459
  Args:
460
  model (`torch.nn.Module`):
461
  The model in which to retie parameters.
 
481
  for split in splits[:-1]:
482
  module = getattr(module, split)
483
  setattr(module, splits[-1], param_to_tie)
 
 
484
  def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype:
485
  """
486
  Just does torch.dtype(dtype) if necessary.
 
490
  dtype = dtype.replace("torch.", "")
491
  dtype = getattr(torch, dtype)
492
  return dtype
 
 
493
  def compute_module_sizes(
494
  model: nn.Module,
495
  dtype: Optional[Union[str, torch.device]] = None,
 
515
  name_parts = name.split(".")
516
  for idx in range(len(name_parts) + 1):
517
  module_sizes[".".join(name_parts[:idx])] += size
 
518
  return module_sizes
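As a quick illustration of the bookkeeping above (a sketch, assuming `compute_module_sizes` is importable from `accelerate.utils`): every prefix of a parameter name accumulates that parameter's size, with the empty string holding the total.

```py
import torch.nn as nn

from accelerate.utils import compute_module_sizes

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
sizes = compute_module_sizes(model)

# The empty key is the whole model; "0" is the first submodule; leaves are the parameters.
assert sizes[""] == sizes["0"] == sizes["0.weight"] + sizes["0.bias"]
print(sizes["0.weight"], sizes["0.bias"])  # 64 16 (float32 bytes)
```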
 
 
519
  def get_max_layer_size(
520
  modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str]
521
  ):
 
524
  definition of a layer being:
525
  - a module with no direct children (just parameters and buffers)
526
  - a module whose class name is in the list `no_split_module_classes`
 
527
  Args:
528
  modules (`List[Tuple[str, torch.nn.Module]]`):
529
  The list of named modules where we want to determine the maximum layer size.
 
531
  A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`).
532
  no_split_module_classes (`List[str]`):
533
  A list of class names for layers we don't want to be split.
 
534
  Returns:
535
  `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.
536
  """
 
551
  else:
552
  modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat
553
  return max_size, layer_names
 
 
554
  def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None):
555
  """
556
  Get the maximum memory available if nothing is passed; otherwise, converts the string values to ints.
557
  """
558
  import psutil
 
559
  if max_memory is None:
560
  if not (torch.cuda.is_available() or is_npu_available() or is_xpu_available()):
561
  max_memory = {}
 
562
  else:
563
  # Make sure CUDA is initialized on each GPU to have the right memory info.
564
  if is_npu_available():
 
579
  else:
580
  max_memory["cpu"] = psutil.virtual_memory().available
581
  return max_memory
 
582
  for key in max_memory:
583
  if isinstance(max_memory[key], str):
584
  max_memory[key] = convert_file_size_to_int(max_memory[key])
 
585
  # Need to sort the devices by type to make sure that we allocate the gpus first.
586
  # As gpu/npu/xpu are represented by int, we need to sort them first.
587
  gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)]
 
605
  f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'"
606
  )
607
  max_memory = {k: max_memory[k] for k in all_devices}
 
608
  return max_memory
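A small sketch of the conversion behaviour described above (assuming `get_max_memory` is importable from `accelerate.utils`); passing `None` instead would query the devices actually present on the machine:

```py
from accelerate.utils import get_max_memory

# String sizes are converted to bytes; device keys are reordered so GPUs come first,
# then "cpu"/"disk".
max_memory = get_max_memory({0: "10GiB", "cpu": "30GiB"})
print(max_memory)  # {0: 10737418240, 'cpu': 32212254720}
```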
 
 
609
  def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""):
610
  """
611
  Cleans a device_map by grouping all submodules that go on the same device together.
 
617
  for k in [k for k in device_map if k.startswith(prefix)]:
618
  del device_map[k]
619
  device_map[module_name] = values[0]
 
620
  # Recurse over the children
621
  children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)]
622
  idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1
623
  children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules)
624
  for child in children_modules:
625
  clean_device_map(device_map, module_name=child)
 
626
  return device_map
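A minimal sketch of the grouping this performs (assuming the helper is importable from `accelerate.utils`); the module names are hypothetical:

```py
from accelerate.utils import clean_device_map

device_map = {"block.linear1": 0, "block.linear2": 0, "lm_head": "cpu"}

# Both children of "block" sit on GPU 0, so they collapse into a single "block" entry;
# "lm_head" is left untouched.
print(clean_device_map(device_map))  # {'lm_head': 'cpu', 'block': 0}
```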
 
 
627
  def load_offloaded_weights(model, index, offload_folder):
628
  """
629
  Loads the weights from the offload folder into the model.
 
630
  Args:
631
  model (`torch.nn.Module`):
632
  The model to load the weights into.
 
651
  tensor_file = os.path.join(offload_folder, f"{param_name}.dat")
652
  weight = load_offloaded_weight(tensor_file, metadata)
653
  set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics)
 
 
654
  def get_balanced_memory(
655
  model: nn.Module,
656
  max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
 
661
  ):
662
  """
663
  Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.
 
664
  <Tip>
 
665
  All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
666
  meta device (as it would if initialized within the `init_empty_weights` context manager).
 
667
  </Tip>
 
668
  Args:
669
  model (`torch.nn.Module`):
670
  The model to analyze.
 
685
  # Get default / clean up max_memory
686
  user_not_set_max_memory = max_memory is None
687
  max_memory = get_max_memory(max_memory)
 
688
  if is_npu_available():
689
  num_devices = len([d for d in max_memory if torch.device(d).type == "npu" and max_memory[d] > 0])
690
  elif is_xpu_available():
 
701
  )
702
  else:
703
  num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0])
 
704
  if num_devices == 0:
705
  return max_memory
 
706
  if num_devices == 1:
707
  # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
708
  low_zero = False
 
716
  "You can set `max_memory` in to a higher value to use more memory (at your own risk)."
717
  )
718
  break # only one device
 
719
  module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
720
  per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices)
 
721
  # We can't just set the memory to model_size // num_devices as it will end up being too small: each GPU will get
722
  # slightly fewer layers and some layers will end up offloaded at the end. So this function computes a buffer size to
723
  # add which is the biggest of:
 
727
  no_split_module_classes = []
728
  elif not isinstance(no_split_module_classes, (list, tuple)):
729
  no_split_module_classes = [no_split_module_classes]
 
730
  # Identify the size of the no_split_block modules
731
  if len(no_split_module_classes) > 0:
732
  no_split_children = {}
 
739
  class_name = submodule.__class__.__name__
740
  if class_name in no_split_module_classes and class_name not in no_split_children:
741
  no_split_children[class_name] = size
 
742
  if set(no_split_children.keys()) == set(no_split_module_classes):
743
  break
744
  buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
745
  else:
746
  buffer = 0
 
747
  # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters
748
  leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
749
  module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}
 
752
  mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1))
753
  buffer = int(1.25 * max(buffer, mean_leaves))
754
  per_gpu += buffer
 
755
  # Sorted list of GPU ids (we may have some gpu ids not included in our max_memory list - let's ignore them)
756
  gpus_idx_list = list(
757
  sorted(
 
761
  # The last device is left with max_memory just in case the buffer is not enough.
762
  for idx in gpus_idx_list[:-1]:
763
  max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx])
 
764
  if low_zero:
765
  min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
766
  max_memory[0] = min(min_zero, max_memory[0])
 
767
  return max_memory
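For instance (a sketch, assuming `get_balanced_memory` is importable from `accelerate.utils`): on a multi-GPU machine the returned dictionary gives each GPU roughly `model_size / num_devices` plus the buffer described above, and it can be passed directly to [`infer_auto_device_map`] as `max_memory`.

```py
import torch.nn as nn

from accelerate.utils import get_balanced_memory

# Toy model; on a 2-GPU machine this returns something like {0: ..., 1: ..., "cpu": ...}.
model = nn.Sequential(*[nn.Linear(512, 512) for _ in range(8)])
max_memory = get_balanced_memory(model, low_zero=False)
```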
 
 
768
  def calculate_maximum_sizes(model: torch.nn.Module):
769
  "Computes the total size of the model and its largest layer"
770
  sizes = compute_module_sizes(model)
 
772
  no_split_modules = getattr(model, "_no_split_modules", None)
773
  if no_split_modules is None:
774
  no_split_modules = []
 
775
  modules_to_treat = (
776
  list(model.named_parameters(recurse=False))
777
  + list(model.named_children())
 
780
  largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules)
781
  total_size = sizes[""]
782
  return total_size, largest_layer
 
 
783
  def infer_auto_device_map(
784
  model: nn.Module,
785
  max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
 
798
  - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
799
  - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
800
  that has the largest size.
 
801
  <Tip>
 
802
  All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
803
  meta device (as it would if initialized within the `init_empty_weights` context manager).
 
804
  </Tip>
 
805
  Args:
806
  model (`torch.nn.Module`):
807
  The model to analyze.
 
826
  no_split_module_classes = []
827
  elif not isinstance(no_split_module_classes, (list, tuple)):
828
  no_split_module_classes = [no_split_module_classes]
 
829
  devices = list(max_memory.keys())
830
  if "disk" not in devices:
831
  devices.append("disk")
832
  gpus = [device for device in devices if device not in ["cpu", "disk"]]
 
833
  # Devices that need to keep space for a potential offloaded layer.
834
  if "mps" in gpus:
835
  main_devices = ["mps"]
 
837
  main_devices = [gpus[0], "cpu"]
838
  else:
839
  main_devices = ["cpu"]
 
840
  module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes)
841
  tied_parameters = find_tied_parameters(model)
 
842
  if check_tied_parameters_in_config(model) and len(tied_parameters) == 0:
843
  logger.warn(
844
  "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function."
845
  )
 
846
  device_map = OrderedDict()
847
  current_device = 0
848
  current_memory_used = 0
 
849
  # Direct submodules and parameters
850
  modules_to_treat = (
851
  list(model.named_parameters(recurse=False))
 
854
  )
855
  # Initialize the maximum layer size, to know how much space to keep in memory
856
  max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)
 
857
  # Ready? This is going to be a bit messy.
858
  while len(modules_to_treat) > 0:
859
  name, module = modules_to_treat.pop(0)
 
869
  )
870
  # Assess size needed
871
  module_size = module_sizes[name]
 
872
  # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module
873
  # and the other is not.
874
  tied_param_goups = [
 
882
  tied_params = sum([[p for p in tied_group if name not in p] for tied_group in tied_param_goups], [])
883
  if verbose and len(tied_params) > 0:
884
  print(f" So those parameters need to be taken into account {tied_params}")
 
885
  device = devices[current_device]
886
  current_max_size = max_memory[device] if device != "disk" else None
887
  # Reduce max size available by the largest layer.
 
915
  module_sizes,
916
  no_split_module_classes,
917
  )
 
918
  # Case 2, it fits! We're not entirely out of the woods though, because we may have some tied parameters.
919
  elif len(tied_params) > 0:
920
  # First locate all tied modules
 
929
  f" It looks like {name} is going to fit on {devices[current_device]} but we have tied "
930
  f"parameters to account for.\n - Names {tied_params}\n - Module names {tied_module_names}"
931
  )
 
932
  # Let's see if it all fits first
933
  module_size_with_ties = module_size
934
  for tied_param, tied_module_name in zip(tied_params, tied_module_names):
935
  module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param]
 
936
  if current_max_size is None or current_memory_used + module_size_with_ties <= current_max_size:
937
  # We really really fit!
938
  if verbose:
 
947
  ]
948
  modules_to_treat.pop(tied_module_index)
949
  device_map[tied_module_name] = devices[current_device]
 
950
  else:
951
  # We don't fit with the tied modules. Next question is: can we split one of the tied modules to make it
952
  # smaller or do we need to go on the next device?
 
961
  if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:
962
  # can't break this one.
963
  continue
 
964
  if verbose:
965
  print(f"Splitting {tied_module_name}.")
966
  tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children
967
  tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children]
968
  tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0]
 
969
  modules_to_treat = (
970
  [(name, module)]
971
  + modules_to_treat[:tied_module_index]
 
980
  )
981
  split_happened = True
982
  break
 
983
  if not split_happened:
984
  # If the tied module is not split, we go to the next device
985
  if verbose:
 
987
  current_device += 1
988
  modules_to_treat = [(name, module)] + modules_to_treat
989
  current_memory_used = 0
 
990
  else:
991
  if verbose:
992
  if current_max_size is None:
 
998
  )
999
  current_memory_used += module_size
1000
  device_map[name] = devices[current_device]
 
1001
  if clean_result:
1002
  device_map = clean_device_map(device_map)
1003
  return device_map
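Putting the pieces together, a minimal sketch of the intended workflow (`MyModel` and the class name `"Block"` are placeholders, not part of this file): analyze an empty-weights model, then use the resulting map with [`dispatch_model`] or [`load_checkpoint_and_dispatch`].

```py
from accelerate import init_empty_weights
from accelerate.utils import infer_auto_device_map

# Instantiate on the meta device: only sizes/dtypes matter for the analysis.
with init_empty_weights():
    model = MyModel()  # placeholder for any nn.Module

device_map = infer_auto_device_map(
    model,
    max_memory={0: "10GiB", "cpu": "30GiB"},
    no_split_module_classes=["Block"],  # layers that must stay on a single device
)
```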
 
 
1004
  def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]):
1005
  """
1006
  Checks that a device map covers everything in a given model.
 
1007
  Args:
1008
  model (`torch.nn.Module`): The model to check the device map against.
1009
  device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
 
1024
  raise ValueError(
1025
  f"The device_map provided does not give any device for the following parameters: {non_covered_params}"
1026
  )
 
 
1027
  def load_state_dict(checkpoint_file, device_map=None):
1028
  """
1029
  Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the
1030
  weights can be fast-loaded directly on the GPU.
 
1031
  Args:
1032
  checkpoint_file (`str`): The path to the checkpoint to load.
1033
  device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
 
1038
  with safe_open(checkpoint_file, framework="pt") as f:
1039
  metadata = f.metadata()
1040
  weight_names = f.keys()
 
1041
  if metadata is None:
1042
  logger.warn(
1043
  f"The safetensors archive passed at {checkpoint_file} does not contain metadata. "
1044
  "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata."
1045
  )
1046
  metadata = {"format": "pt"}
 
1047
  if metadata.get("format") not in ["pt", "tf", "flax"]:
1048
  raise OSError(
1049
  f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure "
 
1057
  # if we only have one device we can load everything directly
1058
  if len(set(device_map.values())) == 1:
1059
  return safe_load_file(checkpoint_file, device=list(device_map.values())[0])
 
1060
  devices = list(set(device_map.values()) - {"disk"})
1061
  # cpu device should always exist as a fallback option
1062
  if "cpu" not in devices:
1063
  devices.append("cpu")
 
1064
  # For each device, get the weights that go there
1065
  device_weights = {device: [] for device in devices}
1066
  for module_name, device in device_map.items():
 
1068
  device_weights[device].extend(
1069
  [k for k in weight_names if k == module_name or k.startswith(module_name + ".")]
1070
  )
 
1071
  # all weights that don't have a device assigned should be loaded on CPU
1072
  device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])])
1073
  tensors = {}
 
1092
  progress_bar.update()
1093
  if progress_bar is not None:
1094
  progress_bar.close()
 
1095
  return tensors
1096
  else:
1097
  return torch.load(checkpoint_file, map_location=torch.device("cpu"))
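A sketch of the behaviour described in the docstring (the file name and module names are hypothetical, and the function is assumed to be importable from `accelerate.utils.modeling`): with a safetensors file and a device map, each weight is materialized directly on its target device.

```py
from accelerate.utils.modeling import load_state_dict

# Weights under "transformer" are loaded straight onto GPU 0; the rest fall back to CPU.
state_dict = load_state_dict(
    "model.safetensors",  # hypothetical checkpoint file
    device_map={"transformer": 0, "lm_head": "cpu"},
)
```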
 
 
1098
  def get_state_dict_offloaded_model(model: nn.Module):
1099
  """
1100
  Returns the state dictionary for an offloaded model via iterative onloading
 
1101
  Args:
1102
  model (`torch.nn.Module`):
1103
  The offloaded model we want to save
1104
  """
1105
  from ..hooks import AlignDevicesHook
 
1106
  state_dict = {}
1107
  placeholders = set()
1108
  for name, module in model.named_modules():
 
1124
  module._hf_hook.execution_device = original_device
1125
  else:
1126
  module_state_dict = module.state_dict()
 
1127
  for key in module_state_dict:
1128
  # ignore placeholder parameters that are still on the meta device
1129
  if module_state_dict[key].device == torch.device("meta"):
 
1136
  placeholders.remove(key)
1137
  if placeholders:
1138
  logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}")
 
1139
  return state_dict
 
 
1140
  def load_checkpoint_in_model(
1141
  model: nn.Module,
1142
  checkpoint: Union[str, os.PathLike],
 
1151
  """
1152
  Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
1153
  loaded.
 
1154
  <Tip warning={true}>
 
1155
  Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To
1156
  group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`].
 
1157
  </Tip>
 
1158
  Args:
1159
  model (`torch.nn.Module`):
1160
  The model in which we want to load a checkpoint.
 
1180
  A list of the modules that we keep in `torch.float32` dtype.
1181
  offload_8bit_bnb (`bool`, *optional*):
1182
  Whether or not to enable offload of 8-bit modules on cpu/disk.
 
1183
  """
1184
  if offload_8bit_bnb:
1185
  from .bnb import quantize_and_offload_8bit
 
1186
  tied_params = find_tied_parameters(model)
 
1187
  if check_tied_parameters_in_config(model) and len(tied_params) == 0:
1188
  logger.warn(
1189
  "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function."
1190
  )
1191
  if device_map is not None:
1192
  check_tied_parameters_on_same_device(tied_params, device_map)
 
1193
  if offload_folder is None and device_map is not None and "disk" in device_map.values():
1194
  raise ValueError(
1195
  "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`."
1196
  )
1197
  elif offload_folder is not None and device_map is not None and "disk" in device_map.values():
1198
  os.makedirs(offload_folder, exist_ok=True)
 
1199
  if isinstance(dtype, str):
1200
  # We accept "torch.float16" or just "float16"
1201
  dtype = dtype.replace("torch.", "")
1202
  dtype = getattr(torch, dtype)
 
1203
  checkpoint_files = None
1204
  index_filename = None
1205
  if os.path.isfile(checkpoint):
 
1233
  "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded "
1234
  f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}."
1235
  )
 
1236
  if index_filename is not None:
1237
  checkpoint_folder = os.path.split(index_filename)[0]
1238
  with open(index_filename, "r") as f:
1239
  index = json.loads(f.read())
 
1240
  if "weight_map" in index:
1241
  index = index["weight_map"]
1242
  checkpoint_files = sorted(list(set(index.values())))
1243
  checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files]
 
1244
  # Logic for missing/unexpected keys goes here.
 
1245
  offload_index = {}
1246
  if offload_state_dict:
1247
  state_dict_folder = tempfile.mkdtemp()
1248
  state_dict_index = {}
 
1249
  buffer_names = [name for name, _ in model.named_buffers()]
1250
  for checkpoint_file in checkpoint_files:
1251
  checkpoint = load_state_dict(checkpoint_file, device_map=device_map)
 
1256
  # skip SCB parameter (for 8-bit serialization)
1257
  if "SCB" in param_name:
1258
  continue
 
1259
  module_name = param_name
 
1260
  while len(module_name) > 0 and module_name not in device_map:
1261
  module_name = ".".join(module_name.split(".")[:-1])
1262
  if module_name == "" and "" not in device_map:
 
1273
  break
1274
  if proceed:
1275
  new_dtype = torch.float32
 
1276
  if "weight" in param_name and param_name.replace("weight", "SCB") in checkpoint.keys():
1277
  if param.dtype == torch.int8:
1278
  fp16_statistics = checkpoint[param_name.replace("weight", "SCB")]
1279
  else:
1280
  fp16_statistics = None
 
1281
  if param_device == "disk":
1282
  if offload_buffers or param_name not in buffer_names:
1283
  if new_dtype is None:
 
1309
  dtype=new_dtype,
1310
  fp16_statistics=fp16_statistics,
1311
  )
 
1312
  # Force Python to clean up.
1313
  del checkpoint
1314
  gc.collect()
 
1315
  save_offload_index(offload_index, offload_folder)
 
1316
  # Load back offloaded state dict on CPU
1317
  if offload_state_dict:
1318
  load_offloaded_weights(model, state_dict_index, state_dict_folder)
1319
  shutil.rmtree(state_dict_folder)
 
1320
  retie_parameters(model, tied_params)
 
 
1321
  def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None):
1322
  """
1323
  Return a context manager for autocasting mixed precision
 
1324
  Args:
1325
  native_amp (`bool`, *optional*, defaults to False):
1326
  Whether mixed precision is actually enabled.
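For orientation, a minimal usage sketch of `load_checkpoint_in_model` as shown above (not part of the diff; it assumes the upstream `accelerate` package, where the helper is importable from `accelerate.utils`, and uses a made-up toy model and paths):

```python
import torch
import torch.nn as nn

from accelerate.utils import load_checkpoint_in_model  # assumes upstream accelerate layout

# Toy model and checkpoint, purely illustrative.
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
torch.save(model.state_dict(), "tiny_checkpoint.bin")

# Map top-level submodules to devices; any "disk" entry requires offload_folder,
# matching the check at the top of load_checkpoint_in_model.
device_map = {"0": "cpu", "1": "cpu", "2": "disk"}

load_checkpoint_in_model(
    model,
    checkpoint="tiny_checkpoint.bin",
    device_map=device_map,
    offload_folder="offload",
)
```
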
src/utils/offload.py CHANGED
@@ -17,35 +17,26 @@ def offload_weight(weight, weight_name, offload_folder, index=None):
17
  file_array[:] = array[:]
18
  file_array.flush()
19
  return index
20
-
21
-
22
  def load_offloaded_weight(weight_file, weight_info):
23
  shape = tuple(weight_info["shape"])
24
  if shape == ():
25
  # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor
26
  shape = (1,)
27
-
28
  dtype = weight_info["dtype"]
29
  if dtype == "bfloat16":
30
  # NumPy does not support bfloat16 so this was saved as an int16
31
  dtype = "int16"
32
-
33
  weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r")
34
-
35
  if len(weight_info["shape"]) == 0:
36
  weight = weight[0]
37
  weight = torch.tensor(weight)
38
  if weight_info["dtype"] == "bfloat16":
39
  weight = weight.view(torch.bfloat16)
40
-
41
  return weight
42
-
43
-
44
  def save_offload_index(index, offload_folder):
45
  if index is None or len(index) == 0:
46
  # Nothing to save
47
  return
48
-
49
  offload_index_file = os.path.join(offload_folder, "index.json")
50
  if os.path.isfile(offload_index_file):
51
  with open(offload_index_file, "r", encoding="utf-8") as f:
@@ -53,15 +44,11 @@ def save_offload_index(index, offload_folder):
53
  else:
54
  current_index = {}
55
  current_index.update(index)
56
-
57
  with open(offload_index_file, "w", encoding="utf-8") as f:
58
  json.dump(current_index, f, indent=2)
59
-
60
-
61
  def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):
62
  """
63
  Offload a state dict in a given folder.
64
-
65
  Args:
66
  save_dir (`str` or `os.PathLike`):
67
  The directory in which to offload the state dict.
@@ -72,15 +59,11 @@ def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str,
72
  index = {}
73
  for name, parameter in state_dict.items():
74
  index = offload_weight(parameter, name, save_dir, index=index)
75
-
76
  # Update index
77
  save_offload_index(index, save_dir)
78
-
79
-
80
  class PrefixedDataset(Mapping):
81
  """
82
  Will access keys in a given dataset by adding a prefix.
83
-
84
  Args:
85
  dataset (`Mapping`): Any map with string keys.
86
  prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
@@ -88,21 +71,15 @@ class PrefixedDataset(Mapping):
88
  def __init__(self, dataset: Mapping, prefix: str):
89
  self.dataset = dataset
90
  self.prefix = prefix
91
-
92
  def __getitem__(self, key):
93
  return self.dataset[f"{self.prefix}{key}"]
94
-
95
  def __iter__(self):
96
  return iter([key for key in self.dataset if key.startswith(self.prefix)])
97
-
98
  def __len__(self):
99
  return len(self.dataset)
100
-
101
-
102
  class OffloadedWeightsLoader(Mapping):
103
  """
104
  A collection that loads weights stored in a given state dict or memory-mapped on disk.
105
-
106
  Args:
107
  state_dict (`Dict[str, torch.Tensor]`, *optional*):
108
  A dictionary mapping parameter names to tensors.
@@ -121,7 +98,6 @@ class OffloadedWeightsLoader(Mapping):
121
  ):
122
  if state_dict is None and save_folder is None and index is None:
123
  raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.")
124
-
125
  self.state_dict = {} if state_dict is None else state_dict
126
  self.save_folder = save_folder
127
  if index is None and save_folder is not None:
@@ -131,7 +107,6 @@ class OffloadedWeightsLoader(Mapping):
131
  self.all_keys = list(self.state_dict.keys())
132
  self.all_keys.extend([key for key in self.index if key not in self.all_keys])
133
  self.device = device
134
-
135
  def __getitem__(self, key: str):
136
  # State dict gets priority
137
  if key in self.state_dict:
@@ -147,28 +122,20 @@ class OffloadedWeightsLoader(Mapping):
147
  # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first
148
  with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f:
149
  tensor = f.get_tensor(weight_info.get("weight_name", key))
150
-
151
  if "dtype" in weight_info:
152
  tensor = tensor.to(getattr(torch, weight_info["dtype"]))
153
-
154
  if tensor.device != torch.device(device):
155
  tensor = tensor.to(device)
156
  return tensor
157
-
158
  weight_file = os.path.join(self.save_folder, f"{key}.dat")
159
  return load_offloaded_weight(weight_file, weight_info)
160
-
161
  def __iter__(self):
162
  return iter(self.all_keys)
163
-
164
  def __len__(self):
165
  return len(self.all_keys)
166
-
167
-
168
  def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):
169
  """
170
  Extract the sub state-dict corresponding to a list of given submodules.
171
-
172
  Args:
173
  state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from.
174
  submodule_names (`List[str]`): The list of submodule names we want to extract.
 
17
  file_array[:] = array[:]
18
  file_array.flush()
19
  return index
 
 
20
  def load_offloaded_weight(weight_file, weight_info):
21
  shape = tuple(weight_info["shape"])
22
  if shape == ():
23
  # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor
24
  shape = (1,)
 
25
  dtype = weight_info["dtype"]
26
  if dtype == "bfloat16":
27
  # NumPy does not support bfloat16 so this was saved as an int16
28
  dtype = "int16"
 
29
  weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r")
 
30
  if len(weight_info["shape"]) == 0:
31
  weight = weight[0]
32
  weight = torch.tensor(weight)
33
  if weight_info["dtype"] == "bfloat16":
34
  weight = weight.view(torch.bfloat16)
 
35
  return weight
 
 
36
  def save_offload_index(index, offload_folder):
37
  if index is None or len(index) == 0:
38
  # Nothing to save
39
  return
 
40
  offload_index_file = os.path.join(offload_folder, "index.json")
41
  if os.path.isfile(offload_index_file):
42
  with open(offload_index_file, "r", encoding="utf-8") as f:
 
44
  else:
45
  current_index = {}
46
  current_index.update(index)
 
47
  with open(offload_index_file, "w", encoding="utf-8") as f:
48
  json.dump(current_index, f, indent=2)
 
 
49
  def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]):
50
  """
51
  Offload a state dict in a given folder.
 
52
  Args:
53
  save_dir (`str` or `os.PathLike`):
54
  The directory in which to offload the state dict.
 
59
  index = {}
60
  for name, parameter in state_dict.items():
61
  index = offload_weight(parameter, name, save_dir, index=index)
 
62
  # Update index
63
  save_offload_index(index, save_dir)
 
 
64
  class PrefixedDataset(Mapping):
65
  """
66
  Will access keys in a given dataset by adding a prefix.
 
67
  Args:
68
  dataset (`Mapping`): Any map with string keys.
69
  prefix (`str`): A prefix to add when trying to access any element in the underlying dataset.
 
71
  def __init__(self, dataset: Mapping, prefix: str):
72
  self.dataset = dataset
73
  self.prefix = prefix
 
74
  def __getitem__(self, key):
75
  return self.dataset[f"{self.prefix}{key}"]
 
76
  def __iter__(self):
77
  return iter([key for key in self.dataset if key.startswith(self.prefix)])
 
78
  def __len__(self):
79
  return len(self.dataset)
 
 
80
  class OffloadedWeightsLoader(Mapping):
81
  """
82
  A collection that loads weights stored in a given state dict or memory-mapped on disk.
 
83
  Args:
84
  state_dict (`Dict[str, torch.Tensor]`, *optional*):
85
  A dictionary mapping parameter names to tensors.
 
98
  ):
99
  if state_dict is None and save_folder is None and index is None:
100
  raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.")
 
101
  self.state_dict = {} if state_dict is None else state_dict
102
  self.save_folder = save_folder
103
  if index is None and save_folder is not None:
 
107
  self.all_keys = list(self.state_dict.keys())
108
  self.all_keys.extend([key for key in self.index if key not in self.all_keys])
109
  self.device = device
 
110
  def __getitem__(self, key: str):
111
  # State dict gets priority
112
  if key in self.state_dict:
 
122
  # if failed to get_tensor on the device, such as bf16 on mps, try to load it on CPU first
123
  with safe_open(weight_info["safetensors_file"], framework="pt", device="cpu") as f:
124
  tensor = f.get_tensor(weight_info.get("weight_name", key))
 
125
  if "dtype" in weight_info:
126
  tensor = tensor.to(getattr(torch, weight_info["dtype"]))
 
127
  if tensor.device != torch.device(device):
128
  tensor = tensor.to(device)
129
  return tensor
 
130
  weight_file = os.path.join(self.save_folder, f"{key}.dat")
131
  return load_offloaded_weight(weight_file, weight_info)
 
132
  def __iter__(self):
133
  return iter(self.all_keys)
 
134
  def __len__(self):
135
  return len(self.all_keys)
 
 
136
  def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]):
137
  """
138
  Extract the sub state-dict corresponding to a list of given submodules.
 
139
  Args:
140
  state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from.
141
  submodule_names (`List[str]`): The list of submodule names we want to extract.
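As a rough sketch of how the offload helpers above fit together (illustrative only; it assumes they are exported from `accelerate.utils` as in the upstream package, and the tensors and folder name are made up):

```python
import torch

from accelerate.utils import OffloadedWeightsLoader, offload_state_dict  # assumed upstream exports

# offload_state_dict writes each tensor as a memory-mapped .dat file plus an
# index.json describing its dtype and shape (see offload_weight/save_offload_index above).
state_dict = {"layer.weight": torch.randn(4, 4), "layer.bias": torch.zeros(4)}
offload_state_dict("offload_dir", state_dict)

# OffloadedWeightsLoader then reads the weights back lazily: keys are resolved to
# tensors only when accessed, so the full state dict never has to sit in RAM.
weights = OffloadedWeightsLoader(save_folder="offload_dir")
print(sorted(weights.keys()), weights["layer.weight"].shape)
```
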
src/utils/operations.py CHANGED
@@ -3,8 +3,6 @@ A set of basic tensor ops compatible with tpu, gpu, and multigpu
3
  """
4
  def is_torch_tensor(tensor):
5
  return isinstance(tensor, torch.Tensor)
6
-
7
-
8
  def is_torch_xpu_tensor(tensor):
9
  return isinstance(
10
  tensor,
@@ -16,12 +14,8 @@ def is_torch_xpu_tensor(tensor):
16
  torch.xpu.DoubleTensor,
17
  torch.xpu.BFloat16Tensor,
18
  )
19
-
20
-
21
  def is_tensor_information(tensor_info):
22
  return isinstance(tensor_info, TensorInformation)
23
-
24
-
25
  def is_namedtuple(data):
26
  """
27
  Checks if `x` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
@@ -35,8 +29,6 @@ def is_namedtuple(data):
35
  if not isinstance(fields, tuple):
36
  return False
37
  return all(isinstance(member, str) for member in fields)
38
-
39
-
40
  def honor_type(obj, generator):
41
  """
42
  Cast a generator to the same type as obj (list, tuple, or namedtuple)
@@ -46,12 +38,9 @@ def honor_type(obj, generator):
46
  return type(obj)(*list(generator))
47
  else:
48
  return type(obj)(generator)
49
-
50
-
51
  def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
52
  """
53
  Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
54
-
55
  Args:
56
  func (`callable`):
57
  The function to recursively apply.
@@ -66,7 +55,6 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth
66
  `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged.
67
  **kwargs:
68
  Keyword arguments that will be passed to `func` when applied on the unpacked data.
69
-
70
  Returns:
71
  The same data structure as `data` with `func` applied to every object of type `main_type`.
72
  """
@@ -97,18 +85,14 @@ def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_oth
97
  f"objects that are valid for `{test_type.__name__}` should be passed."
98
  )
99
  return data
100
-
101
-
102
  def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
103
  """
104
  Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
105
-
106
  Args:
107
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
108
  The data to send to a given device.
109
  device (`torch.device`):
110
  The device to send the data to.
111
-
112
  Returns:
113
  The same data structure as `tensor` with all tensors sent to the proper device.
114
  """
@@ -137,68 +121,49 @@ def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
137
  return tensor.to(device)
138
  else:
139
  return tensor
140
-
141
-
142
  def get_data_structure(data):
143
  """
144
  Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
145
-
146
  Args:
147
  data (nested list/tuple/dictionary of `torch.Tensor`):
148
  The data to analyze.
149
-
150
  Returns:
151
  The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
152
  """
153
  def _get_data_structure(tensor):
154
  return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
155
-
156
  return recursively_apply(_get_data_structure, data)
157
-
158
-
159
  def get_shape(data):
160
  """
161
  Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
162
-
163
  Args:
164
  data (nested list/tuple/dictionary of `torch.Tensor`):
165
  The data to analyze.
166
-
167
  Returns:
168
  The same data structure as `data` with lists of tensor shapes instead of tensors.
169
  """
170
  def _get_shape(tensor):
171
  return list(tensor.shape)
172
-
173
  return recursively_apply(_get_shape, data)
174
-
175
-
176
  def initialize_tensors(data_structure):
177
  """
178
  Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
179
-
180
  Returns:
181
  The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
182
  """
183
  def _initialize_tensor(tensor_info):
184
  return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
185
-
186
  return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
187
-
188
-
189
  def find_batch_size(data):
190
  """
191
  Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
192
-
193
  Args:
194
  data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
195
-
196
  Returns:
197
  `int`: The batch size.
198
  """
199
  if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
200
  raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
201
-
202
  if isinstance(data, (tuple, list)):
203
  return find_batch_size(data[0])
204
  elif isinstance(data, Mapping):
@@ -207,15 +172,11 @@ def find_batch_size(data):
207
  elif not isinstance(data, torch.Tensor):
208
  raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
209
  return data.shape[0]
210
-
211
-
212
  def listify(data):
213
  """
214
  Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
215
-
216
  Args:
217
  data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers.
218
-
219
  Returns:
220
  The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
221
  """
@@ -227,40 +188,30 @@ def listify(data):
227
  # Until NumPy adds bfloat16, we must convert to float32 first.
228
  tensor = tensor.to(torch.float32)
229
  return tensor.tolist()
230
-
231
  return recursively_apply(_convert_to_list, data)
232
-
233
-
234
  def _tpu_gather(tensor):
235
  def _tpu_gather_one(tensor):
236
  if tensor.ndim == 0:
237
  tensor = tensor.clone()[None]
238
-
239
  # Can only gather contiguous tensors
240
  if not tensor.is_contiguous():
241
  tensor = tensor.contiguous()
242
  return xm.all_gather(tensor)
243
-
244
  res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
245
  xm.mark_step()
246
  return res
247
-
248
-
249
  def _gpu_gather(tensor):
250
  state = PartialState()
251
  if is_torch_version(">=", "1.13"):
252
  gather_op = torch.distributed.all_gather_into_tensor
253
  else:
254
  gather_op = torch.distributed._all_gather_base
255
-
256
  def _gpu_gather_one(tensor):
257
  if tensor.ndim == 0:
258
  tensor = tensor.clone()[None]
259
-
260
  # Can only gather contiguous tensors
261
  if not tensor.is_contiguous():
262
  tensor = tensor.contiguous()
263
-
264
  if state.backend is not None and state.backend != "gloo":
265
  # We use `empty` as `all_gather_into_tensor` slightly
266
  # differs from `all_gather` for better efficiency,
@@ -280,18 +231,13 @@ def _gpu_gather(tensor):
280
  output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
281
  torch.distributed.all_gather(output_tensors, tensor)
282
  return torch.cat(output_tensors, dim=0)
283
-
284
  return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
285
-
286
-
287
  class DistributedOperationException(Exception):
288
  """
289
  An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
290
  tensors.
291
  """
292
  pass
293
-
294
-
295
  def verify_operation(function):
296
  """
297
  Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`.
@@ -322,10 +268,7 @@ def verify_operation(function):
322
  f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}"
323
  )
324
  return function(*args, **kwargs)
325
-
326
  return wrapper
327
-
328
-
329
  def chained_operation(function):
330
  """
331
  Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing
@@ -340,19 +283,14 @@ def chained_operation(function):
340
  raise DistributedOperationException(
341
  f"Error found while calling `{operation}`. Please see the earlier error for more details."
342
  ) from e
343
-
344
  return wrapper
345
-
346
-
347
  @verify_operation
348
  def gather(tensor):
349
  """
350
  Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
351
-
352
  Args:
353
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
354
  The data to gather.
355
-
356
  Returns:
357
  The same data structure as `tensor` with all tensors sent to the proper device.
358
  """
@@ -362,23 +300,17 @@ def gather(tensor):
362
  return _gpu_gather(tensor)
363
  else:
364
  return tensor
365
-
366
-
367
  def _gpu_gather_object(object: Any):
368
  output_objects = [None for _ in range(PartialState().num_processes)]
369
  torch.distributed.all_gather_object(output_objects, object)
370
  # all_gather_object returns a list of lists, so we need to flatten it
371
  return [x for y in output_objects for x in y]
372
-
373
-
374
  def gather_object(object: Any):
375
  """
376
  Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
377
-
378
  Args:
379
  object (nested list/tuple/dictionary of picklable object):
380
  The data to gather.
381
-
382
  Returns:
383
  The same data structure as `object` with all the objects sent to every device.
384
  """
@@ -388,35 +320,26 @@ def gather_object(object: Any):
388
  return _gpu_gather_object(object)
389
  else:
390
  return object
391
-
392
-
393
  def _gpu_broadcast(data, src=0):
394
  def _gpu_broadcast_one(tensor, src=0):
395
  torch.distributed.broadcast(tensor, src=src)
396
  return tensor
397
-
398
  return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
399
-
400
-
401
  def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
402
  if isinstance(tensor, (list, tuple)):
403
  return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
404
  elif isinstance(tensor, Mapping):
405
  return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
406
  return xm.mesh_reduce(name, tensor, lambda x: x[src])
407
-
408
-
409
  @verify_operation
410
  def broadcast(tensor, from_process: int = 0):
411
  """
412
  Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
413
-
414
  Args:
415
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
416
  The data to gather.
417
  from_process (`int`, *optional*, defaults to 0):
418
  The process from which to send the data
419
-
420
  Returns:
421
  The same data structure as `tensor` with all tensors broadcasted to the proper device.
422
  """
@@ -426,18 +349,14 @@ def broadcast(tensor, from_process: int = 0):
426
  return _gpu_broadcast(tensor, src=from_process)
427
  else:
428
  return tensor
429
-
430
-
431
  def broadcast_object_list(object_list, from_process: int = 0):
432
  """
433
  Broadcast a list of picklable objects from one process to the others.
434
-
435
  Args:
436
  object_list (list of picklable objects):
437
  The list of objects to broadcast. This list will be modified inplace.
438
  from_process (`int`, *optional*, defaults to 0):
439
  The process from which to send the data.
440
-
441
  Returns:
442
  The same list containing the objects from process 0.
443
  """
@@ -447,37 +366,28 @@ def broadcast_object_list(object_list, from_process: int = 0):
447
  elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
448
  torch.distributed.broadcast_object_list(object_list, src=from_process)
449
  return object_list
450
-
451
-
452
  def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
453
  """
454
  Recursively takes a slice in a nested list/tuple/dictionary of tensors.
455
-
456
  Args:
457
  data (nested list/tuple/dictionary of `torch.Tensor`):
458
  The data to slice.
459
  tensor_slice (`slice`):
460
  The slice to take.
461
-
462
  Returns:
463
  The same data structure as `data` with all the tensors sliced.
464
  """
465
  def _slice_tensor(tensor, tensor_slice):
466
  return tensor[tensor_slice]
467
-
468
  return recursively_apply(_slice_tensor, data, tensor_slice)
469
-
470
-
471
  def concatenate(data, dim=0):
472
  """
473
  Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
474
-
475
  Args:
476
  data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
477
  The data to concatenate.
478
  dim (`int`, *optional*, defaults to 0):
479
  The dimension on which to concatenate.
480
-
481
  Returns:
482
  The same data structure as `data` with all the tensors concatenated.
483
  """
@@ -488,18 +398,13 @@ def concatenate(data, dim=0):
488
  elif not isinstance(data[0], torch.Tensor):
489
  raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
490
  return torch.cat(data, dim=dim)
491
-
492
-
493
  class CannotPadNestedTensorWarning(UserWarning):
494
  pass
495
-
496
-
497
  @chained_operation
498
  def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
499
  """
500
  Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
501
  can safely be gathered.
502
-
503
  Args:
504
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
505
  The data to gather.
@@ -519,7 +424,6 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
519
  return tensor
520
  if dim >= len(tensor.shape):
521
  return tensor
522
-
523
  # Gather all sizes
524
  size = torch.tensor(tensor.shape, device=tensor.device)[None]
525
  sizes = gather(size).cpu()
@@ -527,7 +431,6 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
527
  max_size = max(s[dim] for s in sizes)
528
  if max_size == tensor.shape[dim]:
529
  return tensor
530
-
531
  old_size = tensor.shape
532
  new_size = list(old_size)
533
  new_size[dim] = max_size
@@ -540,18 +443,14 @@ def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
540
  indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
541
  new_tensor[indices] = tensor
542
  return new_tensor
543
-
544
  return recursively_apply(
545
  _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
546
  )
547
-
548
-
549
  @verify_operation
550
  def reduce(tensor, reduction="mean", scale=1.0):
551
  """
552
  Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the
553
  mean of a given operation.
554
-
555
  Args:
556
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
557
  The data to reduce.
@@ -559,7 +458,6 @@ def reduce(tensor, reduction="mean", scale=1.0):
559
  A reduction method. Can be one of "mean", "sum", or "none".
560
  scale (`float`, *optional*):
561
  A default scaling value to be applied after the reduce, only valid on XLA.
562
-
563
  Returns:
564
  The same data structure as `data` with all the tensors reduced.
565
  """
@@ -575,73 +473,52 @@ def reduce(tensor, reduction="mean", scale=1.0):
575
  if reduction == "mean":
576
  cloned_tensor /= state.num_processes
577
  return cloned_tensor
578
-
579
  return recursively_apply(
580
  _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
581
  )
582
-
583
-
584
  def convert_to_fp32(tensor):
585
  """
586
  Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
587
-
588
  Args:
589
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
590
  The data to convert from FP16/BF16 to FP32.
591
-
592
  Returns:
593
  The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
594
  """
595
  def _convert_to_fp32(tensor):
596
  return tensor.float()
597
-
598
  def _is_fp16_bf16_tensor(tensor):
599
  return hasattr(tensor, "dtype") and tensor.dtype in (torch.float16, torch.bfloat16)
600
-
601
  return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
602
-
603
-
604
  class ConvertOutputsToFp32:
605
  """
606
  Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
607
  precision will be converted back to FP32.
608
-
609
  Args:
610
  model_forward (`Callable`):
611
  The function which outputs we want to treat.
612
-
613
  Returns:
614
  The same function as `model_forward` but with converted outputs.
615
  """
616
  def __init__(self, model_forward):
617
  self.model_forward = model_forward
618
  update_wrapper(self, model_forward)
619
-
620
  def __call__(self, *args, **kwargs):
621
  return convert_to_fp32(self.model_forward(*args, **kwargs))
622
-
623
  def __getstate__(self):
624
  raise pickle.PicklingError(
625
  "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
626
  )
627
-
628
-
629
  def convert_outputs_to_fp32(model_forward):
630
  model_forward = ConvertOutputsToFp32(model_forward)
631
-
632
  def forward(*args, **kwargs):
633
  return model_forward(*args, **kwargs)
634
-
635
  # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
636
  forward.__wrapped__ = model_forward
637
-
638
  return forward
639
-
640
-
641
  def find_device(data):
642
  """
643
  Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
644
-
645
  Args:
646
  (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
647
  """
 
3
  """
4
  def is_torch_tensor(tensor):
5
  return isinstance(tensor, torch.Tensor)
 
 
6
  def is_torch_xpu_tensor(tensor):
7
  return isinstance(
8
  tensor,
 
14
  torch.xpu.DoubleTensor,
15
  torch.xpu.BFloat16Tensor,
16
  )
 
 
17
  def is_tensor_information(tensor_info):
18
  return isinstance(tensor_info, TensorInformation)
 
 
19
  def is_namedtuple(data):
20
  """
21
  Checks if `x` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a
 
29
  if not isinstance(fields, tuple):
30
  return False
31
  return all(isinstance(member, str) for member in fields)
 
 
32
  def honor_type(obj, generator):
33
  """
34
  Cast a generator to the same type as obj (list, tuple, or namedtuple)
 
38
  return type(obj)(*list(generator))
39
  else:
40
  return type(obj)(generator)
 
 
41
  def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
42
  """
43
  Recursively apply a function on a data structure that is a nested list/tuple/dictionary of a given base type.
 
44
  Args:
45
  func (`callable`):
46
  The function to recursively apply.
 
55
  `main_type`. If `False`, the function will leave objects of types different than `main_type` unchanged.
56
  **kwargs:
57
  Keyword arguments that will be passed to `func` when applied on the unpacked data.
 
58
  Returns:
59
  The same data structure as `data` with `func` applied to every object of type `main_type`.
60
  """
 
85
  f"objects that are valid for `{test_type.__name__}` should be passed."
86
  )
87
  return data
 
 
88
  def send_to_device(tensor, device, non_blocking=False, skip_keys=None):
89
  """
90
  Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
 
91
  Args:
92
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
93
  The data to send to a given device.
94
  device (`torch.device`):
95
  The device to send the data to.
 
96
  Returns:
97
  The same data structure as `tensor` with all tensors sent to the proper device.
98
  """
 
121
  return tensor.to(device)
122
  else:
123
  return tensor
 
 
124
  def get_data_structure(data):
125
  """
126
  Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.
 
127
  Args:
128
  data (nested list/tuple/dictionary of `torch.Tensor`):
129
  The data to analyze.
 
130
  Returns:
131
  The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
132
  """
133
  def _get_data_structure(tensor):
134
  return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)
 
135
  return recursively_apply(_get_data_structure, data)
 
 
136
  def get_shape(data):
137
  """
138
  Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.
 
139
  Args:
140
  data (nested list/tuple/dictionary of `torch.Tensor`):
141
  The data to analyze.
 
142
  Returns:
143
  The same data structure as `data` with lists of tensor shapes instead of tensors.
144
  """
145
  def _get_shape(tensor):
146
  return list(tensor.shape)
 
147
  return recursively_apply(_get_shape, data)
 
 
148
  def initialize_tensors(data_structure):
149
  """
150
  Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`].
 
151
  Returns:
152
  The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
153
  """
154
  def _initialize_tensor(tensor_info):
155
  return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype)
 
156
  return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
 
 
157
  def find_batch_size(data):
158
  """
159
  Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors.
 
160
  Args:
161
  data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size.
 
162
  Returns:
163
  `int`: The batch size.
164
  """
165
  if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0):
166
  raise ValueError(f"Cannot find the batch size from empty {type(data)}.")
 
167
  if isinstance(data, (tuple, list)):
168
  return find_batch_size(data[0])
169
  elif isinstance(data, Mapping):
 
172
  elif not isinstance(data, torch.Tensor):
173
  raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.")
174
  return data.shape[0]
 
 
175
  def listify(data):
176
  """
177
  Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers.
 
178
  Args:
179
  data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers.
 
180
  Returns:
181
  The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
182
  """
 
188
  # Until NumPy adds bfloat16, we must convert to float32 first.
189
  tensor = tensor.to(torch.float32)
190
  return tensor.tolist()
 
191
  return recursively_apply(_convert_to_list, data)
 
 
192
  def _tpu_gather(tensor):
193
  def _tpu_gather_one(tensor):
194
  if tensor.ndim == 0:
195
  tensor = tensor.clone()[None]
 
196
  # Can only gather contiguous tensors
197
  if not tensor.is_contiguous():
198
  tensor = tensor.contiguous()
199
  return xm.all_gather(tensor)
 
200
  res = recursively_apply(_tpu_gather_one, tensor, error_on_other_type=True)
201
  xm.mark_step()
202
  return res
 
 
203
  def _gpu_gather(tensor):
204
  state = PartialState()
205
  if is_torch_version(">=", "1.13"):
206
  gather_op = torch.distributed.all_gather_into_tensor
207
  else:
208
  gather_op = torch.distributed._all_gather_base
 
209
  def _gpu_gather_one(tensor):
210
  if tensor.ndim == 0:
211
  tensor = tensor.clone()[None]
 
212
  # Can only gather contiguous tensors
213
  if not tensor.is_contiguous():
214
  tensor = tensor.contiguous()
 
215
  if state.backend is not None and state.backend != "gloo":
216
  # We use `empty` as `all_gather_into_tensor` slightly
217
  # differs from `all_gather` for better efficiency,
 
231
  output_tensors = [torch.empty_like(tensor) for _ in range(state.num_processes)]
232
  torch.distributed.all_gather(output_tensors, tensor)
233
  return torch.cat(output_tensors, dim=0)
 
234
  return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
 
 
235
  class DistributedOperationException(Exception):
236
  """
237
  An exception class for distributed operations. Raised if the operation cannot be performed due to the shape of the
238
  tensors.
239
  """
240
  pass
 
 
241
  def verify_operation(function):
242
  """
243
  Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`.
 
268
  f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}"
269
  )
270
  return function(*args, **kwargs)
 
271
  return wrapper
 
 
272
  def chained_operation(function):
273
  """
274
  Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing
 
283
  raise DistributedOperationException(
284
  f"Error found while calling `{operation}`. Please see the earlier error for more details."
285
  ) from e
 
286
  return wrapper
 
 
287
  @verify_operation
288
  def gather(tensor):
289
  """
290
  Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
 
291
  Args:
292
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
293
  The data to gather.
 
294
  Returns:
295
  The same data structure as `tensor` with all tensors sent to the proper device.
296
  """
 
300
  return _gpu_gather(tensor)
301
  else:
302
  return tensor
 
 
303
  def _gpu_gather_object(object: Any):
304
  output_objects = [None for _ in range(PartialState().num_processes)]
305
  torch.distributed.all_gather_object(output_objects, object)
306
  # all_gather_object returns a list of lists, so we need to flatten it
307
  return [x for y in output_objects for x in y]
 
 
308
  def gather_object(object: Any):
309
  """
310
  Recursively gather object in a nested list/tuple/dictionary of objects from all devices.
 
311
  Args:
312
  object (nested list/tuple/dictionary of picklable object):
313
  The data to gather.
 
314
  Returns:
315
  The same data structure as `object` with all the objects sent to every device.
316
  """
 
320
  return _gpu_gather_object(object)
321
  else:
322
  return object
 
 
323
  def _gpu_broadcast(data, src=0):
324
  def _gpu_broadcast_one(tensor, src=0):
325
  torch.distributed.broadcast(tensor, src=src)
326
  return tensor
 
327
  return recursively_apply(_gpu_broadcast_one, data, error_on_other_type=True, src=src)
 
 
328
  def _tpu_broadcast(tensor, src=0, name="broadcast tensor"):
329
  if isinstance(tensor, (list, tuple)):
330
  return honor_type(tensor, (_tpu_broadcast(t, name=f"{name}_{i}") for i, t in enumerate(tensor)))
331
  elif isinstance(tensor, Mapping):
332
  return type(tensor)({k: _tpu_broadcast(v, name=f"{name}_{k}") for k, v in tensor.items()})
333
  return xm.mesh_reduce(name, tensor, lambda x: x[src])
 
 
334
  @verify_operation
335
  def broadcast(tensor, from_process: int = 0):
336
  """
337
  Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.
 
338
  Args:
339
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
340
  The data to gather.
341
  from_process (`int`, *optional*, defaults to 0):
342
  The process from which to send the data
 
343
  Returns:
344
  The same data structure as `tensor` with all tensors broadcasted to the proper device.
345
  """
 
349
  return _gpu_broadcast(tensor, src=from_process)
350
  else:
351
  return tensor
 
 
352
  def broadcast_object_list(object_list, from_process: int = 0):
353
  """
354
  Broadcast a list of picklable objects from one process to the others.
 
355
  Args:
356
  object_list (list of picklable objects):
357
  The list of objects to broadcast. This list will be modified inplace.
358
  from_process (`int`, *optional*, defaults to 0):
359
  The process from which to send the data.
 
360
  Returns:
361
  The same list containing the objects from process 0.
362
  """
 
366
  elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
367
  torch.distributed.broadcast_object_list(object_list, src=from_process)
368
  return object_list
 
 
369
  def slice_tensors(data, tensor_slice, process_index=None, num_processes=None):
370
  """
371
  Recursively takes a slice in a nested list/tuple/dictionary of tensors.
 
372
  Args:
373
  data (nested list/tuple/dictionary of `torch.Tensor`):
374
  The data to slice.
375
  tensor_slice (`slice`):
376
  The slice to take.
 
377
  Returns:
378
  The same data structure as `data` with all the tensors sliced.
379
  """
380
  def _slice_tensor(tensor, tensor_slice):
381
  return tensor[tensor_slice]
 
382
  return recursively_apply(_slice_tensor, data, tensor_slice)
 
 
383
  def concatenate(data, dim=0):
384
  """
385
  Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape.
 
386
  Args:
387
  data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`):
388
  The data to concatenate.
389
  dim (`int`, *optional*, defaults to 0):
390
  The dimension on which to concatenate.
 
391
  Returns:
392
  The same data structure as `data` with all the tensors concatenated.
393
  """
 
398
  elif not isinstance(data[0], torch.Tensor):
399
  raise TypeError(f"Can only concatenate tensors but got {type(data[0])}")
400
  return torch.cat(data, dim=dim)
 
 
401
  class CannotPadNestedTensorWarning(UserWarning):
402
  pass
 
 
403
  @chained_operation
404
  def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False):
405
  """
406
  Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they
407
  can safely be gathered.
 
408
  Args:
409
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
410
  The data to gather.
 
424
  return tensor
425
  if dim >= len(tensor.shape):
426
  return tensor
 
427
  # Gather all sizes
428
  size = torch.tensor(tensor.shape, device=tensor.device)[None]
429
  sizes = gather(size).cpu()
 
431
  max_size = max(s[dim] for s in sizes)
432
  if max_size == tensor.shape[dim]:
433
  return tensor
 
434
  old_size = tensor.shape
435
  new_size = list(old_size)
436
  new_size[dim] = max_size
 
443
  indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size)))
444
  new_tensor[indices] = tensor
445
  return new_tensor
 
446
  return recursively_apply(
447
  _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first
448
  )
 
 
449
  @verify_operation
450
  def reduce(tensor, reduction="mean", scale=1.0):
451
  """
452
  Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the
453
  mean of a given operation.
 
454
  Args:
455
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
456
  The data to reduce.
 
458
  A reduction method. Can be one of "mean", "sum", or "none".
459
  scale (`float`, *optional*):
460
  A default scaling value to be applied after the reduce, only valid on XLA.
 
461
  Returns:
462
  The same data structure as `data` with all the tensors reduced.
463
  """
 
473
  if reduction == "mean":
474
  cloned_tensor /= state.num_processes
475
  return cloned_tensor
 
476
  return recursively_apply(
477
  _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale
478
  )
 
 
479
  def convert_to_fp32(tensor):
480
  """
481
  Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.
 
482
  Args:
483
  tensor (nested list/tuple/dictionary of `torch.Tensor`):
484
  The data to convert from FP16/BF16 to FP32.
 
485
  Returns:
486
  The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
487
  """
488
  def _convert_to_fp32(tensor):
489
  return tensor.float()
 
490
  def _is_fp16_bf16_tensor(tensor):
491
  return hasattr(tensor, "dtype") and tensor.dtype in (torch.float16, torch.bfloat16)
 
492
  return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
 
 
493
  class ConvertOutputsToFp32:
494
  """
495
  Decorator to apply to a function outputting tensors (like a model forward pass) that ensures the outputs in FP16
496
  precision will be converted back to FP32.
 
497
  Args:
498
  model_forward (`Callable`):
499
  The function which outputs we want to treat.
 
500
  Returns:
501
  The same function as `model_forward` but with converted outputs.
502
  """
503
  def __init__(self, model_forward):
504
  self.model_forward = model_forward
505
  update_wrapper(self, model_forward)
 
506
  def __call__(self, *args, **kwargs):
507
  return convert_to_fp32(self.model_forward(*args, **kwargs))
 
508
  def __getstate__(self):
509
  raise pickle.PicklingError(
510
  "Cannot pickle a prepared model with automatic mixed precision, please unwrap the model with `Accelerator.unwrap_model(model)` before pickling it."
511
  )
 
 
512
  def convert_outputs_to_fp32(model_forward):
513
  model_forward = ConvertOutputsToFp32(model_forward)
 
514
  def forward(*args, **kwargs):
515
  return model_forward(*args, **kwargs)
 
516
  # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
517
  forward.__wrapped__ = model_forward
 
518
  return forward
 
 
519
  def find_device(data):
520
  """
521
  Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device).
 
522
  Args:
523
  (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
524
  """
src/utils/other.py CHANGED
@@ -1,10 +1,6 @@
1
  logger = get_logger(__name__)
2
-
3
-
4
  if is_tpu_available(check_device=False):
5
  import torch_xla.core.xla_model as xm
6
-
7
-
8
  def is_compiled_module(module):
9
  """
10
  Check whether the module was compiled with torch.compile()
@@ -12,41 +8,30 @@ def is_compiled_module(module):
12
  if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
13
  return False
14
  return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
15
-
16
-
17
  def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
18
  """
19
  Extract a model from its distributed containers.
20
-
21
  Args:
22
  model (`torch.nn.Module`):
23
  The model to extract.
24
  keep_fp32_wrapper (`bool`, *optional*):
25
  Whether to remove mixed precision hooks from the model.
26
-
27
  Returns:
28
  `torch.nn.Module`: The extracted model.
29
  """
30
  options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
31
-
32
  is_compiled = is_compiled_module(model)
33
  if is_compiled:
34
  compiled_model = model
35
  model = model._orig_mod
36
-
37
  if is_deepspeed_available():
38
  from deepspeed import DeepSpeedEngine
39
-
40
  options += (DeepSpeedEngine,)
41
-
42
  if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
43
  from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
44
-
45
  options += (FSDP,)
46
-
47
  while isinstance(model, options):
48
  model = model.module
49
-
50
  if not keep_fp32_wrapper:
51
  forward = getattr(model, "forward")
52
  original_forward = model.__dict__.pop("_original_forward", None)
@@ -58,31 +43,21 @@ def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
58
  model.forward = MethodType(forward, model)
59
  if getattr(model, "_converted_to_transformer_engine", False):
60
  convert_model(model, to_transformer_engine=False)
61
-
62
  if is_compiled:
63
  compiled_model._orig_mod = model
64
  model = compiled_model
65
-
66
  return model
67
-
68
-
69
  def wait_for_everyone():
70
  """
71
  Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
72
-
73
  <Tip warning={true}>
74
-
75
  Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
76
-
77
  </Tip>
78
  """
79
  PartialState().wait_for_everyone()
80
-
81
-
82
  def clean_state_dict_for_safetensors(state_dict: dict):
83
  """
84
  Cleans the state dictionary from a model and removes tensor aliasing if present.
85
-
86
  Args:
87
  state_dict (`dict`):
88
  The state dictionary from a model
@@ -92,7 +67,6 @@ def clean_state_dict_for_safetensors(state_dict: dict):
92
  for name, tensor in state_dict.items():
93
  if not isinstance(tensor, str):
94
  ptrs[id_tensor_storage(tensor)].append(name)
95
-
96
  # These are all pointers of tensors with shared memory
97
  shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
98
  warn_names = set()
@@ -112,12 +86,9 @@ def clean_state_dict_for_safetensors(state_dict: dict):
112
  )
113
  state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()}
114
  return state_dict
115
-
116
-
117
  def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
118
  """
119
  Save the data to disk. Use in place of `torch.save()`.
120
-
121
  Args:
122
  obj:
123
  The data to save
@@ -135,28 +106,21 @@ def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = Fal
135
  obj = clean_state_dict_for_safetensors(obj)
136
  else:
137
  save_func = torch.save
138
-
139
  if PartialState().distributed_type == DistributedType.TPU:
140
  xm.save(obj, f)
141
  elif PartialState().is_main_process and not save_on_each_node:
142
  save_func(obj, f)
143
  elif PartialState().is_local_main_process and save_on_each_node:
144
  save_func(obj, f)
145
-
146
-
147
  @contextmanager
148
  def clear_environment():
149
  """
150
  A context manager that will cache the original `os.environ` and replace it with an empty dictionary in this context.
151
-
152
  When this context exits, the cached `os.environ` will be restored.
153
-
154
  Example:
155
-
156
  ```python
157
  >>> import os
158
  >>> from accelerate.utils import clear_environment
159
-
160
  >>> os.environ["FOO"] = "bar"
161
  >>> with clear_environment():
162
  ... print(os.environ)
@@ -164,32 +128,23 @@ def clear_environment():
164
  ... print(os.environ["FOO"])
165
  {}
166
  new_bar
167
-
168
  >>> print(os.environ["FOO"])
169
  bar
170
  ```
171
  """
172
  _old_os_environ = os.environ
173
  os.environ = dict()
174
-
175
  yield
176
-
177
  os.environ = _old_os_environ
178
-
179
-
180
  @contextmanager
181
  def patch_environment(**kwargs):
182
  """
183
  A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
184
-
185
  Will convert the values in `kwargs` to strings and upper-case all the keys.
186
-
187
  Example:
188
-
189
  ```python
190
  >>> import os
191
  >>> from accelerate.utils import patch_environment
192
-
193
  >>> with patch_environment(FOO="bar"):
194
  ... print(os.environ["FOO"]) # prints "bar"
195
  >>> print(os.environ["FOO"]) # raises KeyError
@@ -201,9 +156,7 @@ def patch_environment(**kwargs):
201
  if key in os.environ:
202
  existing_vars[key] = os.environ[key]
203
  os.environ[key] = str(value)
204
-
205
  yield
206
-
207
  for key in kwargs:
208
  key = key.upper()
209
  if key in existing_vars:
@@ -211,8 +164,6 @@ def patch_environment(**kwargs):
211
  os.environ[key] = existing_vars[key]
212
  else:
213
  os.environ.pop(key, None)
214
-
215
-
216
  def get_pretty_name(obj):
217
  """
218
  Gets a pretty name from `obj`.
@@ -224,12 +175,9 @@ def get_pretty_name(obj):
224
  if hasattr(obj, "__name__"):
225
  return obj.__name__
226
  return str(obj)
227
-
228
-
229
  def merge_dicts(source, destination):
230
  """
231
  Recursively merges two dictionaries.
232
-
233
  Args:
234
  source (`dict`): The dictionary to merge into `destination`.
235
  destination (`dict`): The dictionary to merge `source` into.
@@ -240,10 +188,7 @@ def merge_dicts(source, destination):
240
  merge_dicts(value, node)
241
  else:
242
  destination[key] = value
243
-
244
  return destination
245
-
246
-
247
  def is_port_in_use(port: int = None) -> bool:
248
  """
249
  Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been
@@ -253,18 +198,13 @@ def is_port_in_use(port: int = None) -> bool:
253
  port = 29500
254
  with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
255
  return s.connect_ex(("localhost", port)) == 0
256
-
257
-
258
  def convert_bytes(size):
259
  "Converts `size` from bytes to the largest possible unit"
260
  for x in ["bytes", "KB", "MB", "GB", "TB"]:
261
  if size < 1024.0:
262
  return f"{round(size, 2)} {x}"
263
  size /= 1024.0
264
-
265
  return f"{round(size, 2)} PB"
266
-
267
-
268
  def check_os_kernel():
269
  """Warns if the kernel version is below the recommended minimum on Linux."""
270
  # see issue #1929
@@ -272,7 +212,6 @@ def check_os_kernel():
272
  system = info.system
273
  if system != "Linux":
274
  return
275
-
276
  _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
277
  min_version = "5.5.0"
278
  if Version(version) < Version(min_version):
 
1
  logger = get_logger(__name__)
 
 
2
  if is_tpu_available(check_device=False):
3
  import torch_xla.core.xla_model as xm
 
 
4
  def is_compiled_module(module):
5
  """
6
  Check whether the module was compiled with torch.compile()
 
8
  if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
9
  return False
10
  return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
 
 
11
  def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
12
  """
13
  Extract a model from its distributed containers.
 
14
  Args:
15
  model (`torch.nn.Module`):
16
  The model to extract.
17
  keep_fp32_wrapper (`bool`, *optional*):
18
  Whether to remove mixed precision hooks from the model.
 
19
  Returns:
20
  `torch.nn.Module`: The extracted model.
21
  """
22
  options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
 
23
  is_compiled = is_compiled_module(model)
24
  if is_compiled:
25
  compiled_model = model
26
  model = model._orig_mod
 
27
  if is_deepspeed_available():
28
  from deepspeed import DeepSpeedEngine
 
29
  options += (DeepSpeedEngine,)
 
30
  if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
31
  from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
 
32
  options += (FSDP,)
 
33
  while isinstance(model, options):
34
  model = model.module
 
35
  if not keep_fp32_wrapper:
36
  forward = getattr(model, "forward")
37
  original_forward = model.__dict__.pop("_original_forward", None)
 
43
  model.forward = MethodType(forward, model)
44
  if getattr(model, "_converted_to_transformer_engine", False):
45
  convert_model(model, to_transformer_engine=False)
 
46
  if is_compiled:
47
  compiled_model._orig_mod = model
48
  model = compiled_model
 
49
  return model
 
 
50
  def wait_for_everyone():
51
  """
52
  Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
 
53
  <Tip warning={true}>
 
54
  Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
 
55
  </Tip>
56
  """
57
  PartialState().wait_for_everyone()
 
 
58
  def clean_state_dict_for_safetensors(state_dict: dict):
59
  """
60
  Cleans the state dictionary from a model and removes tensor aliasing if present.
 
61
  Args:
62
  state_dict (`dict`):
63
  The state dictionary from a model
 
67
  for name, tensor in state_dict.items():
68
  if not isinstance(tensor, str):
69
  ptrs[id_tensor_storage(tensor)].append(name)
 
70
  # These are all pointers of tensors with shared memory
71
  shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
72
  warn_names = set()
 
86
  )
87
  state_dict = {k: v.contiguous() if isinstance(v, torch.Tensor) else v for k, v in state_dict.items()}
88
  return state_dict
 
 
89
  def save(obj, f, save_on_each_node: bool = False, safe_serialization: bool = False):
90
  """
91
  Save the data to disk. Use in place of `torch.save()`.
 
92
  Args:
93
  obj:
94
  The data to save
 
106
  obj = clean_state_dict_for_safetensors(obj)
107
  else:
108
  save_func = torch.save
 
109
  if PartialState().distributed_type == DistributedType.TPU:
110
  xm.save(obj, f)
111
  elif PartialState().is_main_process and not save_on_each_node:
112
  save_func(obj, f)
113
  elif PartialState().is_local_main_process and save_on_each_node:
114
  save_func(obj, f)
 
 
115
  @contextmanager
116
  def clear_environment():
117
  """
118
  A context manager that will cache the original `os.environ` and replace it with an empty dictionary in this context.
 
119
  When this context exits, the cached `os.environ` will be restored.
 
120
  Example:
 
121
  ```python
122
  >>> import os
123
  >>> from accelerate.utils import clear_environment
 
124
  >>> os.environ["FOO"] = "bar"
125
  >>> with clear_environment():
126
  ... print(os.environ)
 
128
  ... print(os.environ["FOO"])
129
  {}
130
  new_bar
 
131
  >>> print(os.environ["FOO"])
132
  bar
133
  ```
134
  """
135
  _old_os_environ = os.environ
136
  os.environ = dict()
 
137
  yield
 
138
  os.environ = _old_os_environ
 
 
@contextmanager
def patch_environment(**kwargs):
    """
    A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
    Will convert the values in `kwargs` to strings and upper-case all the keys.
    Example:
    ```python
    >>> import os
    >>> from accelerate.utils import patch_environment
    >>> with patch_environment(FOO="bar"):
    ...     print(os.environ["FOO"])  # prints "bar"
    >>> print(os.environ["FOO"])  # raises KeyError

        if key in os.environ:
            existing_vars[key] = os.environ[key]
        os.environ[key] = str(value)
    yield
    for key in kwargs:
        key = key.upper()
        if key in existing_vars:

            os.environ[key] = existing_vars[key]
        else:
            os.environ.pop(key, None)
 
 
def get_pretty_name(obj):
    """
    Gets a pretty name from `obj`.

    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
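For instance, based only on the branches visible above (assuming `get_pretty_name` is in scope, e.g. imported from `accelerate.utils`):

```python
def training_function():
    pass

class TinyConfig:
    pass

print(get_pretty_name(training_function))  # "training_function"
print(get_pretty_name(TinyConfig))         # "TinyConfig"
```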
 
 
def merge_dicts(source, destination):
    """
    Recursively merges two dictionaries.
    Args:
        source (`dict`): The dictionary to merge into `destination`.
        destination (`dict`): The dictionary to merge `source` into.

            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
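A quick illustration of the recursive merge (plain dicts, nothing accelerate-specific; assumes `merge_dicts` is in scope):

```python
defaults = {"optimizer": {"lr": 1e-3, "betas": (0.9, 0.999)}, "epochs": 3}
overrides = {"optimizer": {"lr": 5e-4}}

merged = merge_dicts(overrides, defaults)  # merges `overrides` into `defaults`
print(merged["optimizer"])  # {'lr': 0.0005, 'betas': (0.9, 0.999)} -- nested keys merged, not replaced
print(merged["epochs"])     # 3
```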
 
 
def is_port_in_use(port: int = None) -> bool:
    """
    Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been

        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
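For example, probing the default rendezvous port before starting a second run (a sketch; assumes the helper is importable from `accelerate.utils`):

```python
from accelerate.utils import is_port_in_use

if is_port_in_use(29500):
    print("Port 29500 is busy; pass --main_process_port to `accelerate launch`.")
else:
    print("Port 29500 is free.")
```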
 
 
def convert_bytes(size):
    "Converts `size` from bytes to the largest possible unit"
    for x in ["bytes", "KB", "MB", "GB", "TB"]:
        if size < 1024.0:
            return f"{round(size, 2)} {x}"
        size /= 1024.0
    return f"{round(size, 2)} PB"
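Sample outputs, following the loop above directly (assuming `convert_bytes` is in scope):

```python
print(convert_bytes(512))        # 512 bytes
print(convert_bytes(3_500_000))  # 3.34 MB
print(convert_bytes(1024 ** 3))  # 1.0 GB
```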
 
 
def check_os_kernel():
    """Warns if the kernel version is below the recommended minimum on Linux."""
    # see issue #1929

    system = info.system
    if system != "Linux":
        return
    _, version, *_ = re.split(r"(\d+\.\d+\.\d+)", info.release)
    min_version = "5.5.0"
    if Version(version) < Version(min_version):
src/utils/random.py CHANGED
@@ -1,7 +1,6 @@
 def set_seed(seed: int, device_specific: bool = False):
     """
     Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
-
     Args:
         seed (`int`):
             The seed to set.
@@ -22,8 +21,6 @@ def set_seed(seed: int, device_specific: bool = False):
     # ^^ safe to call this function even if cuda is not available
     if is_tpu_available():
         xm.set_rng_state(seed)
-
-
 def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
     # Get the proper rng state
     if rng_type == RNGType.TORCH:
@@ -42,7 +39,6 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona
     elif rng_type == RNGType.GENERATOR:
         assert generator is not None, "Need a generator to synchronize its seed."
         rng_state = generator.get_state()
-
     # Broadcast the rng state from device 0 to other devices
     state = AcceleratorState()
     if state.distributed_type == DistributedType.TPU:
@@ -60,7 +56,6 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona
         rng_state = rng_state.cpu()
     elif state.distributed_type == DistributedType.MULTI_CPU:
         torch.distributed.broadcast(rng_state, 0)
-
     # Set the broadcast rng state
     if rng_type == RNGType.TORCH:
         torch.set_rng_state(rng_state)
@@ -74,8 +69,6 @@ def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optiona
         xm.set_rng_state(rng_state.item())
     elif rng_type == RNGType.GENERATOR:
         generator.set_state(rng_state)
-
-
 def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
     for rng_type in rng_types:
         synchronize_rng_state(RNGType(rng_type), generator=generator)
 
def set_seed(seed: int, device_specific: bool = False):
    """
    Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
    Args:
        seed (`int`):
            The seed to set.

    # ^^ safe to call this function even if cuda is not available
    if is_tpu_available():
        xm.set_rng_state(seed)
def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
    # Get the proper rng state
    if rng_type == RNGType.TORCH:

    elif rng_type == RNGType.GENERATOR:
        assert generator is not None, "Need a generator to synchronize its seed."
        rng_state = generator.get_state()
    # Broadcast the rng state from device 0 to other devices
    state = AcceleratorState()
    if state.distributed_type == DistributedType.TPU:

        rng_state = rng_state.cpu()
    elif state.distributed_type == DistributedType.MULTI_CPU:
        torch.distributed.broadcast(rng_state, 0)
    # Set the broadcast rng state
    if rng_type == RNGType.TORCH:
        torch.set_rng_state(rng_state)

        xm.set_rng_state(rng_state.item())
    elif rng_type == RNGType.GENERATOR:
        generator.set_state(rng_state)
def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
    for rng_type in rng_types:
        synchronize_rng_state(RNGType(rng_type), generator=generator)
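A minimal reproducibility sketch for the first helper (`set_seed` is part of the public `accelerate.utils` API):

```python
import torch
from accelerate.utils import set_seed

set_seed(42)
first = torch.randn(3)

set_seed(42)
second = torch.randn(3)

print(torch.equal(first, second))  # True -- random, numpy and torch were all reseeded
```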
src/utils/rich.py CHANGED
@@ -1,7 +1,5 @@
 if is_rich_available():
     from rich.traceback import install
-
     install(show_locals=False)
-
 else:
     raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
 
if is_rich_available():
    from rich.traceback import install
    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
src/utils/torch_xla.py CHANGED
@@ -1,23 +1,18 @@
 def install_xla(upgrade: bool = False):
     """
     Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory.
-
     Args:
         upgrade (`bool`, *optional*, defaults to `False`):
             Whether to upgrade `torch` and install the latest `torch_xla` wheels.
-
     Example:
-
     ```python
     >>> from accelerate.utils import install_xla
-
     >>> install_xla(upgrade=True)
     ```
     """
     in_colab = False
     if "IPython" in sys.modules:
         in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
-
     if in_colab:
         if upgrade:
             torch_install_cmd = ["pip", "install", "-U", "torch"]
 
def install_xla(upgrade: bool = False):
    """
    Helper function to install appropriate xla wheels based on the `torch` version in Google Colaboratory.
    Args:
        upgrade (`bool`, *optional*, defaults to `False`):
            Whether to upgrade `torch` and install the latest `torch_xla` wheels.
    Example:
    ```python
    >>> from accelerate.utils import install_xla
    >>> install_xla(upgrade=True)
    ```
    """
    in_colab = False
    if "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    if in_colab:
        if upgrade:
            torch_install_cmd = ["pip", "install", "-U", "torch"]
src/utils/tqdm.py CHANGED
@@ -1,7 +1,6 @@
 def tqdm(main_process_only: bool = True, *args, **kwargs):
     """
     Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
-
     Args:
         main_process_only (`bool`, *optional*):
             Whether to display the progress bar only on the main process
 
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    Args:
        main_process_only (`bool`, *optional*):
            Whether to display the progress bar only on the main process
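A usage sketch matching the signature shown above (the wrapper is re-exported as `accelerate.utils.tqdm`):

```python
from accelerate.utils import tqdm

# With this signature the first positional argument is `main_process_only`;
# everything else is forwarded to `tqdm.tqdm`, so only rank 0 draws the bar.
for _ in tqdm(True, range(100)):
    pass
```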
src/utils/transformer_engine.py CHANGED
@@ -16,13 +16,11 @@ def convert_model(model, to_transformer_engine=True, _convert_linear=True, _conv
             module.weight.copy_(te_module.weight)
             if has_bias:
                 module.bias.copy_(te_module.bias)
-
             setattr(model, name, te_module)
         elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln:
             te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
             module.weight.copy_(te_module.weight)
             module.bias.copy_(te_module.bias)
-
             setattr(model, name, te_module)
         elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear:
             has_bias = module.bias is not None
@@ -32,13 +30,11 @@ def convert_model(model, to_transformer_engine=True, _convert_linear=True, _conv
             module.weight.copy_(new_module.weight)
             if has_bias:
                 module.bias.copy_(new_module.bias)
-
             setattr(model, name, new_module)
         elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln:
             new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
             module.weight.copy_(new_module.weight)
             module.bias.copy_(new_module.bias)
-
             setattr(model, name, new_module)
         else:
             convert_model(
@@ -47,8 +43,6 @@ def convert_model(model, to_transformer_engine=True, _convert_linear=True, _conv
                 _convert_linear=_convert_linear,
                 _convert_ln=_convert_ln,
             )
-
-
 def has_transformer_engine_layers(model):
     """
     Returns whether a given model has some `transformer_engine` layer or not.
 
            module.weight.copy_(te_module.weight)
            if has_bias:
                module.bias.copy_(te_module.bias)
            setattr(model, name, te_module)
        elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln:
            te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
            module.weight.copy_(te_module.weight)
            module.bias.copy_(te_module.bias)
            setattr(model, name, te_module)
        elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear:
            has_bias = module.bias is not None

            module.weight.copy_(new_module.weight)
            if has_bias:
                module.bias.copy_(new_module.bias)
            setattr(model, name, new_module)
        elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln:
            new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype)
            module.weight.copy_(new_module.weight)
            module.bias.copy_(new_module.bias)
            setattr(model, name, new_module)
        else:
            convert_model(

                _convert_linear=_convert_linear,
                _convert_ln=_convert_ln,
            )
def has_transformer_engine_layers(model):
    """
    Returns whether a given model has some `transformer_engine` layer or not.
src/utils/versions.py CHANGED
@@ -1,10 +1,7 @@
 torch_version = parse(importlib.metadata.version("torch"))
-
-
 def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
     """
     Compares a library version to some requirement using a given operation.
-
     Args:
         library_or_version (`str` or `packaging.version.Version`):
             A library name or a version to check.
@@ -19,12 +16,9 @@ def compare_versions(library_or_version: Union[str, Version], operation: str, re
     if isinstance(library_or_version, str):
         library_or_version = parse(importlib.metadata.version(library_or_version))
     return operation(library_or_version, parse(requirement_version))
-
-
 def is_torch_version(operation: str, version: str):
     """
     Compares the current PyTorch version to a given reference with an operation.
-
     Args:
         operation (`str`):
             A string representation of an operator, such as `">"` or `"<="`
 
torch_version = parse(importlib.metadata.version("torch"))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compares a library version to some requirement using a given operation.
    Args:
        library_or_version (`str` or `packaging.version.Version`):
            A library name or a version to check.

    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
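Both helpers are part of the `accelerate.utils` API; a quick sketch of a typical capability check:

```python
from accelerate.utils import compare_versions, is_torch_version

if is_torch_version(">=", "2.0.0"):
    print("torch.compile is available")
if compare_versions("numpy", ">=", "1.20.0"):
    print("recent numpy detected")
```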
 
 
def is_torch_version(operation: str, version: str):
    """
    Compares the current PyTorch version to a given reference with an operation.
    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`