Forward step for passed-in model. If first stage, input tensor is obtained from data_iterator, otherwise passed-in input_tensor is used. Returns output tensor.
def forward_step(forward_step_func, data_iterator, model, input_tensor, losses_reduced):
    """Forward step for passed-in model.

    If first stage, input tensor is obtained from data_iterator, otherwise
    passed-in input_tensor is used. Returns output tensor."""
    timers = get_timers()
    args = get_args()

    timers("forward-compute").start()
    unwrapped_model = unwrap_model(model, (torchDDP, LocalDDP, Float16Module))
    if not args.deepspeed:
        unwrapped_model.set_input_tensor(input_tensor)
    else:
        unwrapped_model.module.set_input_tensor(input_tensor)
    output_tensor, loss_func = forward_step_func(data_iterator, model)
    if mpu.is_pipeline_last_stage():
        output_tensor = loss_func(output_tensor)
        loss, loss_reduced = output_tensor
        output_tensor = loss / get_num_microbatches()
        losses_reduced.append(loss_reduced)
    timers("forward-compute").stop()

    return output_tensor
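For reference, forward_step() expects the user-supplied forward_step_func to return this stage's output tensor together with a loss closure that, on the last stage, yields (loss, reduced-loss dict). The sketch below only illustrates that contract; get_batch() and the batch fields are hypothetical stand-ins, not the actual pretrain script.

def example_forward_step_func(data_iterator, model):
    # Hypothetical batch helper: returns token ids, labels, and a loss mask.
    tokens, labels, loss_mask = get_batch(data_iterator)
    output_tensor = model(tokens, labels=labels)

    def loss_func(output_tensor):
        # Average the per-token loss over unmasked positions.
        loss = torch.sum(output_tensor.view(-1) * loss_mask.view(-1)) / loss_mask.sum()
        averaged_loss = average_losses_across_data_parallel_group([loss])
        return loss, {"lm loss": averaged_loss[0]}

    return output_tensor, loss_func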
Backward step through passed-in output tensor. If last stage, output_tensor_grad is None, otherwise gradient of loss with respect to stage's output tensor. Returns gradient of loss with respect to input tensor (None if first stage).
def backward_step(
    optimizer, input_tensor, output_tensor, output_tensor_grad, model=None
):
    """Backward step through passed-in output tensor.

    If last stage, output_tensor_grad is None, otherwise gradient of loss
    with respect to stage's output tensor. Returns gradient of loss with
    respect to input tensor (None if first stage)."""
    args = get_args()
    if args.deepspeed:
        assert model is not None

    timers = get_timers()
    timers("backward-compute").start()

    # Retain the grad on the input_tensor.
    if input_tensor is not None:
        input_tensor.retain_grad()

    if args.deepspeed:
        model.backward(output_tensor)
    else:
        # Backward pass.
        if output_tensor_grad is None:
            output_tensor = optimizer.scale_loss(output_tensor)
        torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad)

    # Collect the grad of the input_tensor.
    input_tensor_grad = None
    if input_tensor is not None:
        input_tensor_grad = input_tensor.grad

    timers("backward-compute").stop()

    return input_tensor_grad
Run forward and backward passes with no pipeline parallelism (no inter-stage communication). Returns dictionary with losses.
def forward_backward_no_pipelining( forward_step_func, data_iterator, model, optimizer, timers, forward_only ): """Run forward and backward passes with no pipeline parallelism (no inter-stage communication). Returns dictionary with losses.""" assert len(model) == 1 model = model[0] args = get_args() context_handler = dummy_handler if isinstance(model, torchDDP): context_handler = model.no_sync if args.deepspeed: model.set_gradient_accumulation_boundary(False) losses_reduced = [] input_tensor, output_tensor_grad = None, None with context_handler(): for i in range(get_num_microbatches() - 1): # print_rank_0("====> start of microstep {i}") # print_rank_0("====> forward") output_tensor = forward_step( forward_step_func, data_iterator, model, input_tensor, losses_reduced ) # print_rank_0("====> backward") if not forward_only: backward_step( optimizer, input_tensor, output_tensor, output_tensor_grad, model ) # print_rank_0("====> end of microstep {i}") if args.deepspeed: model.set_gradient_accumulation_boundary(True) # Run computation for last microbatch out of context handler (want to # synchronize gradients). # print_rank_0("====> start of the last microstep") # print_rank_0("====> forward") output_tensor = forward_step( forward_step_func, data_iterator, model, input_tensor, losses_reduced ) # print_rank_0("====> backward") if not forward_only: backward_step(optimizer, input_tensor, output_tensor, output_tensor_grad, model) # print_rank_0("====> end of the last microstep") return losses_reduced
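The loop above runs every microbatch except the last inside context_handler() so that, with torchDDP, gradient all-reduce is deferred to the final microbatch. When the model is not a torchDDP instance, context_handler falls back to dummy_handler; a minimal sketch of such a no-op context manager (an assumption about how it is defined elsewhere in this module) is:

import contextlib

@contextlib.contextmanager
def dummy_handler():
    # No-op stand-in for torchDDP's no_sync(): gradients are simply
    # all-reduced on every backward pass.
    yield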
Run interleaved 1F1B schedule (model split into model chunks), with communication between pipeline stages as needed. Returns dictionary with losses if the last stage, empty dict otherwise.
def forward_backward_pipelining_with_interleaving( forward_step_func, data_iterator, model, optimizer, timers, forward_only ): """Run interleaved 1F1B schedule (model split into model chunks), with communication between pipeline stages as needed. Returns dictionary with losses if the last stage, empty dict otherwise.""" input_tensors = [[] for _ in range(len(model))] output_tensors = [[] for _ in range(len(model))] losses_reduced = [] if not forward_only: output_tensor_grads = [[] for _ in range(len(model))] pipeline_parallel_size = mpu.get_pipeline_model_parallel_world_size() pipeline_parallel_rank = mpu.get_pipeline_model_parallel_rank() # Compute number of warmup and remaining microbatches. num_model_chunks = len(model) num_microbatches = get_num_microbatches() * num_model_chunks all_warmup_microbatches = False if forward_only: num_warmup_microbatches = num_microbatches else: # Run all forward passes and then all backward passes if number of # microbatches is just the number of pipeline stages. # Otherwise, perform (num_model_chunks-1)*pipeline_parallel_size on # all workers, followed by more microbatches after depending on # stage ID (more forward passes for earlier stages, later stages can # immediately start with 1F1B). if get_num_microbatches() == pipeline_parallel_size: num_warmup_microbatches = num_microbatches all_warmup_microbatches = True else: num_warmup_microbatches = ( pipeline_parallel_size - pipeline_parallel_rank - 1 ) * 2 num_warmup_microbatches += (num_model_chunks - 1) * pipeline_parallel_size num_warmup_microbatches = min(num_warmup_microbatches, num_microbatches) num_microbatches_remaining = num_microbatches - num_warmup_microbatches def get_model_chunk_id(microbatch_id, forward): """Helper method to get the model chunk ID given the iteration number.""" microbatch_id_in_group = microbatch_id % ( pipeline_parallel_size * num_model_chunks ) model_chunk_id = microbatch_id_in_group // pipeline_parallel_size if not forward: model_chunk_id = num_model_chunks - model_chunk_id - 1 return model_chunk_id def forward_step_helper(microbatch_id): """Helper method to run forward step with model split into chunks (run set_virtual_pipeline_model_parallel_rank() before calling forward_step()).""" model_chunk_id = get_model_chunk_id(microbatch_id, forward=True) mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id) if mpu.is_pipeline_first_stage(): if len(input_tensors[model_chunk_id]) == len( output_tensors[model_chunk_id] ): input_tensors[model_chunk_id].append(None) input_tensor = input_tensors[model_chunk_id][-1] output_tensor = forward_step( forward_step_func, data_iterator[model_chunk_id], model[model_chunk_id], input_tensor, losses_reduced, ) output_tensors[model_chunk_id].append(output_tensor) return output_tensor def backward_step_helper(microbatch_id): """Helper method to run backward step with model split into chunks (run set_virtual_pipeline_model_parallel_rank() before calling backward_step()).""" model_chunk_id = get_model_chunk_id(microbatch_id, forward=False) mpu.set_virtual_pipeline_model_parallel_rank(model_chunk_id) if mpu.is_pipeline_last_stage(): if len(output_tensor_grads[model_chunk_id]) == 0: output_tensor_grads[model_chunk_id].append(None) input_tensor = input_tensors[model_chunk_id].pop(0) output_tensor = output_tensors[model_chunk_id].pop(0) output_tensor_grad = output_tensor_grads[model_chunk_id].pop(0) input_tensor_grad = backward_step( optimizer, input_tensor, output_tensor, output_tensor_grad ) return input_tensor_grad # Run warmup forward passes. 
mpu.set_virtual_pipeline_model_parallel_rank(0) input_tensors[0].append(p2p_communication.recv_forward(timers)) for k in range(num_warmup_microbatches): output_tensor = forward_step_helper(k) # Determine if tensor should be received from previous stage. next_forward_model_chunk_id = get_model_chunk_id(k + 1, forward=True) recv_prev = True if mpu.is_pipeline_first_stage(ignore_virtual=True): if next_forward_model_chunk_id == 0: recv_prev = False if k == (num_microbatches - 1): recv_prev = False # Don't send tensor downstream if on last stage. if mpu.is_pipeline_last_stage(): output_tensor = None # Send and receive tensors as appropriate (send tensors computed # in this iteration; receive tensors for next iteration). if ( k == (num_warmup_microbatches - 1) and not forward_only and not all_warmup_microbatches ): input_tensor_grad = None recv_next = True if mpu.is_pipeline_last_stage(ignore_virtual=True): recv_next = False ( input_tensor, output_tensor_grad, ) = p2p_communication.send_forward_backward_recv_forward_backward( output_tensor, input_tensor_grad, recv_prev=recv_prev, recv_next=recv_next, timers=timers, ) output_tensor_grads[num_model_chunks - 1].append(output_tensor_grad) else: input_tensor = p2p_communication.send_forward_recv_forward( output_tensor, recv_prev, timers ) input_tensors[next_forward_model_chunk_id].append(input_tensor) # Run 1F1B in steady state. for k in range(num_microbatches_remaining): # Forward pass. forward_k = k + num_warmup_microbatches output_tensor = forward_step_helper(forward_k) # Backward pass. backward_k = k input_tensor_grad = backward_step_helper(backward_k) # Send output_tensor and input_tensor_grad, receive input_tensor # and output_tensor_grad. # Determine if current stage has anything to send in either direction, # otherwise set tensor to None. forward_model_chunk_id = get_model_chunk_id(forward_k, forward=True) mpu.set_virtual_pipeline_model_parallel_rank(forward_model_chunk_id) if mpu.is_pipeline_last_stage(): output_tensor = None backward_model_chunk_id = get_model_chunk_id(backward_k, forward=False) mpu.set_virtual_pipeline_model_parallel_rank(backward_model_chunk_id) if mpu.is_pipeline_first_stage(): input_tensor_grad = None # Determine if peers are sending, and where in data structure to put # received tensors. recv_prev = True if mpu.is_pipeline_first_stage(ignore_virtual=True): # First stage is ahead of last stage by (pipeline_parallel_size - 1). next_forward_model_chunk_id = get_model_chunk_id( forward_k - (pipeline_parallel_size - 1), forward=True ) if next_forward_model_chunk_id == (num_model_chunks - 1): recv_prev = False next_forward_model_chunk_id += 1 else: next_forward_model_chunk_id = get_model_chunk_id( forward_k + 1, forward=True ) recv_next = True if mpu.is_pipeline_last_stage(ignore_virtual=True): # Last stage is ahead of first stage by (pipeline_parallel_size - 1). next_backward_model_chunk_id = get_model_chunk_id( backward_k - (pipeline_parallel_size - 1), forward=False ) if next_backward_model_chunk_id == 0: recv_next = False next_backward_model_chunk_id -= 1 else: next_backward_model_chunk_id = get_model_chunk_id( backward_k + 1, forward=False ) # If last iteration, don't receive; we already received one extra # before the start of the for loop. if k == (num_microbatches_remaining - 1): recv_prev = False # Communicate tensors. 
( input_tensor, output_tensor_grad, ) = p2p_communication.send_forward_backward_recv_forward_backward( output_tensor, input_tensor_grad, recv_prev=recv_prev, recv_next=recv_next, timers=timers, ) # Put input_tensor and output_tensor_grad in data structures in the # right location. if recv_prev: input_tensors[next_forward_model_chunk_id].append(input_tensor) if recv_next: output_tensor_grads[next_backward_model_chunk_id].append(output_tensor_grad) # Run cooldown backward passes (flush out pipeline). if not forward_only: if all_warmup_microbatches: output_tensor_grads[num_model_chunks - 1].append( p2p_communication.recv_backward(timers) ) for k in range(num_microbatches_remaining, num_microbatches): input_tensor_grad = backward_step_helper(k) next_backward_model_chunk_id = get_model_chunk_id(k + 1, forward=False) recv_next = True if mpu.is_pipeline_last_stage(ignore_virtual=True): if next_backward_model_chunk_id == (num_model_chunks - 1): recv_next = False if k == (num_microbatches - 1): recv_next = False output_tensor_grads[next_backward_model_chunk_id].append( p2p_communication.send_backward_recv_backward( input_tensor_grad, recv_next, timers ) ) return losses_reduced
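To make the warmup bookkeeping concrete, here is a worked example with illustrative values (pipeline_parallel_size = 4, num_model_chunks = 2, get_num_microbatches() = 8); these numbers are examples, not defaults taken from the source.

# num_microbatches = 8 * 2 = 16. Since get_num_microbatches() != pipeline size,
# the warmup count for pipeline rank r is
#     2 * (4 - r - 1) + (2 - 1) * 4
# i.e. 10, 8, 6, 4 microbatches for ranks 0..3 (capped at 16); the remainder
# run in the 1F1B steady state.

def get_model_chunk_id_example(microbatch_id, forward,
                               pipeline_parallel_size=4, num_model_chunks=2):
    # Same logic as the get_model_chunk_id() helper above, with the sizes
    # bound explicitly so it can be run stand-alone.
    microbatch_id_in_group = microbatch_id % (pipeline_parallel_size * num_model_chunks)
    model_chunk_id = microbatch_id_in_group // pipeline_parallel_size
    if not forward:
        model_chunk_id = num_model_chunks - model_chunk_id - 1
    return model_chunk_id

# [get_model_chunk_id_example(k, forward=True) for k in range(16)]
# -> [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1]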
Run non-interleaved 1F1B schedule, with communication between pipeline stages. Returns dictionary with losses if the last stage, empty dict otherwise.
def forward_backward_pipelining_without_interleaving( forward_step_func, data_iterator, model, optimizer, timers, forward_only ): """Run non-interleaved 1F1B schedule, with communication between pipeline stages. Returns dictionary with losses if the last stage, empty dict otherwise.""" timers = get_timers() assert len(model) == 1 model = model[0] # Compute number of warmup microbatches. num_microbatches = get_num_microbatches() num_warmup_microbatches = ( mpu.get_pipeline_model_parallel_world_size() - mpu.get_pipeline_model_parallel_rank() - 1 ) num_warmup_microbatches = min(num_warmup_microbatches, num_microbatches) num_microbatches_remaining = num_microbatches - num_warmup_microbatches input_tensors = [] output_tensors = [] losses_reduced = [] # Run warmup forward passes. for i in range(num_warmup_microbatches): input_tensor = p2p_communication.recv_forward(timers) output_tensor = forward_step( forward_step_func, data_iterator, model, input_tensor, losses_reduced ) p2p_communication.send_forward(output_tensor, timers) input_tensors.append(input_tensor) output_tensors.append(output_tensor) # Before running 1F1B, need to receive first forward tensor. # If all microbatches are run in warmup / cooldown phase, then no need to # receive this tensor here. if num_microbatches_remaining > 0: input_tensor = p2p_communication.recv_forward(timers) # Run 1F1B in steady state. for i in range(num_microbatches_remaining): last_iteration = i == (num_microbatches_remaining - 1) output_tensor = forward_step( forward_step_func, data_iterator, model, input_tensor, losses_reduced ) if forward_only: p2p_communication.send_forward(output_tensor, timers) else: output_tensor_grad = p2p_communication.send_forward_recv_backward( output_tensor, timers ) # Add input_tensor and output_tensor to end of list, then pop from the # start of the list for backward pass. input_tensors.append(input_tensor) output_tensors.append(output_tensor) if forward_only: if not last_iteration: input_tensor = p2p_communication.recv_forward(timers) else: input_tensor, output_tensor = input_tensors.pop(0), output_tensors.pop(0) input_tensor_grad = backward_step( optimizer, input_tensor, output_tensor, output_tensor_grad, model ) if last_iteration: input_tensor = None p2p_communication.send_backward(input_tensor_grad, timers) else: input_tensor = p2p_communication.send_backward_recv_forward( input_tensor_grad, timers ) # Run cooldown backward passes. if not forward_only: for i in range(num_warmup_microbatches): input_tensor = input_tensors.pop(0) output_tensor = output_tensors.pop(0) output_tensor_grad = p2p_communication.recv_backward(timers) input_tensor_grad = backward_step( optimizer, input_tensor, output_tensor, output_tensor_grad, model ) p2p_communication.send_backward(input_tensor_grad, timers) return losses_reduced
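As a concrete illustration of the warmup computation above (the numbers are examples, not values from the source), with a pipeline-parallel world size of 4 and 8 microbatches:

def warmup_count(pipeline_world_size, pipeline_rank, num_microbatches):
    # Same expression as above: earlier stages run more warmup forward passes,
    # while the last stage starts 1F1B immediately.
    return min(pipeline_world_size - pipeline_rank - 1, num_microbatches)

# [warmup_count(4, r, 8) for r in range(4)] -> [3, 2, 1, 0]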
Note that this call will sync across all ranks.
def print_datetime(string):
    """Note that this call will sync across all ranks."""
    torch.distributed.barrier()
    time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print_rank_0("[" + string + "] datetime: {} ".format(time_str))
Main training program. This function will run the following in the order provided: 1) initialize Megatron. 2) setup model, optimizer and lr schedule using the model_provider. 3) call train_val_test_data_provider to get train/val/test datasets. 4) train the model using the forward_step_func. Arguments: train_valid_test_dataset_provider: a function that takes the size of train/valid/test dataset and returns `train, valid, test` datasets. model_provider: a function that returns a vanilla version of the model. By vanilla we mean a simple model on cpu with no fp16 or ddp. forward_step_func: a function that takes a `data iterator` and `model`, and returns a `loss` scalar along with a dictionary whose key:value pairs are the quantities we would like to monitor during training, for example `lm-loss: value`. We also require that this function add `batch generator` to the timers class. extra_args_provider: a function that takes a parser and adds arguments to it. It is used for programs to add their own arguments. args_defaults: a dictionary from argument-name to argument-value. It is used to set already-parsed arguments.
def pretrain( train_valid_test_dataset_provider, model_provider, forward_step_func, valid_forward_step_func=None, extra_args_provider=None, args_defaults={}, ): """Main training program. This function will run the followings in the order provided: 1) initialize Megatron. 2) setup model, optimizer and lr schedule using the model_provider. 3) call train_val_test_data_provider to get train/val/test datasets. 4) train the modle using the forward_step_func. Arguments: train_valid_test_dataset_provider: a function that takes the size of train/valid/test dataset and returns `train, valid, test` datasets. model_provider: a function that returns a vanilla version of the model. By vanilla we mean a simple model on cpu with no fp16 or ddp. forward_step_func: a function that takes a `data iterator` and `model`, and returns a `loss` scalar with a dictionary with key:values being the info we would like to monitor during training, for example `lm-loss: value`. We also require that this function add `batch generator` to the timers class. extra_args_provider: a function that takes a parser and adds arguments to it. It is used for programs to add their own arguments. args_defaults: a dictionary from argument-name to argument-value. It to set already parse arguments. """ # Initalize and get arguments, timers, and Tensorboard writer. initialize_megatron( extra_args_provider=extra_args_provider, args_defaults=args_defaults ) # Adjust the startup time so it reflects the largest value. # This will be closer to what scheduler will see (outside of # image ... launches. global _TRAIN_START_TIME start_time_tensor = torch.cuda.FloatTensor([_TRAIN_START_TIME]) torch.distributed.all_reduce(start_time_tensor, op=torch.distributed.ReduceOp.MIN) _TRAIN_START_TIME = start_time_tensor.item() print_rank_0( "time to initialize megatron (seconds): {:.3f}".format( time.time() - _TRAIN_START_TIME ) ) print_datetime("after megatron is initialized") args = get_args() timers = get_timers() if args.local_rank == 0 and args.save is not None: print(f"Creating output dir ...") os.makedirs(args.save, exist_ok=True) if args.deepspeed: args.deepspeed_configuration = json.load( open(args.deepspeed_config, "r", encoding="utf-8") ) # Model, optimizer, and learning rate. timers("model-and-optimizer-setup").start() model, optimizer, lr_scheduler = setup_model_and_optimizer(model_provider) timers("model-and-optimizer-setup").stop() print_datetime("after model, optimizer, and learning rate " "scheduler are built") # Data stuff. timers("train/valid/test-data-iterators-setup").start() if args.virtual_pipeline_model_parallel_size is not None: all_data_iterators = [ build_train_valid_test_data_iterators(train_valid_test_dataset_provider) for _ in range(len(model)) ] train_data_iterator = [ data_iterators[0] for data_iterators in all_data_iterators ] valid_data_iterator = [ data_iterators[1] for data_iterators in all_data_iterators ] test_data_iterator = [ data_iterators[2] for data_iterators in all_data_iterators ] else: ( train_data_iterator, valid_data_iterator, test_data_iterator, ) = build_train_valid_test_data_iterators(train_valid_test_dataset_provider) timers("train/valid/test-data-iterators-setup").stop() print_datetime("after dataloaders are built") # Print setup timing. 
print_rank_0("done with setup ...") timers.log(["model-and-optimizer-setup", "train/valid/test-data-iterators-setup"]) print_rank_0("training ...") iteration = 0 if args.do_train and args.train_iters > 0: iteration = train( forward_step_func, valid_forward_step_func, model, optimizer, lr_scheduler, train_data_iterator, valid_data_iterator, ) print_datetime("after training is done") if args.do_valid: prefix = "the end of training for val data" if args.co_evaluation: for key, value in valid_data_iterator.items(): evaluate_and_print_results( prefix, valid_forward_step_func, value, model, iteration, False, tag=key ) else: evaluate_and_print_results( prefix, valid_forward_step_func, valid_data_iterator, model, iteration, False ) if args.save and iteration != 0: save_checkpoint(iteration, model, optimizer, lr_scheduler) if args.do_test: # Run on test data. prefix = "the end of training for test data" if args.co_evaluation: for key, value in test_data_iterator.items(): evaluate_and_print_results( prefix, forward_step_func, value, model, 0, True, tag=key ) else: evaluate_and_print_results( prefix, forward_step_func, test_data_iterator, model, 0, True ) if args.wandb_logging and is_last_rank(): wandb.finish()
Build the model.
def get_model(model_provider_func): """Build the model.""" args = get_args() # Build model. if ( mpu.get_pipeline_model_parallel_world_size() > 1 and args.virtual_pipeline_model_parallel_size is not None ): model = [] for i in range(args.virtual_pipeline_model_parallel_size): mpu.set_virtual_pipeline_model_parallel_rank(i) # Set pre_process and post_process only after virtual rank is set. pre_process = mpu.is_pipeline_first_stage() post_process = mpu.is_pipeline_last_stage() this_model = model_provider_func( pre_process=pre_process, post_process=post_process ) model.append(this_model) else: pre_process = mpu.is_pipeline_first_stage() post_process = mpu.is_pipeline_last_stage() model = model_provider_func(pre_process=pre_process, post_process=post_process) if not isinstance(model, list): model = [model] # Set tensor model parallel attributes if not set. # Only parameters that are already tensor model parallel have these # attributes set for them. We should make sure the default attributes # are set for all params so the optimizer can use them. for model_module in model: for param in model_module.parameters(): mpu.set_defaults_if_not_set_tensor_model_parallel_attributes(param) # Print number of parameters. if mpu.get_data_parallel_rank() == 0: print( " > number of parameters on (tensor, pipeline) " "model parallel rank ({}, {}): {}".format( mpu.get_tensor_model_parallel_rank(), mpu.get_pipeline_model_parallel_rank(), sum( [ sum( [ p.ds_numel if hasattr(p, "ds_id") else p.nelement() for p in model_module.parameters() ] ) for model_module in model ] ), ), flush=True, ) if args.deepspeed: return model # GPU allocation. print(f" > moving model to GPU ...", flush=True) for model_module in model: model_module.cuda(torch.cuda.current_device()) print(f" > moving to GPU done", flush=True) # Fp16 conversion. if args.fp16 or args.bf16: print(f" > converting model to fp16 ...", flush=True) model = [Float16Module(model_module, args) for model_module in model] print(f" > converting to fp16 done", flush=True) if args.DDP_impl == "torch": i = torch.cuda.current_device() model = [ torchDDP( model_module, device_ids=[i], output_device=i, process_group=mpu.get_data_parallel_group(), ) for model_module in model ] return model if args.DDP_impl == "local": print(f" > creating DDP model ...", flush=True) model = [ LocalDDP( model_module, args.accumulate_allreduce_grads_in_fp32, args.use_contiguous_buffers_in_ddp, ) for model_module in model ] print(f" > creating DDP model done", flush=True) return model raise NotImplementedError( "Unknown DDP implementation specified: {}. " "Exiting.".format(args.DDP_impl) )
Build the learning rate scheduler.
def get_learning_rate_scheduler(optimizer):
    """Build the learning rate scheduler."""
    args = get_args()

    # Iteration-based training.
    if args.train_iters:
        if args.lr_decay_iters is None:
            args.lr_decay_iters = args.train_iters
        decay_steps = args.lr_decay_iters * args.global_batch_size
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_iters * args.global_batch_size
    # Sample-based training.
    elif args.train_samples:
        # We need to set training iters for later use. Technically
        # we need to adjust the training samples too (due to last
        # batch being incomplete) but we leave it as is for now.
        update_train_iters(args)
        if args.lr_decay_samples is None:
            args.lr_decay_samples = args.train_samples
        decay_steps = args.lr_decay_samples
        if args.lr_warmup_fraction is not None:
            warmup_steps = args.lr_warmup_fraction * decay_steps
        else:
            warmup_steps = args.lr_warmup_samples
    else:
        raise Exception("either train-iters or train-samples should be provided.")

    lr_scheduler = AnnealingLR(
        optimizer,
        max_lr=args.lr,
        min_lr=args.min_lr,
        warmup_steps=warmup_steps,
        decay_steps=decay_steps,
        decay_style=args.lr_decay_style,
        use_checkpoint_lr_scheduler=args.use_checkpoint_lr_scheduler,
        override_lr_scheduler=args.override_lr_scheduler,
    )

    return lr_scheduler
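A worked example of the step computation for iteration-based training, using illustrative values (train_iters = 300000, global_batch_size = 1024, lr_warmup_fraction = 0.01):

# Both decay_steps and warmup_steps are counted in samples, not iterations:
decay_steps = 300000 * 1024        # 307,200,000 samples
warmup_steps = 0.01 * decay_steps  # 3,072,000 samples, i.e. ~3,000 iterations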
Setup model and optimizer.
def setup_model_and_optimizer(model_provider_func): """Setup model and optimizer.""" args = get_args() model = get_model(model_provider_func) unwrapped_model = unwrap_model(model, (torchDDP, LocalDDP, Float16Module)) optimizer = get_megatron_optimizer(unwrapped_model) lr_scheduler = get_learning_rate_scheduler(optimizer) if args.deepspeed: print_rank_0("DeepSpeed is enabled.") pp = mpu.get_pipeline_model_parallel_world_size() print_rank_0(pp) model, optimizer, _, lr_scheduler = deepspeed.initialize( model=model[0], optimizer=optimizer, args=args, lr_scheduler=lr_scheduler, mpu=mpu if args.no_pipeline_parallel else None, ) print_rank_0("FinishInitialization.") if isinstance(model, deepspeed.PipelineEngine): # hack to get batch_fn from pretrain_gpt.py print_rank_0("InstancePipelineEngine.") model.set_batch_fn(model.module._megatron_batch_fn) assert ( model.grid.get_pipe_parallel_rank() == mpu.get_pipeline_model_parallel_rank() ) assert ( model.grid.get_slice_parallel_rank() == mpu.get_tensor_model_parallel_rank() ) assert model.grid.get_data_parallel_rank() == mpu.get_data_parallel_rank() model = [model] print_rank_0("Finishparallel.") if args.load is not None: timers = get_timers() # Extra barrier is added to make sure all ranks report the # max time. torch.distributed.barrier() timers("load-checkpoint").start() if args.low_memory_load: load_start = time.perf_counter() with FileLock(os.path.join(pathlib.Path.home(), "checkpoint_lock"), timeout=-1): this_rank_load_start = time.perf_counter() print(f"Rank {args.rank} is loading checkpoint ...") args.iteration = load_checkpoint(model, optimizer, lr_scheduler) this_rank_load_time = time.perf_counter() - this_rank_load_start load_time = time.perf_counter() - load_start print(f"Rank {args.rank} loaded checkpoint, this rank time: {this_rank_load_time}, total time: {load_time}") else: args.iteration = load_checkpoint(model, optimizer, lr_scheduler) print(f"Rank {args.rank} loaded checkpoint and waiting for other ranks") torch.distributed.barrier() timers("load-checkpoint").stop() timers.log(["load-checkpoint"]) else: args.iteration = 0 # We only support local DDP with multiple micro-batches. if len(model) > 1 or mpu.get_pipeline_model_parallel_world_size() > 1: assert args.DDP_impl == "local" # get model without FP16 and/or TorchDDP wrappers if ( args.iteration == 0 and len(unwrapped_model) == 1 and hasattr(unwrapped_model[0], "init_state_dict_from_bert") ): print_rank_0("Initializing ICT from pretrained BERT model") unwrapped_model[0].init_state_dict_from_bert() if args.fp16: optimizer.reload_model_params() return model, optimizer, lr_scheduler
Single training step.
def train_step(forward_step_func, data_iterator, model, optimizer, lr_scheduler): """Single training step.""" args = get_args() timers = get_timers() if args.deepspeed and args.ds_pipeline_enabled: skipped_iter = 0 num_zeros_in_grad = 0 assert isinstance(model[0], deepspeed.PipelineEngine) loss = model[0].train_batch(data_iter=data_iterator) grad_norm = model[0].get_global_grad_norm() return {"lm loss": loss}, skipped_iter, grad_norm, num_zeros_in_grad # Set grad to zero. if not args.deepspeed: if args.DDP_impl == "local" and args.use_contiguous_buffers_in_ddp: for partition in model: partition.zero_grad_buffer() else: optimizer.zero_grad() if mpu.get_pipeline_model_parallel_world_size() > 1: if args.virtual_pipeline_model_parallel_size is not None: # print_rank_0("===> fb_func = w/ interleaving") forward_backward_func = forward_backward_pipelining_with_interleaving assert get_num_microbatches() % args.pipeline_model_parallel_size == 0, ( "number of microbatches is not divisible by pipeline-parallel " "size when using interleaved schedule" ) else: # print_rank_0("===> fb_func = w/o interleaving") forward_backward_func = forward_backward_pipelining_without_interleaving else: # print_rank_0("===> fb_func = no_pp") forward_backward_func = forward_backward_no_pipelining # print_rank_0("===> running fb_func") losses_reduced = forward_backward_func( forward_step_func, data_iterator, model, optimizer, timers, forward_only=False ) # All-reduce if needed. if not args.deepspeed and args.DDP_impl == "local": timers("backward-params-all-reduce").start() for model_module in model: model_module.allreduce_gradients() timers("backward-params-all-reduce").stop() # All-reduce word_embeddings' grad across first and last stages to ensure # that word_embeddings parameters stay in sync. # This should only run for models that support pipelined model parallelism # (BERT and GPT-2). if not args.deepspeed: timers("backward-embedding-all-reduce").start() if ( mpu.is_pipeline_first_stage(ignore_virtual=True) or mpu.is_pipeline_last_stage(ignore_virtual=True) ) and mpu.get_pipeline_model_parallel_world_size() > 1: if mpu.is_pipeline_first_stage(ignore_virtual=True): unwrapped_model = model[0] elif mpu.is_pipeline_last_stage(ignore_virtual=True): unwrapped_model = model[-1] unwrapped_model = unwrap_model( unwrapped_model, (torchDDP, LocalDDP, Float16Module) ) if unwrapped_model.share_word_embeddings: word_embeddings_weight = unwrapped_model.word_embeddings_weight() if args.DDP_impl == "local": grad = word_embeddings_weight.main_grad else: grad = word_embeddings_weight.grad torch.distributed.all_reduce(grad, group=mpu.get_embedding_group()) timers("backward-embedding-all-reduce").stop() # Update parameters. timers("optimizer").start() # print_rank_0("===> start of update params") if args.deepspeed: increment = ( get_num_microbatches() * args.micro_batch_size * args.data_parallel_size ) model[0].step(lr_kwargs={"increment": increment}) update_successful = model[0].was_step_applied() else: update_successful, grad_norm, num_zeros_in_grad = optimizer.step() # print_rank_0("===> end of update params") timers("optimizer").stop() # Update learning rate. 
if args.deepspeed: skipped_iter = 0 grad_norm = None num_zeros_in_grad = None loss_reduced = {} for key in losses_reduced[0]: losses_reduced_for_key = [x[key] for x in losses_reduced] loss_reduced[key] = sum(losses_reduced_for_key) / len( losses_reduced_for_key ) return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad else: if update_successful: increment = ( get_num_microbatches() * args.micro_batch_size * args.data_parallel_size ) lr_scheduler.step(increment=increment) skipped_iter = 0 else: skipped_iter = 1 if mpu.is_pipeline_last_stage(ignore_virtual=True): # Average loss across microbatches. loss_reduced = {} for key in losses_reduced[0]: losses_reduced_for_key = [x[key] for x in losses_reduced] loss_reduced[key] = sum(losses_reduced_for_key) / len( losses_reduced_for_key ) return loss_reduced, skipped_iter, grad_norm, num_zeros_in_grad return {}, skipped_iter, grad_norm, num_zeros_in_grad
Log training information such as losses and timing.
def training_log( loss_dict, total_loss_dict, learning_rate, iteration, loss_scale, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad, model=None, ): """Log training information such as losses, timing, ....""" args = get_args() timers = get_timers() writer = get_tensorboard_writer() # Advanced, skipped, and Nan iterations. advanced_iters_key = "advanced iterations" skipped_iters_key = "skipped iterations" nan_iters_key = "nan iterations" # Advanced iterations. if not skipped_iter: total_loss_dict[advanced_iters_key] = ( total_loss_dict.get(advanced_iters_key, 0) + 1 ) else: if advanced_iters_key not in total_loss_dict: total_loss_dict[advanced_iters_key] = 0 # Skipped iterations. total_loss_dict[skipped_iters_key] = ( total_loss_dict.get(skipped_iters_key, 0) + skipped_iter ) # Update losses and set nan iterations got_nan = False for key in loss_dict: if not skipped_iter: total_loss_dict[key] = ( total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] ) else: value = loss_dict[key].float().sum().item() is_nan = value == float("inf") or value == -float("inf") or value != value got_nan = got_nan or is_nan total_loss_dict[nan_iters_key] = total_loss_dict.get(nan_iters_key, 0) + int( got_nan ) # Logging. timers_to_log = [] def add_to_logging(name): if name in timers.timers: timers_to_log.append(name) add_to_logging("forward-compute") add_to_logging("forward-recv") add_to_logging("forward-send") add_to_logging("forward-backward-send-forward-backward-recv") add_to_logging("backward-compute") add_to_logging("backward-recv") add_to_logging("backward-send") add_to_logging("backward-send-forward-recv") add_to_logging("backward-send-backward-recv") add_to_logging("backward-params-all-reduce") add_to_logging("backward-embedding-all-reduce") add_to_logging("optimizer-copy-to-main-grad") add_to_logging("optimizer-unscale-and-check-inf") add_to_logging("optimizer-clip-main-grad") add_to_logging("optimizer-copy-main-to-model-params") add_to_logging("optimizer") add_to_logging("batch-generator") # Calculate batch size. batch_size = ( args.micro_batch_size * args.data_parallel_size * get_num_microbatches() ) total_iterations = ( total_loss_dict[advanced_iters_key] + total_loss_dict[skipped_iters_key] ) # wandb logging. if ( args.wandb_logging and (iteration % args.wandb_log_interval == 0) and is_last_rank() ): wandb.log( { "train/tokens": args.consumed_train_tokens, "train/lr": learning_rate, }, step=iteration, ) for k, v in loss_dict.items(): wandb.log({f"train/{k}": v}, step=iteration) for k in timers_to_log: value = timers.timers[k].elapsed(reset=False) wandb.log({f"timer/{k}": value}, step=iteration) # Tensorboard values. 
if writer and (iteration % args.tensorboard_log_interval == 0) and is_last_rank(): writer.add_scalar( "steps-vs-samples/y=steps,x=samples", iteration, args.consumed_train_samples ) writer.add_scalar( "steps-vs-samples/y=samples,x=steps", args.consumed_train_samples, iteration ) writer.add_scalar( "steps-vs-tokens/y=steps,x=tokens", iteration, args.consumed_train_tokens ) writer.add_scalar( "steps-vs-tokens/y=tokens,x=steps", args.consumed_train_tokens, iteration ) if args.log_learning_rate_to_tensorboard: writer.add_scalar("learning-rate/learning-rate", learning_rate, iteration) writer.add_scalar( "learning-rate/learning-rate vs samples", learning_rate, args.consumed_train_samples, ) writer.add_scalar( "learning-rate/learning-rate vs tokens", learning_rate, args.consumed_train_tokens, ) if args.log_batch_size_to_tensorboard: writer.add_scalar("batch-size/batch-size", batch_size, iteration) writer.add_scalar( "batch-size/batch-size vs samples", batch_size, args.consumed_train_samples, ) for key in loss_dict: writer.add_scalar(f"lm-loss-training/{key}", loss_dict[key], iteration) # writer.add_scalar( # f"lm-loss-training/{key}" + " vs samples", # loss_dict[key], # args.consumed_train_samples, # ) # writer.add_scalar( # f"lm-loss-training/{key}" + " vs tokens", # loss_dict[key], # args.consumed_train_tokens, # ) if args.log_loss_scale_to_tensorboard: writer.add_scalar("loss-scale/loss-scale", loss_scale, iteration) writer.add_scalar( "loss-scale/loss-scale vs samples", loss_scale, args.consumed_train_samples, ) writer.add_scalar( "loss-scale/loss-scale vs tokens", loss_scale, args.consumed_train_tokens, ) if grad_norm is not None: writer.add_scalar("grad-norm/grad-norm", grad_norm, iteration) writer.add_scalar( "grad-norm/grad-norm vs samples", grad_norm, args.consumed_train_samples ) writer.add_scalar( "grad-norm/grad-norm vs tokens", grad_norm, args.consumed_train_tokens ) if num_zeros_in_grad is not None: writer.add_scalar("num-zeros/num-zeros", num_zeros_in_grad, iteration) writer.add_scalar( "num-zeros/num-zeros vs samples", num_zeros_in_grad, args.consumed_train_samples, ) writer.add_scalar( "num-zeros/num-zeros vs tokens", num_zeros_in_grad, args.consumed_train_tokens, ) if params_norm is not None: writer.add_scalar("params-norm/params-norm", params_norm, iteration) writer.add_scalar( "params-norm/params-norm vs samples", params_norm, args.consumed_train_samples, ) writer.add_scalar( "params-norm/params-norm vs tokens", params_norm, args.consumed_train_tokens, ) if args.log_timers_to_tensorboard: timers.write(timers_to_log, writer, iteration, normalizer=total_iterations) if iteration % args.log_interval == 0: elapsed_time = timers("interval-time").elapsed() elapsed_time_per_iteration = elapsed_time / total_iterations # log iteration time to wandb if args.wandb_logging and is_last_rank(): wandb.log( { "train/iteration-time": elapsed_time_per_iteration, }, step=iteration, ) # only the last rank process has a non-None _GLOBAL_TENSORBOARD_WRITER if writer and is_last_rank(): if args.log_timers_to_tensorboard: writer.add_scalar( "iteration-time/iteration-time", elapsed_time_per_iteration, iteration, ) writer.add_scalar( "iteration-time/iteration-time vs samples", elapsed_time_per_iteration, args.consumed_train_samples, ) writer.add_scalar( "iteration-time/iteration-time vs tokens", elapsed_time_per_iteration, args.consumed_train_tokens, ) log_string = "==> iteration {:8d}/{:8d} |".format(iteration, args.train_iters) log_string += " consumed samples: {:12d} 
|".format(args.consumed_train_samples) log_string += " consumed tokens: {:12d} |".format(args.consumed_train_tokens) log_string += " elapsed time per iteration (ms): {:.1f} |".format( elapsed_time_per_iteration * 1000.0 ) log_string += " learning rate: {:.3E} |".format(learning_rate) log_string += " global batch size: {:5d} |".format(batch_size) for key in total_loss_dict: if key not in [advanced_iters_key, skipped_iters_key, nan_iters_key]: avg = total_loss_dict[key].item() / float( max(1, total_loss_dict[advanced_iters_key]) ) if avg > 0.0: log_string += " {}: {:.6E} |".format(key, avg) total_loss_dict[key] = torch.cuda.FloatTensor([0.0]) log_string += " loss scale: {:.1f} |".format(loss_scale) if grad_norm is not None: log_string += " grad norm: {:.3f} |".format(grad_norm) if num_zeros_in_grad is not None: log_string += " num zeros: {:.1f} |".format(num_zeros_in_grad) if params_norm is not None: log_string += " params norm: {:.3f} |".format(params_norm) log_string += " number of skipped iterations: {:3d} |".format( total_loss_dict[skipped_iters_key] ) log_string += " number of nan iterations: {:3d} |".format( total_loss_dict[nan_iters_key] ) total_loss_dict[advanced_iters_key] = 0 total_loss_dict[skipped_iters_key] = 0 total_loss_dict[nan_iters_key] = 0 print_rank_last(log_string) if report_memory_flag and learning_rate > 0.0: # Report memory after optimizer state has been initialized. report_memory("(after {} iterations)".format(iteration)) report_memory_flag = False timers.log(timers_to_log, normalizer=args.log_interval) flops_calculator(model, args, elapsed_time) return report_memory_flag
Train the model function.
def train( forward_step_func, valid_forward_step_func, model, optimizer, lr_scheduler, train_data_iterator, valid_data_iterator, ): """Train the model function.""" args = get_args() timers = get_timers() # Write args to tensorboard write_args_to_tensorboard() if args.wandb_logging: torch.distributed.barrier() print_datetime("before the initialization of wandb") timers("wandb-init").start() if is_last_rank(): initialize_wandb_experiment() torch.distributed.barrier() timers("wandb-init").stop() timers.log(["wandb-init"]) # Turn on training mode which enables dropout. for model_module in model: model_module.train() # Tracking loss. total_loss_dict = {} # Iterations. iteration = args.iteration timers("interval-time").start() print_datetime("before the start of training step") report_memory_flag = True while iteration < args.train_iters and ( args.train_tokens is None or args.consumed_train_tokens < args.train_tokens ): # print_rank_0(f'=> iteration {iteration}') update_num_microbatches(args.consumed_train_samples) if args.deepspeed: # inform deepspeed of any batch size changes global_batch_size = ( mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() ) model[0].set_train_batch_size(global_batch_size) # print_rank_0(f"==> running train step for iteration {iteration}") loss_dict, skipped_iter, grad_norm, num_zeros_in_grad = train_step( forward_step_func, train_data_iterator, model, optimizer, lr_scheduler ) iteration += 1 args.iteration = iteration new_samples = ( mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() ) args.consumed_train_samples += new_samples args.consumed_train_tokens += new_samples * args.seq_length # Logging. if args.deepspeed: loss_scale = model[0].optimizer.cur_scale else: loss_scale = optimizer.get_loss_scale().item() params_norm = None if args.log_params_norm: params_norm = calc_params_l2_norm(model) report_memory_flag = training_log( loss_dict, total_loss_dict, optimizer.param_groups[0]["lr"], iteration, loss_scale, report_memory_flag, skipped_iter, grad_norm, params_norm, num_zeros_in_grad, model, ) # Autoresume if args.adlr_autoresume and (iteration % args.adlr_autoresume_interval == 0): check_adlr_autoresume_termination(iteration, model, optimizer, lr_scheduler) # Evaluation if args.eval_interval and iteration % args.eval_interval == 0 and args.do_valid: prefix = "iteration {}".format(iteration) if args.co_evaluation: for key, value in valid_data_iterator.items(): evaluate_and_print_results( prefix, valid_forward_step_func, value, model, iteration, False, tag=key ) else: if args.gold: evaluate_and_print_results_gold( prefix, forward_step_func, valid_data_iterator, model, iteration, False ) evaluate_and_print_results( prefix, valid_forward_step_func, valid_data_iterator, model, iteration, False ) # Checkpointing saved_checkpoint = False if args.save and args.save_interval and (iteration % args.save_interval == 0): # debugging save_checkpoint_and_time(iteration, model, optimizer, lr_scheduler) saved_checkpoint = True # Exiting based on duration if args.exit_duration_in_mins: train_time = (time.time() - _TRAIN_START_TIME) / 60.0 done_cuda = torch.cuda.IntTensor([train_time > args.exit_duration_in_mins]) torch.distributed.all_reduce(done_cuda, op=torch.distributed.ReduceOp.MAX) done = done_cuda.item() if done: if not saved_checkpoint: save_checkpoint_and_time(iteration, model, optimizer, lr_scheduler) print_datetime("exiting program after {} minutes".format(train_time)) sys.exit() # Exiting based on iterations 
if args.exit_interval and iteration % args.exit_interval == 0: if not saved_checkpoint: save_checkpoint_and_time(iteration, model, optimizer, lr_scheduler) torch.distributed.barrier() print_datetime("exiting program at iteration {}".format(iteration)) sys.exit() return iteration
Evaluation.
def evaluate(forward_step_func, data_iterator, model, verbose=False): """Evaluation.""" args = get_args() # Turn on evaluation mode which disables dropout. for model_module in model: model_module.eval() total_loss_dict = {} with torch.no_grad(): iteration = 0 while iteration < args.eval_iters: iteration += 1 if verbose and iteration % args.log_interval == 0: print_rank_0("Evaluating iter {}/{}".format(iteration, args.eval_iters)) if mpu.get_pipeline_model_parallel_world_size() > 1: if args.virtual_pipeline_model_parallel_size is not None: forward_backward_func = ( forward_backward_pipelining_with_interleaving ) else: forward_backward_func = ( forward_backward_pipelining_without_interleaving ) else: forward_backward_func = forward_backward_no_pipelining if args.deepspeed and not args.no_pipeline_parallel: # DeepSpeed uses eval_batch() and already aggregates losses. assert isinstance(model, list) and len(model) == 1 loss = model[0].eval_batch(data_iterator) loss_dicts = [{"lm loss": loss}] * get_num_microbatches() else: loss_dicts = forward_backward_func( forward_step_func, data_iterator, model, optimizer=None, timers=None, forward_only=True, ) if mpu.is_pipeline_last_stage(ignore_virtual=True): # Reduce across processes. for loss_dict in loss_dicts: for key in loss_dict: total_loss_dict[key] = ( total_loss_dict.get(key, torch.cuda.FloatTensor([0.0])) + loss_dict[key] ) args.consumed_valid_samples += ( mpu.get_data_parallel_world_size() * args.micro_batch_size * get_num_microbatches() ) # Move model back to the train mode. for model_module in model: model_module.train() for key in total_loss_dict: total_loss_dict[key] /= args.eval_iters * get_num_microbatches() return total_loss_dict
Helper function to evaluate and dump results on screen.
def evaluate_and_print_results( prefix, forward_step_func, data_iterator, model, iteration, verbose=False, tag=None ): """Helper function to evaluate and dump results on screen.""" args = get_args() writer = get_tensorboard_writer() total_loss_dict = evaluate(forward_step_func, data_iterator, model, verbose) if tag is None: string = " validation loss at {} | ".format(prefix) else: string = " validation loss for {} at {} | ".format(tag, prefix) for key in total_loss_dict: string += "{} value: {:.6E} | ".format(key, total_loss_dict[key].item()) ppl = math.exp(min(20, total_loss_dict[key].item())) string += "{} PPL: {:.6E} | ".format(key, ppl) if tag is not None: display_key = tag + "-" + key else: display_key = key if args.wandb_logging and is_last_rank(): wandb.log( { f"eval/{display_key}": total_loss_dict[key].item(), }, step=iteration, ) if writer and is_last_rank(): writer.add_scalar( f"lm-loss-validation/{display_key} validation", total_loss_dict[key].item(), iteration, ) # writer.add_scalar( # f"lm-loss-validation/{display_key} validation vs samples", # total_loss_dict[key].item(), # args.consumed_train_samples, # ) # writer.add_scalar( # f"lm-loss-validation/{display_key} validation vs tokens", # total_loss_dict[key].item(), # args.consumed_train_tokens, # ) if args.log_validation_ppl_to_tensorboard: writer.add_scalar( f"lm-loss-validation/{display_key} validation ppl", ppl, iteration ) writer.add_scalar( f"lm-loss-validation/{display_key} validation ppl vs samples", ppl, args.consumed_train_samples, ) writer.add_scalar( f"lm-loss-validation/{display_key} validation ppl vs tokens", ppl, args.consumed_train_tokens, ) length = len(string) + 1 print_rank_last("-" * length) print_rank_last(string) print_rank_last("-" * length)
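The reported perplexity is just the exponential of the validation loss, with the loss clamped at 20 so the formatted value cannot overflow; for example:

import math

loss = 2.3
ppl = math.exp(min(20, loss))  # ~9.97; a clamped loss of 20 caps PPL at ~4.85e8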
Helper function to evaluate and dump results on screen.
def evaluate_and_print_results_gold( prefix, forward_step_func, data_iterator, model, iteration, verbose=False, tag=None ): """Helper function to evaluate and dump results on screen.""" args = get_args() writer = get_tensorboard_writer() total_loss_dict = evaluate(forward_step_func, data_iterator, model, verbose) if tag is None: string = " validation loss (gold) at {} | ".format(prefix) else: string = " validation loss (gold) for {} at {} | ".format(tag, prefix) for key in total_loss_dict: string += "{} value: {:.6E} | ".format(key, total_loss_dict[key].item()) ppl = math.exp(min(20, total_loss_dict[key].item())) string += "{} PPL: {:.6E} | ".format(key, ppl) if tag is not None: display_key = tag + "-" + key else: display_key = key if args.wandb_logging and is_last_rank(): wandb.log( { f"eval/{display_key}": total_loss_dict[key].item(), }, step=iteration, ) if writer and is_last_rank(): writer.add_scalar( f"lm-loss-validation-gold/{display_key} validation", total_loss_dict[key].item(), iteration, ) if args.log_validation_ppl_to_tensorboard: writer.add_scalar( f"lm-loss-validation/{display_key} validation ppl", ppl, iteration ) writer.add_scalar( f"lm-loss-validation/{display_key} validation ppl vs samples", ppl, args.consumed_train_samples, ) writer.add_scalar( f"lm-loss-validation/{display_key} validation ppl vs tokens", ppl, args.consumed_train_tokens, ) length = len(string) + 1 print_rank_last("-" * length) print_rank_last(string) print_rank_last("-" * length)
Calculate l2 norm of parameters
def calc_params_l2_norm(model):
    """Calculate l2 norm of parameters"""
    args = get_args()
    if not isinstance(model, list):
        model = [model]
    # Remove duplicate params.
    params_data = []
    for model_ in model:
        for param in model_.parameters():
            is_not_shared = param_is_not_shared(param)
            is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param)
            if is_not_shared and is_not_tp_duplicate:
                if args.bf16:
                    params_data.append(param.data.float())
                else:
                    params_data.append(param.data)
    # Calculate norm.
    dummy_overflow_buf = torch.cuda.IntTensor([0])
    norm, _ = multi_tensor_applier(
        amp_C.multi_tensor_l2norm,
        dummy_overflow_buf,
        [params_data],
        False,  # no per-parameter norm
    )
    norm_2 = norm * norm
    # Sum across all model-parallel GPUs.
    torch.distributed.all_reduce(
        norm_2, op=torch.distributed.ReduceOp.SUM, group=mpu.get_model_parallel_group()
    )
    return norm_2.item() ** 0.5
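calc_params_l2_norm() depends on apex's multi_tensor_applier and amp_C kernels. For reference only, a plain-PyTorch sketch of the same reduction (assuming the de-duplicated params_data list has been built exactly as above) could look like this; it is not the implementation used here.

def calc_params_l2_norm_torch(params_data):
    # params_data: de-duplicated parameter tensors (shared and tensor-parallel
    # duplicates already filtered out, as in calc_params_l2_norm()).
    norm_2 = torch.zeros(1, device=params_data[0].device)
    for p in params_data:
        norm_2 += p.float().norm(2) ** 2
    # Sum the squared norms across all model-parallel GPUs, then take the root.
    torch.distributed.all_reduce(
        norm_2, op=torch.distributed.ReduceOp.SUM, group=mpu.get_model_parallel_group()
    )
    return norm_2.item() ** 0.5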
Reduce a tensor of losses across all GPUs.
def average_losses_across_data_parallel_group(losses):
    """Reduce a tensor of losses across all GPUs."""
    averaged_losses = torch.cat([loss.clone().detach().view(1) for loss in losses])
    torch.distributed.all_reduce(averaged_losses, group=mpu.get_data_parallel_group())
    averaged_losses = averaged_losses / torch.distributed.get_world_size(
        group=mpu.get_data_parallel_group()
    )

    return averaged_losses
Simple GPU memory report.
def report_memory(name):
    """Simple GPU memory report."""
    mega_bytes = 1024.0 * 1024.0
    string = name + " memory (MB)"
    string += " | allocated: {}".format(torch.cuda.memory_allocated() / mega_bytes)
    string += " | max allocated: {}".format(
        torch.cuda.max_memory_allocated() / mega_bytes
    )
    string += " | reserved: {}".format(torch.cuda.memory_reserved() / mega_bytes)
    string += " | max reserved: {}".format(
        torch.cuda.max_memory_reserved() / mega_bytes
    )
    if mpu.get_data_parallel_rank() == 0:
        print("[Rank {}] {}".format(torch.distributed.get_rank(), string), flush=True)
Print min, max, and norm of all parameters.
def print_params_min_max_norm(optimizer, iteration):
    """Print min, max, and norm of all parameters."""
    index = 0
    rank = torch.distributed.get_rank()
    string = "iteration, rank, index, tensor-model-parallel, min, max, norm\n"
    optimizer_ = optimizer.optimizer
    for param_group in optimizer_.param_groups:
        for param in param_group["params"]:
            index += 1
            min_ = param.data.min()
            max_ = param.data.max()
            norm = torch.linalg.norm(param.data)
            string += "{:7d}, {:4d}, {:4d}, {:2d}, ".format(
                iteration, rank, index, int(param.tensor_model_parallel)
            )
            string += "{:.6E}, {:.6E}, {:.6E}\n".format(min_, max_, norm)
    print(string, flush=True)
Check for autoresume signal and exit if it is received.
def check_adlr_autoresume_termination(iteration, model, optimizer, lr_scheduler):
    """Check for autoresume signal and exit if it is received."""
    from codegeex.megatron.checkpointing import save_checkpoint

    args = get_args()
    autoresume = get_adlr_autoresume()
    # Add barrier to ensure consistency.
    torch.distributed.barrier()
    if autoresume.termination_requested():
        if args.save:
            save_checkpoint(iteration, model, optimizer, lr_scheduler)
        print_rank_0(">>> autoresume termination request found!")
        if torch.distributed.get_rank() == 0:
            autoresume.request_resume()
        print_rank_0(">>> training terminated. Returning")
        sys.exit(0)
Build masks and position ids for a left-to-right model.
def get_ltor_masks_and_position_ids(
    data, eod_token, reset_position_ids, reset_attention_mask, eod_mask_loss
):
    """Build masks and position ids for a left-to-right model."""
    # Extract batch size and sequence length.
    micro_batch_size, seq_length = data.size()

    # Attention mask (lower triangular).
    if reset_attention_mask:
        att_mask_batch = micro_batch_size
    else:
        att_mask_batch = 1
    attention_mask = torch.tril(
        torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
    ).view(att_mask_batch, 1, seq_length, seq_length)

    # Loss mask.
    loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
    if eod_mask_loss:
        loss_mask[data == eod_token] = 0.0

    # Position ids.
    position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
    position_ids = position_ids.unsqueeze(0).expand_as(data)
    # We need to clone as the ids will be modified based on batch index.
    if reset_position_ids:
        position_ids = position_ids.clone()

    if reset_position_ids or reset_attention_mask:
        # Loop through the batches:
        for b in range(micro_batch_size):

            # Find indices where EOD token is.
            eod_index = position_ids[b, data[b] == eod_token]
            # Detach indices from positions if going to modify positions.
            if reset_position_ids:
                eod_index = eod_index.clone()

            # Loop through EOD indices:
            prev_index = 0
            for j in range(eod_index.size()[0]):
                i = eod_index[j]
                # Mask attention loss.
                if reset_attention_mask:
                    attention_mask[b, 0, (i + 1):, : (i + 1)] = 0
                # Reset positions.
                if reset_position_ids:
                    position_ids[b, (i + 1):] -= i + 1 - prev_index
                    prev_index = i + 1

    # Convert attention mask to binary:
    attention_mask = attention_mask < 0.5

    return attention_mask, loss_mask, position_ids
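A toy example with illustrative token ids shows how the EOD resets behave (eod_token = 0 appears at positions 3 and 6):

data = torch.tensor([[5, 6, 7, 0, 8, 9, 0, 4]])
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
    data, eod_token=0, reset_position_ids=True,
    reset_attention_mask=True, eod_mask_loss=True,
)
# position_ids[0] -> [0, 1, 2, 3, 0, 1, 2, 0]   (positions restart after each EOD)
# loss_mask[0]    -> [1, 1, 1, 0, 1, 1, 0, 1]   (EOD positions dropped from the loss)
# attention_mask is lower triangular, and additionally blocks tokens after an
# EOD from attending to anything before it (True = masked after the `< 0.5` cast).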
If distributed is initialized, print only on rank 0.
def print_rank_0(message):
    """If distributed is initialized, print only on rank 0."""
    if torch.distributed.is_initialized():
        if torch.distributed.get_rank() == 0:
            print(message, flush=True)
    else:
        print(message, flush=True)
If distributed is initialized, print only on last rank.
def print_rank_last(message):
    """If distributed is initialized, print only on last rank."""
    if torch.distributed.is_initialized():
        if is_last_rank():
            print(message, flush=True)
    else:
        print(message, flush=True)
Compile helper functions at runtime. Make sure this is invoked on a single process.
def compile_helper():
    """Compile helper functions at runtime. Make sure this is invoked on a single process."""
    import os
    import subprocess

    path = os.path.abspath(os.path.dirname(__file__))
    ret = subprocess.run(["make", "-C", path])
    if ret.returncode != 0:
        print("Making C++ dataset helpers module failed, exiting.")
        import sys

        sys.exit(1)
Divide sample into a and b segments.
def get_a_and_b_segments(sample, np_rng):
    """Divide sample into a and b segments."""
    # Number of sentences in the sample.
    n_sentences = len(sample)
    # Make sure we always have two sentences.
    assert n_sentences > 1, "make sure each sample has at least two sentences."

    # First part:
    # `a_end` is how many sentences go into the `A`.
    a_end = 1
    if n_sentences >= 3:
        # Note that randint in numpy is exclusive.
        a_end = np_rng.randint(1, n_sentences)
    tokens_a = []
    for j in range(a_end):
        tokens_a.extend(sample[j])

    # Second part:
    tokens_b = []
    for j in range(a_end, n_sentences):
        tokens_b.extend(sample[j])

    # Random next:
    is_next_random = False
    if np_rng.random() < 0.5:
        is_next_random = True
        tokens_a, tokens_b = tokens_b, tokens_a

    return tokens_a, tokens_b, is_next_random
Truncates a pair of sequences to a maximum sequence length.
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
    """Truncates a pair of sequences to a maximum sequence length."""
    # print(len_a, len_b, max_num_tokens)
    assert len_a > 0
    if len_a + len_b <= max_num_tokens:
        return False
    while len_a + len_b > max_num_tokens:
        if len_a > len_b:
            len_a -= 1
            tokens = tokens_a
        else:
            len_b -= 1
            tokens = tokens_b
        if np_rng.random() < 0.5:
            del tokens[0]
        else:
            tokens.pop()
    return True
Merge segments A and B, add [CLS] and [SEP] and build tokentypes.
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
    """Merge segments A and B, add [CLS] and [SEP] and build tokentypes."""
    tokens = []
    tokentypes = []
    # [CLS].
    tokens.append(cls_id)
    tokentypes.append(0)
    # Segment A.
    for token in tokens_a:
        tokens.append(token)
        tokentypes.append(0)
    # [SEP].
    tokens.append(sep_id)
    tokentypes.append(0)
    # Segment B.
    for token in tokens_b:
        tokens.append(token)
        tokentypes.append(1)
    if tokens_b:
        # [SEP].
        tokens.append(sep_id)
        tokentypes.append(1)

    return tokens, tokentypes
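A toy example with illustrative vocabulary ids (cls_id = 101, sep_id = 102):

tokens, tokentypes = create_tokens_and_tokentypes([7, 8], [9], cls_id=101, sep_id=102)
# tokens     -> [101, 7, 8, 102, 9, 102]
# tokentypes -> [  0, 0, 0,   0, 1,   1]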
Check if the current word piece is the starting piece (BERT).
def is_start_piece(piece):
    """Check if the current word piece is the starting piece (BERT)."""
    # When a word has been split into WordPieces, the first token does not
    # have any marker and any subsequent tokens are prefixed with ##. So
    # whenever we see the ## prefix, we append the piece to the previous
    # set of word indexes.
    return not piece.startswith("##")
Creates the predictions for the masked LM objective. Note: Tokens here are vocab ids and not text tokens.
def create_masked_lm_predictions( tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng, max_ngrams=3, do_whole_word_mask=True, favor_longer_ngram=False, do_permutation=False, geometric_dist=False, masking_style="bert", ): """Creates the predictions for the masked LM objective. Note: Tokens here are vocab ids and not text tokens.""" cand_indexes = [] # Note(mingdachen): We create a list for recording if the piece is # the starting piece of current token, where 1 means true, so that # on-the-fly whole word masking is possible. token_boundary = [0] * len(tokens) for (i, token) in enumerate(tokens): if token == cls_id or token == sep_id: token_boundary[i] = 1 continue # Whole Word Masking means that if we mask all of the wordpieces # corresponding to an original word. # # Note that Whole Word Masking does *not* change the training code # at all -- we still predict each WordPiece independently, softmaxed # over the entire vocabulary. if ( do_whole_word_mask and len(cand_indexes) >= 1 and not is_start_piece(vocab_id_to_token_dict[token]) ): cand_indexes[-1].append(i) else: cand_indexes.append([i]) if is_start_piece(vocab_id_to_token_dict[token]): token_boundary[i] = 1 output_tokens = list(tokens) masked_lm_positions = [] masked_lm_labels = [] if masked_lm_prob == 0: return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary) num_to_predict = min( max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))) ) ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64) if not geometric_dist: # Note(mingdachen): # By default, we set the probilities to favor shorter ngram sequences. pvals = 1.0 / np.arange(1, max_ngrams + 1) pvals /= pvals.sum(keepdims=True) if favor_longer_ngram: pvals = pvals[::-1] ngram_indexes = [] for idx in range(len(cand_indexes)): ngram_index = [] for n in ngrams: ngram_index.append(cand_indexes[idx : idx + n]) ngram_indexes.append(ngram_index) np_rng.shuffle(ngram_indexes) (masked_lms, masked_spans) = ([], []) covered_indexes = set() for cand_index_set in ngram_indexes: if len(masked_lms) >= num_to_predict: break if not cand_index_set: continue # Note(mingdachen): # Skip current piece if they are covered in lm masking or previous ngrams. for index_set in cand_index_set[0]: for index in index_set: if index in covered_indexes: continue if not geometric_dist: n = np_rng.choice( ngrams[: len(cand_index_set)], p=pvals[: len(cand_index_set)] / pvals[: len(cand_index_set)].sum(keepdims=True), ) else: # Sampling "n" from the geometric distribution and clipping it to # the max_ngrams. Using p=0.2 default from the SpanBERT paper # https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1) n = min(np_rng.geometric(0.2), max_ngrams) index_set = sum(cand_index_set[n - 1], []) n -= 1 # Note(mingdachen): # Repeatedly looking for a candidate that does not exceed the # maximum number of predictions by trying shorter ngrams. while len(masked_lms) + len(index_set) > num_to_predict: if n == 0: break index_set = sum(cand_index_set[n - 1], []) n -= 1 # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. 
if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_token = None if masking_style == "bert": # 80% of the time, replace with [MASK] if np_rng.random() < 0.8: masked_token = mask_id else: # 10% of the time, keep original if np_rng.random() < 0.5: masked_token = tokens[index] # 10% of the time, replace with random word else: masked_token = vocab_id_list[ np_rng.randint(0, len(vocab_id_list)) ] elif masking_style == "t5": masked_token = mask_id else: raise ValueError("invalid value of masking style") output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) masked_spans.append( MaskedLmInstance( index=index_set, label=[tokens[index] for index in index_set] ) ) assert len(masked_lms) <= num_to_predict np_rng.shuffle(ngram_indexes) select_indexes = set() if do_permutation: for cand_index_set in ngram_indexes: if len(select_indexes) >= num_to_predict: break if not cand_index_set: continue # Note(mingdachen): # Skip current piece if they are covered in lm masking or previous ngrams. for index_set in cand_index_set[0]: for index in index_set: if index in covered_indexes or index in select_indexes: continue n = np.random.choice( ngrams[: len(cand_index_set)], p=pvals[: len(cand_index_set)] / pvals[: len(cand_index_set)].sum(keepdims=True), ) index_set = sum(cand_index_set[n - 1], []) n -= 1 while len(select_indexes) + len(index_set) > num_to_predict: if n == 0: break index_set = sum(cand_index_set[n - 1], []) n -= 1 # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. if len(select_indexes) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if index in covered_indexes or index in select_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: select_indexes.add(index) assert len(select_indexes) <= num_to_predict select_indexes = sorted(select_indexes) permute_indexes = list(select_indexes) np_rng.shuffle(permute_indexes) orig_token = list(output_tokens) for src_i, tgt_i in zip(select_indexes, permute_indexes): output_tokens[src_i] = orig_token[tgt_i] masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i])) masked_lms = sorted(masked_lms, key=lambda x: x.index) # Sort the spans by the index of the first span masked_spans = sorted(masked_spans, key=lambda x: x.index[0]) for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return ( output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans, )
Pad sequences and convert them to numpy.
def pad_and_convert_to_numpy(
    tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length
):
    """Pad sequences and convert them to numpy."""

    # Some checks.
    num_tokens = len(tokens)
    padding_length = max_seq_length - num_tokens
    assert padding_length >= 0
    assert len(tokentypes) == num_tokens
    assert len(masked_positions) == len(masked_labels)

    # Tokens and token types.
    filler = [pad_id] * padding_length
    tokens_np = np.array(tokens + filler, dtype=np.int64)
    tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)

    # Padding mask.
    padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, dtype=np.int64)

    # Labels and loss mask.
    labels = [-1] * max_seq_length
    loss_mask = [0] * max_seq_length
    for i in range(len(masked_positions)):
        assert masked_positions[i] < num_tokens
        labels[masked_positions[i]] = masked_labels[i]
        loss_mask[masked_positions[i]] = 1
    labels_np = np.array(labels, dtype=np.int64)
    loss_mask_np = np.array(loss_mask, dtype=np.int64)

    return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
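An illustrative call with toy values (not from the original file); pad_id=0 and max_seq_length=6 are arbitrary choices for the sketch.

import numpy as np

tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np = (
    pad_and_convert_to_numpy(
        tokens=[101, 7, 103, 102],  # e.g. [CLS] x [MASK] [SEP]
        tokentypes=[0, 0, 0, 0],
        masked_positions=[2],
        masked_labels=[9],
        pad_id=0,
        max_seq_length=6,
    )
)
print(tokens_np)        # [101   7 103 102   0   0]
print(labels_np)        # [ -1  -1   9  -1  -1  -1]
print(padding_mask_np)  # [1 1 1 1 0 0]
print(loss_mask_np)     # [0 0 1 0 0 0]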
Get dataset splits from comma or '/' separated string list.
def get_train_valid_test_split_(splits_string, size): """Get dataset splits from comma or '/' separated string list.""" splits = [] if splits_string.find(",") != -1: splits = [float(s) for s in splits_string.split(",")] elif splits_string.find("/") != -1: splits = [float(s) for s in splits_string.split("/")] else: splits = [float(splits_string)] while len(splits) < 3: splits.append(0.0) splits = splits[:3] splits_sum = sum(splits) assert splits_sum > 0.0 splits = [split / splits_sum for split in splits] splits_index = [0] for index, split in enumerate(splits): splits_index.append(splits_index[index] + int(round(split * float(size)))) diff = splits_index[-1] - size for index in range(1, len(splits_index)): splits_index[index] -= diff assert len(splits_index) == 4 assert splits_index[-1] == size return splits_index
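A worked example (illustrative only, assuming the function above is in scope): a "949,50,1" split of 1000 documents.

splits_index = get_train_valid_test_split_("949,50,1", 1000)
print(splits_index)  # [0, 949, 999, 1000]
# Train uses documents [0, 949), validation [949, 999), test [999, 1000).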
Get a list that maps a sample index to a starting sentence index, end sentence index, and length
def get_samples_mapping(
    indexed_dataset,
    data_prefix,
    num_epochs,
    max_num_samples,
    max_seq_length,
    short_seq_prob,
    seed,
    name,
    binary_head,
):
    """Get a list that maps a sample index to a starting sentence index, end sentence index, and length"""

    if not num_epochs:
        if not max_num_samples:
            raise ValueError("Need to specify either max_num_samples " "or num_epochs")
        num_epochs = np.iinfo(np.int32).max - 1
    if not max_num_samples:
        max_num_samples = np.iinfo(np.int64).max - 1

    # Filename of the index mapping
    indexmap_filename = data_prefix
    indexmap_filename += "_{}_indexmap".format(name)
    if num_epochs != (np.iinfo(np.int32).max - 1):
        indexmap_filename += "_{}ep".format(num_epochs)
    if max_num_samples != (np.iinfo(np.int64).max - 1):
        indexmap_filename += "_{}mns".format(max_num_samples)
    indexmap_filename += "_{}msl".format(max_seq_length)
    indexmap_filename += "_{:0.2f}ssp".format(short_seq_prob)
    indexmap_filename += "_{}s".format(seed)
    indexmap_filename += ".npy"

    # Build the indexed mapping if not exist.
    if torch.distributed.get_rank() == 0 and not os.path.isfile(indexmap_filename):
        print(
            " > WARNING: could not find index map file {}, building "
            "the indices on rank 0 ...".format(indexmap_filename)
        )

        # Make sure the types match the helpers input types.
        assert indexed_dataset.doc_idx.dtype == np.int64
        assert indexed_dataset.sizes.dtype == np.int32

        # Build samples mapping
        verbose = torch.distributed.get_rank() == 0
        start_time = time.time()
        print_rank_0(" > building samples index mapping for {} ...".format(name))
        # First compile and then import.
        from megatron.data import helpers

        samples_mapping = helpers.build_mapping(
            indexed_dataset.doc_idx,
            indexed_dataset.sizes,
            num_epochs,
            max_num_samples,
            max_seq_length,
            short_seq_prob,
            seed,
            verbose,
            2 if binary_head else 1,
        )
        print_rank_0(" > done building samples index mapping")
        np.save(indexmap_filename, samples_mapping, allow_pickle=True)
        print_rank_0(" > saved the index mapping in {}".format(indexmap_filename))
        # Make sure all the ranks have built the mapping
        print_rank_0(
            " > elapsed time to build and save samples mapping "
            "(seconds): {:4f}".format(time.time() - start_time)
        )
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
    assert counts[0].item() == (
        torch.distributed.get_world_size()
        // torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group())
    )

    # Load indexed dataset.
    print_rank_0(" > loading indexed mapping from {}".format(indexmap_filename))
    start_time = time.time()
    samples_mapping = np.load(indexmap_filename, allow_pickle=True, mmap_mode="r")
    print_rank_0(
        "    loaded indexed file in {:3.3f} seconds".format(time.time() - start_time)
    )
    print_rank_0("    total number of samples: {}".format(samples_mapping.shape[0]))

    return samples_mapping
Build dataloader given an input dataset.
def build_pretraining_data_loader(dataset, consumed_samples):
    """Build dataloader given an input dataset."""

    if dataset is None:
        return None
    args = get_args()

    # Megatron sampler
    if args.dataloader_type == "single":
        batch_sampler = MegatronPretrainingSampler(
            total_samples=len(dataset),
            consumed_samples=consumed_samples,
            micro_batch_size=args.micro_batch_size,
            data_parallel_rank=mpu.get_data_parallel_rank(),
            data_parallel_size=mpu.get_data_parallel_world_size(),
        )
    elif args.dataloader_type == "cyclic":
        batch_sampler = MegatronPretrainingRandomSampler(
            total_samples=len(dataset),
            consumed_samples=consumed_samples,
            micro_batch_size=args.micro_batch_size,
            data_parallel_rank=mpu.get_data_parallel_rank(),
            data_parallel_size=mpu.get_data_parallel_world_size(),
        )
    else:
        raise Exception(
            "{} dataloader type is not supported.".format(args.dataloader_type)
        )

    # Torch dataloader.
    return torch.utils.data.DataLoader(
        dataset,
        batch_sampler=batch_sampler,
        num_workers=args.num_workers,
        pin_memory=True,
    )
Build train, valid, and test datasets.
def build_train_valid_test_datasets( data_prefix, data_impl, splits_string, train_valid_test_num_samples, seq_length, seed, skip_warmup, ): """Build train, valid, and test datasets.""" # Single dataset. if len(data_prefix) == 1: return _build_train_valid_test_datasets( data_prefix[0], data_impl, splits_string, train_valid_test_num_samples, seq_length, seed, skip_warmup, ) # Blending dataset. # Parse the values. output = get_datasets_weights_and_num_samples( data_prefix, train_valid_test_num_samples ) prefixes, weights, datasets_train_valid_test_num_samples = output # Build individual datasets. train_datasets = [] valid_datasets = [] test_datasets = [] for i in range(len(prefixes)): train_ds, valid_ds, test_ds = _build_train_valid_test_datasets( prefixes[i], data_impl, splits_string, datasets_train_valid_test_num_samples[i], seq_length, seed, skip_warmup, ) if train_ds: train_datasets.append(train_ds) if valid_ds: valid_datasets.append(valid_ds) if test_ds: test_datasets.append(test_ds) # Blend. blending_train_dataset = None if train_datasets: blending_train_dataset = BlendableDataset(train_datasets, weights) blending_valid_dataset = None if valid_datasets: blending_valid_dataset = BlendableDataset(valid_datasets, weights) blending_test_dataset = None if test_datasets: blending_test_dataset = BlendableDataset(test_datasets, weights) return (blending_train_dataset, blending_valid_dataset, blending_test_dataset)
Build train, valid, and test datasets.
def _build_train_valid_test_datasets( data_prefix, data_impl, splits_string, train_valid_test_num_samples, seq_length, seed, skip_warmup, ): """Build train, valid, and test datasets.""" # Indexed dataset. assert os.path.exists(data_prefix + "_input_ids.bin"), f"Input tokens datafile not found: {data_prefix}_input_ids.bin" assert os.path.exists(data_prefix + "_attention_mask.bin"), f"Attention mask datafile not found: {data_prefix}_attention_mask.bin" assert os.path.exists(data_prefix + "_labels.bin"), f"Labels datafile not found: {data_prefix}_labels.bin" input_ids_indexed_dataset = get_indexed_dataset_(data_prefix + "_input_ids", data_impl, skip_warmup) attention_mask_indexed_dataset = get_indexed_dataset_(data_prefix + "_attention_mask", data_impl, skip_warmup) labels_indexed_dataset = get_indexed_dataset_(data_prefix + "_labels", data_impl, skip_warmup) total_num_of_documents = input_ids_indexed_dataset.sizes.shape[0] splits = get_train_valid_test_split_(splits_string, total_num_of_documents) # Print stats about the splits. print_rank_0(" > dataset split:") def print_split_stats(name, index): print_rank_0(" {}:".format(name)) print_rank_0( " document indices in [{}, {}) total of {} " "documents".format( splits[index], splits[index + 1], splits[index + 1] - splits[index] ) ) print_split_stats("train", 0) print_split_stats("validation", 1) print_split_stats("test", 2) def build_dataset(index, name): dataset = None if splits[index + 1] > splits[index]: documents = np.arange( start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32 ) dataset = PromptDataset( name, data_prefix, documents, input_ids_indexed_dataset, attention_mask_indexed_dataset, labels_indexed_dataset, train_valid_test_num_samples[index], seq_length, seed, ) return dataset train_dataset = build_dataset(0, "train") valid_dataset = build_dataset(1, "valid") test_dataset = build_dataset(2, "test") print_rank_0(f"train_dataset:{type(train_dataset)}") print_rank_0(f"valid_dataset:{type(valid_dataset)}") print_rank_0(f"test_dataset:{type(test_dataset)}") return (train_dataset, valid_dataset, test_dataset)
Build indexed dataset.
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup): """Build indexed dataset.""" print_rank_0(" > building dataset index ...") start_time = time.time() indexed_dataset = make_indexed_dataset(data_prefix, data_impl, skip_warmup) print_rank_0( " > finished creating indexed dataset in {:4f} " "seconds".format(time.time() - start_time) ) print_rank_0(" number of documents: {}".format(indexed_dataset.sizes.shape[0])) return indexed_dataset
Build index mappings. We only have to build doc-idx in prompt dataset. Args: name: name of the dataset. data_prefix: prefix of the data. documents: list of document indices. sizes: sizes of the indexed dataset. num_samples: number of samples to draw from the indexed dataset. seq_length: sequence length. seed: seed for random number generator.
def _build_index_mappings(
    name,
    data_prefix,
    documents,
    sizes,
    num_samples,
    seq_length,
    seed,
):
    """Build index mappings. We only have to build doc-idx in prompt dataset.

    Args:
        name: name of the dataset.
        data_prefix: prefix of the data.
        documents: list of document indices.
        sizes: sizes of the indexed dataset.
        num_samples: number of samples to draw from the indexed dataset.
        seq_length: sequence length.
        seed: seed for random number generator.
    """
    num_epochs = _num_epochs(documents.shape[0], num_samples)
    np_rng = np.random.RandomState(seed=seed)

    _filename = data_prefix
    _filename += "_{}_indexmap".format(name)
    _filename += "_{}ns".format(num_samples)
    _filename += "_{}sl".format(seq_length)
    _filename += "_{}s".format(seed)
    doc_idx_filename = _filename + "_doc_idx.npy"

    if torch.distributed.get_rank() == 0:
        if not os.path.isfile(doc_idx_filename):
            print_rank_0(
                " > WARNING: could not find index map files, building "
                "the indices on rank 0 ..."
            )
            start_time = time.time()
            doc_idx = _build_doc_idx(documents, num_epochs, np_rng, False)[:num_samples]
            np.save(doc_idx_filename, doc_idx, allow_pickle=True)
            print_rank_0(
                " > elapsed time to build and save doc-idx mapping "
                "(seconds): {:4f}".format(time.time() - start_time)
            )
    # This should be a barrier but nccl barrier assumes
    # device_index=rank which is not the case for model
    # parallel case
    counts = torch.cuda.LongTensor([1])
    torch.distributed.all_reduce(counts, group=mpu.get_data_parallel_group())
    torch.distributed.all_reduce(counts, group=mpu.get_pipeline_model_parallel_group())
    assert counts[0].item() == (
        torch.distributed.get_world_size()
        // torch.distributed.get_world_size(group=mpu.get_tensor_model_parallel_group())
    )

    # Load mappings.
    start_time = time.time()
    print_rank_0(" > loading doc-idx mapping from {}".format(doc_idx_filename))
    doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode="r")
    print_rank_0("    total number of samples: {}".format(doc_idx.shape[0]))
    print_rank_0("    total number of epochs: {}".format(num_epochs))

    return doc_idx
Calculate the number of epochs needed to produce the requested number of samples.
def _num_epochs(samples_per_epoch, num_samples):
    """Calculate the number of epochs needed to produce the requested number of samples."""
    return int(np.ceil(num_samples / samples_per_epoch))
Build an array with length = number-of-epochs * number-of-documents. Each index is mapped to a corresponding document.
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch):
    """Build an array with length = number-of-epochs * number-of-documents.
    Each index is mapped to a corresponding document."""
    if not separate_last_epoch or num_epochs == 1:
        doc_idx = np.mgrid[0:num_epochs, 0 : len(documents)][1]
        doc_idx[:] = documents
        doc_idx = doc_idx.reshape(-1)
        doc_idx = doc_idx.astype(np.int32)
        np_rng.shuffle(doc_idx)
        return doc_idx

    doc_idx_first = _build_doc_idx(documents, num_epochs - 1, np_rng, False)
    doc_idx_last = _build_doc_idx(documents, 1, np_rng, False)
    return np.concatenate((doc_idx_first, doc_idx_last))
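An illustrative call (not part of the original file): four documents repeated over two epochs, shuffled into a single flat index array.

import numpy as np

documents = np.arange(4, dtype=np.int32)
np_rng = np.random.RandomState(seed=123)
doc_idx = _build_doc_idx(documents, num_epochs=2, np_rng=np_rng, separate_last_epoch=False)
print(doc_idx.shape)             # (8,)
print(sorted(doc_idx.tolist()))  # [0, 0, 1, 1, 2, 2, 3, 3]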
LM logits using word embedding weights.
def parallel_lm_logits(input_, word_embeddings_weight, parallel_output, bias=None): """LM logits using word embedding weights.""" # Parallel logits. input_parallel = mpu.copy_to_tensor_model_parallel_region(input_) # Matrix multiply. args = get_args() if args.shrink_logit_embedding_gradient: if hasattr(args, 'iteration'): alpha = get_shrink_embedding_gradient_alpha(args.iteration + 1) else: alpha = args.shrink_embedding_gradient_alpha word_embeddings_weight = word_embeddings_weight if alpha == 1.0 \ else ( word_embeddings_weight * alpha + word_embeddings_weight.detach() * (1 - alpha) ) if bias is None: logits_parallel = F.linear(input_parallel, word_embeddings_weight.half()) else: logits_parallel = F.linear(input_parallel, word_embeddings_weight.half(), bias) # Gather if needed. if parallel_output: return logits_parallel return mpu.gather_from_tensor_model_parallel_region(logits_parallel)
Build language model and return along with the key to save.
def get_language_model( num_tokentypes, add_pooler, init_method=None, scaled_init_method=None, ): """Build language model and return along with the key to save.""" args = get_args() if init_method is None: init_method = init_method_normal(args.init_method_std) if scaled_init_method is None: scaled_init_method = scaled_init_method_normal(args.init_method_std, args.num_layers) # Language model. language_model = TransformerLanguageModel( init_method=init_method, output_layer_init_method=scaled_init_method, num_tokentypes=num_tokentypes, add_pooler=add_pooler) # key used for checkpoints. language_model_key = 'language_model' return language_model, language_model_key
Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure.
def conversion_helper(val, conversion):
    """Apply conversion to val. Recursively apply conversion if `val`
    is a nested tuple/list structure."""
    if not isinstance(val, (tuple, list)):
        return conversion(val)
    rtn = [conversion_helper(v, conversion) for v in val]
    if isinstance(val, tuple):
        rtn = tuple(rtn)
    return rtn
Convert fp32 `val` to fp16/bf16
def fp32_to_float16(val, float16_convertor): """Convert fp32 `val` to fp16/bf16""" def half_conversion(val): val_typecheck = val if isinstance(val_typecheck, (Parameter, Variable)): val_typecheck = val.data if isinstance(val_typecheck, _FLOAT_TYPES): val = float16_convertor(val) return val return conversion_helper(val, half_conversion)
Convert fp16/bf16 `val` to fp32
def float16_to_fp32(val): """Convert fp16/bf16 `val` to fp32""" def float_conversion(val): val_typecheck = val if isinstance(val_typecheck, (Parameter, Variable)): val_typecheck = val.data if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)): val = val.float() return val return conversion_helper(val, float_conversion)
Init method based on N(0, sigma).
def init_method_normal(sigma): """Init method based on N(0, sigma).""" def init_(tensor): return torch.nn.init.normal_(tensor, mean=0.0, std=sigma) return init_
Init method based on N(0, sigma/sqrt(2*num_layers)).
def scaled_init_method_normal(sigma, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers))."""
    std = sigma / math.sqrt(2.0 * num_layers)

    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=std)

    return init_
Simple linear layer with weight initialization.
def get_linear_layer(rows, columns, init_method): """Simple linear layer with weight initialization.""" layer = torch.nn.Linear(rows, columns) init_method(layer.weight) with torch.no_grad(): layer.bias.zero_() return layer
Mindspore's fast gelu implementation.
def fast_gelu(x): """Mindspore's fast gelu implementation.""" return x / (1 + torch.exp(-1.702 * torch.abs(x))) * torch.exp(0.851 * (x - torch.abs(x)))
OpenAI's gelu implementation.
def gelu_impl(x): """OpenAI's gelu implementation.""" return ( 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x))) )
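A quick numerical sanity check (illustrative only; assumes PyTorch >= 1.12 for the `approximate="tanh"` keyword): gelu_impl above is the tanh approximation of GELU, with 0.7978845608... = sqrt(2/pi).

import torch
import torch.nn.functional as F

x = torch.linspace(-4.0, 4.0, steps=9)
# Expected to print True: both sides compute 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))).
print(torch.allclose(gelu_impl(x), F.gelu(x, approximate="tanh"), atol=1e-5))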
Helper function for the cross entropy.
def vocab_parallel_cross_entropy(vocab_parallel_logits, target): """Helper function for the cross entropy.""" return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target)
Check that all the keys have the same target data type.
def _check_data_types(keys, data, target_dtype): """Check that all the keys have the same target data type.""" for key in keys: assert ( data[key].dtype == target_dtype ), "{} has data type {} which " "is different than {}".format( key, data[key].dtype, target_dtype )
Build the size on rank 0 and broadcast.
def _build_key_size_numel_dictionaries(keys, data): """Build the size on rank 0 and broadcast.""" max_dim = _MAX_DATA_DIM sizes = [0 for _ in range(max_dim) for _ in keys] # Pack the sizes on rank zero. if get_tensor_model_parallel_rank() == 0: offset = 0 for key in keys: assert data[key].dim() < max_dim, "you should increase MAX_DATA_DIM" size = data[key].size() for i, s in enumerate(size): sizes[i + offset] = s offset += max_dim # Move to GPU and broadcast. sizes_cuda = torch.cuda.LongTensor(sizes) torch.distributed.broadcast( sizes_cuda, get_tensor_model_parallel_src_rank(), group=get_tensor_model_parallel_group(), ) # Move back to cpu and unpack. sizes_cpu = sizes_cuda.cpu() key_size = {} key_numel = {} total_numel = 0 offset = 0 for key in keys: i = 0 size = [] numel = 1 while sizes_cpu[offset + i] > 0: this_size = sizes_cpu[offset + i] size.append(this_size) numel *= this_size i += 1 key_size[key] = size key_numel[key] = numel total_numel += numel offset += max_dim return key_size, key_numel, total_numel
Broadcast data from rank zero of each model parallel group to the members of the same model parallel group. Arguments: keys: list of keys in the data dictionary to be broadcasted data: data dictionary of string keys and cpu tensor values. datatype: torch data type of all tensors in data associated with keys.
def broadcast_data(keys, data, datatype):
    """Broadcast data from rank zero of each model parallel group to the
    members of the same model parallel group.

    Arguments:
        keys: list of keys in the data dictionary to be broadcasted
        data: data dictionary of string keys and cpu tensor values.
        datatype: torch data type of all tensors in data associated with keys.
    """
    # Build (key, size) and (key, number of elements) dictionaries along
    # with the total number of elements on all ranks.
    key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, data)

    # Pack on rank zero.
    if get_tensor_model_parallel_rank() == 0:
        # Check that all keys have the same data type.
        _check_data_types(keys, data, datatype)
        # Flatten the data associated with the keys
        flatten_data = torch.cat(
            [data[key].contiguous().view(-1) for key in keys], dim=0
        ).cuda()
    else:
        flatten_data = torch.empty(
            total_numel, device=torch.cuda.current_device(), dtype=datatype
        )

    # Broadcast
    torch.distributed.broadcast(
        flatten_data,
        get_tensor_model_parallel_src_rank(),
        group=get_tensor_model_parallel_group(),
    )

    # Unpack
    output = {}
    offset = 0
    for key in keys:
        size = key_size[key]
        numel = key_numel[key]
        output[key] = flatten_data.narrow(0, offset, numel).view(size)
        offset += numel

    return output
Useful for code segments that may be accessed with or without mpu initialization
def is_unitialized(): """Useful for code segments that may be accessed with or without mpu initialization""" return _DATA_PARALLEL_GROUP is None
Initialize model data parallel groups. Arguments: tensor_model_parallel_size: number of GPUs used to parallelize model tensor. pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline. Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize the model pipeline. The present function will create 8 tensor model-parallel groups, 4 pipeline model-parallel groups and 8 data-parallel groups as: 8 data_parallel groups: [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15] 8 tensor model-parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15] 4 pipeline model-parallel groups: [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box.
def initialize_model_parallel( tensor_model_parallel_size_=1, pipeline_model_parallel_size_=1, virtual_pipeline_model_parallel_size_=None, ): """ Initialize model data parallel groups. Arguments: tensor_model_parallel_size: number of GPUs used to parallelize model tensor. pipeline_model_parallel_size: number of GPUs used to parallelize model pipeline. Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize the model pipeline. The present function will create 8 tensor model-parallel groups, 4 pipeline model-parallel groups and 8 data-parallel groups as: 8 data_parallel groups: [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15] 8 tensor model-parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15] 4 pipeline model-parallel groups: [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box. """ if torch.distributed.get_rank() == 0: print( "> initializing tensor model parallel with size {}".format( tensor_model_parallel_size_ ) ) print( "> initializing pipeline model parallel with size {}".format( pipeline_model_parallel_size_ ) ) # Get world size and rank. Ensure some consistencies. assert torch.distributed.is_initialized() world_size = torch.distributed.get_world_size() tensor_model_parallel_size = min(tensor_model_parallel_size_, world_size) pipeline_model_parallel_size = min(pipeline_model_parallel_size_, world_size) ensure_divisibility( world_size, tensor_model_parallel_size * pipeline_model_parallel_size ) data_parallel_size = world_size // ( tensor_model_parallel_size * pipeline_model_parallel_size ) num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size num_data_parallel_groups = world_size // data_parallel_size if virtual_pipeline_model_parallel_size_ is not None: global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = 0 _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = ( virtual_pipeline_model_parallel_size_ ) rank = torch.distributed.get_rank() # Build the data-parallel groups. global _DATA_PARALLEL_GROUP assert _DATA_PARALLEL_GROUP is None, "data parallel group is already initialized" all_data_parallel_group_ranks = [] for i in range(pipeline_model_parallel_size): start_rank = i * num_pipeline_model_parallel_groups end_rank = (i + 1) * num_pipeline_model_parallel_groups for j in range(tensor_model_parallel_size): ranks = range(start_rank + j, end_rank, tensor_model_parallel_size) all_data_parallel_group_ranks.append(list(ranks)) group = torch.distributed.new_group(ranks) if rank in ranks: _DATA_PARALLEL_GROUP = group # Build the model-parallel groups. global _MODEL_PARALLEL_GROUP assert _MODEL_PARALLEL_GROUP is None, "model parallel group is already initialized" for i in range(data_parallel_size): ranks = [ data_parallel_group_ranks[i] for data_parallel_group_ranks in all_data_parallel_group_ranks ] group = torch.distributed.new_group(ranks) if rank in ranks: _MODEL_PARALLEL_GROUP = group # Build the tensor model-parallel groups. 
global _TENSOR_MODEL_PARALLEL_GROUP assert ( _TENSOR_MODEL_PARALLEL_GROUP is None ), "tensor model parallel group is already initialized" for i in range(num_tensor_model_parallel_groups): ranks = range( i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size ) group = torch.distributed.new_group(ranks) if rank in ranks: _TENSOR_MODEL_PARALLEL_GROUP = group # Build the pipeline model-parallel groups and embedding groups # (first and last rank in each pipeline model-parallel group). global _PIPELINE_MODEL_PARALLEL_GROUP global _PIPELINE_GLOBAL_RANKS assert ( _PIPELINE_MODEL_PARALLEL_GROUP is None ), "pipeline model parallel group is already initialized" global _EMBEDDING_GROUP assert _EMBEDDING_GROUP is None, "embedding group is already initialized" for i in range(num_pipeline_model_parallel_groups): ranks = range(i, world_size, num_pipeline_model_parallel_groups) group = torch.distributed.new_group(ranks) if rank in ranks: _PIPELINE_MODEL_PARALLEL_GROUP = group _PIPELINE_GLOBAL_RANKS = ranks # Setup embedding group (to exchange gradients between # first and last stages). if len(ranks) > 1: embedding_ranks = [ranks[0], ranks[-1]] else: embedding_ranks = ranks group = torch.distributed.new_group(embedding_ranks) if rank in embedding_ranks: _EMBEDDING_GROUP = group
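A pure-Python sketch (illustrative only, no torch.distributed required) that reproduces the rank layout described in the docstring above for 16 GPUs with tensor_model_parallel_size=2 and pipeline_model_parallel_size=4; `sketch_group_layout` is a hypothetical helper, not part of the original code.

def sketch_group_layout(world_size=16, tp=2, pp=4):
    # Mirrors the grouping arithmetic of initialize_model_parallel.
    num_pp_groups = world_size // pp

    data_parallel = []
    for i in range(pp):
        start, end = i * num_pp_groups, (i + 1) * num_pp_groups
        for j in range(tp):
            data_parallel.append(list(range(start + j, end, tp)))

    tensor_parallel = [
        list(range(i * tp, (i + 1) * tp)) for i in range(world_size // tp)
    ]
    pipeline_parallel = [
        list(range(i, world_size, num_pp_groups)) for i in range(num_pp_groups)
    ]
    return data_parallel, tensor_parallel, pipeline_parallel

dp_groups, tp_groups, pp_groups = sketch_group_layout()
print(dp_groups[0])  # [0, 2]
print(tp_groups[0])  # [0, 1]
print(pp_groups[0])  # [0, 4, 8, 12]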
Check if model and data parallel groups are initialized.
def model_parallel_is_initialized(): """Check if model and data parallel groups are initialized.""" if ( _TENSOR_MODEL_PARALLEL_GROUP is None or _PIPELINE_MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None ): return False return True
Get the model parallel group the caller rank belongs to.
def get_model_parallel_group(): """Get the model parallel group the caller rank belongs to.""" assert _MODEL_PARALLEL_GROUP is not None, "model parallel group is not initialized" return _MODEL_PARALLEL_GROUP
Get the tensor model parallel group the caller rank belongs to.
def get_tensor_model_parallel_group(): """Get the tensor model parallel group the caller rank belongs to.""" assert ( _TENSOR_MODEL_PARALLEL_GROUP is not None ), "intra_layer_model parallel group is not initialized" return _TENSOR_MODEL_PARALLEL_GROUP
Get the pipeline model parallel group the caller rank belongs to.
def get_pipeline_model_parallel_group(): """Get the pipeline model parallel group the caller rank belongs to.""" assert ( _PIPELINE_MODEL_PARALLEL_GROUP is not None ), "pipeline_model parallel group is not initialized" return _PIPELINE_MODEL_PARALLEL_GROUP
Get the data parallel group the caller rank belongs to.
def get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" assert _DATA_PARALLEL_GROUP is not None, "data parallel group is not initialized" return _DATA_PARALLEL_GROUP
Get the embedding group the caller rank belongs to.
def get_embedding_group(): """Get the embedding group the caller rank belongs to.""" assert _EMBEDDING_GROUP is not None, "embedding group is not initialized" return _EMBEDDING_GROUP
Set the tensor model parallel size
def set_tensor_model_parallel_world_size(world_size): """Set the tensor model parallel size""" global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE = world_size
Set the pipeline model parallel size
def set_pipeline_model_parallel_world_size(world_size): """Set the pipeline model parallel size""" global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE = world_size
Return world size for the tensor model parallel group.
def get_tensor_model_parallel_world_size(): """Return world size for the tensor model parallel group.""" global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None: return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
Return world size for the pipeline model parallel group.
def get_pipeline_model_parallel_world_size(): """Return world size for the pipeline model parallel group.""" global _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE if _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE is not None: return _MPU_PIPELINE_MODEL_PARALLEL_WORLD_SIZE return torch.distributed.get_world_size(group=get_pipeline_model_parallel_group())
Set tensor model parallel rank.
def set_tensor_model_parallel_rank(rank): """Set tensor model parallel rank.""" global _MPU_TENSOR_MODEL_PARALLEL_RANK _MPU_TENSOR_MODEL_PARALLEL_RANK = rank
Set pipeline model parallel rank.
def set_pipeline_model_parallel_rank(rank): """Set pipeline model parallel rank.""" global _MPU_PIPELINE_MODEL_PARALLEL_RANK _MPU_PIPELINE_MODEL_PARALLEL_RANK = rank
Return my rank for the tensor model parallel group.
def get_tensor_model_parallel_rank(): """Return my rank for the tensor model parallel group.""" global _MPU_TENSOR_MODEL_PARALLEL_RANK if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None: return _MPU_TENSOR_MODEL_PARALLEL_RANK return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
Return my rank for the pipeline model parallel group.
def get_pipeline_model_parallel_rank(): """Return my rank for the pipeline model parallel group.""" global _MPU_PIPELINE_MODEL_PARALLEL_RANK if _MPU_PIPELINE_MODEL_PARALLEL_RANK is not None: return _MPU_PIPELINE_MODEL_PARALLEL_RANK return torch.distributed.get_rank(group=get_pipeline_model_parallel_group())
Return True if in the first pipeline model-parallel stage, False otherwise.
def is_pipeline_first_stage(ignore_virtual=False): """Return True if in the first pipeline model-parallel stage, False otherwise.""" if not ignore_virtual: if ( get_virtual_pipeline_model_parallel_world_size() is not None and get_virtual_pipeline_model_parallel_rank() != 0 ): return False return get_pipeline_model_parallel_rank() == 0
Return True if in the last pipeline model-parallel stage, False otherwise.
def is_pipeline_last_stage(ignore_virtual=False): """Return True if in the last pipeline model-parallel stage, False otherwise.""" if not ignore_virtual: virtual_pipeline_model_parallel_world_size = ( get_virtual_pipeline_model_parallel_world_size() ) if ( virtual_pipeline_model_parallel_world_size is not None and get_virtual_pipeline_model_parallel_rank() != (virtual_pipeline_model_parallel_world_size - 1) ): return False return get_pipeline_model_parallel_rank() == ( get_pipeline_model_parallel_world_size() - 1 )
Return the virtual pipeline-parallel rank.
def get_virtual_pipeline_model_parallel_rank(): """Return the virtual pipeline-parallel rank.""" global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK return _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK
Set the virtual pipeline-parallel rank.
def set_virtual_pipeline_model_parallel_rank(rank): """Set the virtual pipeline-parallel rank.""" global _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK _VIRTUAL_PIPELINE_MODEL_PARALLEL_RANK = rank
Return the virtual pipeline-parallel world size.
def get_virtual_pipeline_model_parallel_world_size(): """Return the virtual pipeline-parallel world size.""" global _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE return _VIRTUAL_PIPELINE_MODEL_PARALLEL_WORLD_SIZE
Calculate the global rank corresponding to the first local rank in the tensor model parallel group.
def get_tensor_model_parallel_src_rank(): """Calculate the global rank corresponding to the first local rank in the tensor model parallel group.""" global_rank = torch.distributed.get_rank() local_world_size = get_tensor_model_parallel_world_size() return (global_rank // local_world_size) * local_world_size
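The arithmetic, illustrated without a distributed setup (illustrative only): with a tensor model parallel world size of 2, each global rank maps onto the first rank of its tensor-parallel group.

tp_world_size = 2
for global_rank in range(8):
    src = (global_rank // tp_world_size) * tp_world_size
    print(global_rank, "->", src)  # 0->0, 1->0, 2->2, 3->2, 4->4, 5->4, 6->6, 7->6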
Return world size for the data parallel group.
def get_data_parallel_world_size(): """Return world size for the data parallel group.""" return torch.distributed.get_world_size(group=get_data_parallel_group())
Return my rank for the data parallel group.
def get_data_parallel_rank(): """Return my rank for the data parallel group.""" return torch.distributed.get_rank(group=get_data_parallel_group())
Set the groups to none.
def destroy_model_parallel(): """Set the groups to none.""" global _TENSOR_MODEL_PARALLEL_GROUP _TENSOR_MODEL_PARALLEL_GROUP = None global _PIPELINE_MODEL_PARALLEL_GROUP _PIPELINE_MODEL_PARALLEL_GROUP = None global _DATA_PARALLEL_GROUP _DATA_PARALLEL_GROUP = None
Initialize affine weight for model parallel on GPU.
def _initialize_affine_weight_gpu(weight, init_method, partition_dim, stride=1): """Initialize affine weight for model parallel on GPU.""" set_tensor_model_parallel_attributes( tensor=weight, is_parallel=True, dim=partition_dim, stride=stride ) if ds_checkpointing.is_configured(): global get_cuda_rng_tracker get_cuda_rng_tracker = ds_checkpointing.get_cuda_rng_tracker with get_cuda_rng_tracker().fork(): init_method(weight)
Initialize affine weight for model parallel. Build the master weight on all processes and scatter the relevant chunk.
def _initialize_affine_weight_cpu( weight, output_size, input_size, per_partition_size, partition_dim, init_method, stride=1, return_master_weight=False, ): """Initialize affine weight for model parallel. Build the master weight on all processes and scatter the relevant chunk.""" set_tensor_model_parallel_attributes( tensor=weight, is_parallel=True, dim=partition_dim, stride=stride ) # Initialize master weight master_weight = torch.empty( output_size, input_size, dtype=torch.float, requires_grad=False ) init_method(master_weight) args = get_args() master_weight = master_weight.to(dtype=args.params_dtype) # Split and copy per_partition_per_stride_size = divide(per_partition_size, stride) weight_list = torch.split( master_weight, per_partition_per_stride_size, dim=partition_dim ) rank = get_tensor_model_parallel_rank() world_size = get_tensor_model_parallel_world_size() my_weight_list = weight_list[rank::world_size] with torch.no_grad(): torch.cat(my_weight_list, dim=partition_dim, out=weight) if return_master_weight: return master_weight return None
All-reduce the input tensor across the model parallel group.
def _reduce(input_):
    """All-reduce the input tensor across the model parallel group."""

    # Bypass the function if we are using only 1 GPU.
    if get_tensor_model_parallel_world_size() == 1:
        return input_

    # All-reduce.
    torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())

    return input_
Split the tensor along its last dimension and keep the corresponding slice.
def _split(input_): """Split the tensor along its last dimension and keep the corresponding slice.""" world_size = get_tensor_model_parallel_world_size() # Bypass the function if we are using only 1 GPU. if world_size == 1: return input_ # Split along last dimension. input_list = split_tensor_along_last_dim(input_, world_size) # Note: torch.split does not create contiguous tensors by default. rank = get_tensor_model_parallel_rank() output = input_list[rank].contiguous() return output
Gather tensors and concatenate along the last dimension.
def _gather(input_):
    """Gather tensors and concatenate along the last dimension."""

    world_size = get_tensor_model_parallel_world_size()
    # Bypass the function if we are using only 1 GPU.
    if world_size == 1:
        return input_

    # Size and dimension.
    last_dim = input_.dim() - 1
    rank = get_tensor_model_parallel_rank()

    tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
    tensor_list[rank] = input_
    torch.distributed.all_gather(
        tensor_list, input_, group=get_tensor_model_parallel_group()
    )

    # Note: torch.cat already creates a contiguous tensor.
    output = torch.cat(tensor_list, dim=last_dim).contiguous()

    return output
Initialize the memory buffer for the checkpointed activations.
def init_checkpointed_activations_memory_buffer():
    """Initialize the memory buffer for the checkpointed activations."""
    args = get_args()

    per_layer = (
        args.micro_batch_size
        * args.max_position_embeddings
        * args.hidden_size
        // args.tensor_model_parallel_size
    )
    assert (
        args.num_layers % args.checkpoint_num_layers == 0
    ), "number of layers is not divisible by checkpoint-num-layers"
    num_checkpointer_layers = args.num_layers // args.checkpoint_num_layers
    numel = per_layer * num_checkpointer_layers
    dtype = torch.half
    if not args.fp16:
        dtype = torch.float

    global _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER
    assert (
        _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is None
    ), "checkpointed activations memory buffer is already allocated."
    _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER = allocate_mem_buff(
        "checkpointed activations", numel, dtype, track_usage=False
    )
Reset the memory used for checkpointing.
def reset_checkpointed_activations_memory_buffer(): """Reset the memory used for checkpointing.""" if _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER is not None: _CHECKPOINTED_ACTIVATIONS_MEMORY_BUFFER.reset()
Sets the random number generator state of the current GPU. Arguments: new_state (torch.ByteTensor): The desired state. This function is adapted from PyTorch repo (torch.cuda.set_rng_state) with a single change: the input state is not cloned. Cloning caused major performance issues for +4 GPU cases.
def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.

    Arguments:
        new_state (torch.ByteTensor): The desired state.

    This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, "_cuda_setRNGState") and callable(_C._cuda_setRNGState):
        # Older PyTorch exposes the setter directly.
        def cb():
            with device_ctx_manager(device):
                _C._cuda_setRNGState(new_state)

    else:
        # Newer PyTorch goes through the per-device default generator.
        if device == -1:
            device = torch.device("cuda")
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device("cuda", device)

        def cb():
            idx = device.index
            if idx is None:
                idx = torch.cuda.current_device()
            default_generator = torch.cuda.default_generators[idx]
            default_generator.set_state(new_state)

    _lazy_call(cb)
Break a tensor into equal 1D chunks.
def split_tensor_into_1d_equal_chunks(tensor): """Break a tensor into equal 1D chunks.""" data = tensor.view(-1) partition_size = torch.numel(data) // get_tensor_model_parallel_world_size() start_index = partition_size * get_tensor_model_parallel_rank() end_index = start_index + partition_size return data[start_index:end_index]
Opposite of above function, gather values from model parallel ranks.
def gather_split_1d_tensor(tensor): """Opposite of above function, gather values from model parallel ranks.""" world_size = get_tensor_model_parallel_world_size() numel = torch.numel(tensor) numel_gathered = world_size * numel gathered = torch.empty( numel_gathered, dtype=tensor.dtype, device=torch.cuda.current_device(), requires_grad=False, ) chunks = [gathered[i * numel : (i + 1) * numel] for i in range(world_size)] torch.distributed.all_gather( chunks, tensor, group=get_tensor_model_parallel_group() ) return gathered
Get cuda rng tracker.
def get_cuda_rng_tracker(): """Get cuda rng tracker.""" return _CUDA_RNG_STATE_TRACKER
Initialize model parallel cuda seed. This function should be called after the model parallel is initialized. Also, no torch.cuda.manual_seed should be called after this function. Basically, this is a replacement for that function. Two sets of RNG states are tracked: default state: This is for data parallelism and is the same among a set of model parallel GPUs but different across different model parallel groups. This is used for example for dropout in the non-tensor-model-parallel regions. tensor-model-parallel state: This state is different among a set of model parallel GPUs, but the same across data parallel groups. This is used for example for dropout in model parallel regions.
def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.

    This function should be called after the model parallel is initialized.
    Also, no torch.cuda.manual_seed should be called after this function.
    Basically, this is a replacement for that function.

    Two sets of RNG states are tracked:
        default state: This is for data parallelism and is the same among a
                       set of model parallel GPUs but different across
                       different model parallel groups. This is used for
                       example for dropout in the non-tensor-model-parallel
                       regions.
        tensor-model-parallel state: This state is different among a set of
                       model parallel GPUs, but the same across data parallel
                       groups. This is used for example for dropout in model
                       parallel regions.
    """
    # 2718 is just for fun and any POSITIVE value will work.
    offset = seed + 2718
    tensor_model_parallel_seed = offset + get_tensor_model_parallel_rank()
    # Data parallel gets the original seed.
    data_parallel_seed = seed

    if torch.distributed.get_rank() == 0:
        print(
            "> initializing model parallel cuda seeds on global rank {}, "
            "model parallel rank {}, and data parallel rank {} with "
            "model parallel seed: {} and data parallel seed: {}".format(
                torch.distributed.get_rank(),
                get_tensor_model_parallel_rank(),
                get_data_parallel_rank(),
                tensor_model_parallel_seed,
                data_parallel_seed,
            ),
            flush=True,
        )
    _CUDA_RNG_STATE_TRACKER.reset()
    # Set the default state.
    torch.cuda.manual_seed(data_parallel_seed)
    # and model parallel state.
    _CUDA_RNG_STATE_TRACKER.add(
        _MODEL_PARALLEL_RNG_TRACKER_NAME, tensor_model_parallel_seed
    )
Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint.
def checkpoint(function, *args): """Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint.""" return CheckpointFunction.apply(function, *args)
Ensure that numerator is divisible by the denominator.
def ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, "{} is not divisible by {}".format( numerator, denominator )
Ensure that numerator is divisible by the denominator and return the division value.
def divide(numerator, denominator): """Ensure that numerator is divisible by the denominator and return the division value.""" ensure_divisibility(numerator, denominator) return numerator // denominator
Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory.
def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False): """Split a tensor along its last dimension. Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. """ # Get the size and dimension. last_dim = tensor.dim() - 1 last_dim_size = divide(tensor.size()[last_dim], num_partitions) # Split. tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) # Note: torch.split does not create contiguous tensors by default. if contiguous_split_chunks: return tuple(chunk.contiguous() for chunk in tensor_list) return tensor_list
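A small CPU-only usage sketch (illustrative, assumes the functions above, including divide, are in scope): split a (2, 8) tensor into 4 partitions along the last dimension.

import torch

x = torch.arange(16).reshape(2, 8)
chunks = split_tensor_along_last_dim(x, 4, contiguous_split_chunks=True)
print(len(chunks), chunks[0].shape)  # 4 torch.Size([2, 2])
print(chunks[0])                     # tensor([[0, 1], [8, 9]])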
Clips gradient norm of an iterable of parameters whose gradients are in fp32. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector).
def clip_grad_norm_fp32(parameters, max_norm, norm_type=2): """Clips gradient norm of an iterable of parameters whose gradients are in fp32. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). """ if isinstance(parameters, torch.Tensor): parameters = [parameters] # Filter parameters based on: # - grad should not be none # - parameter should not be shared # - should not be a replica due to tensor model parallelism grads = [] grads_for_norm = [] for param in parameters: grad_not_none = param.grad is not None is_not_shared = param_is_not_shared(param) is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param) grad = param.grad.detach() if grad_not_none: # Make sure the grads are in fp32 assert param.grad.type() == "torch.cuda.FloatTensor" grads.append(grad) if grad_not_none and is_not_shared and is_not_tp_duplicate: grads_for_norm.append(grad) # Norm parameters. max_norm = float(max_norm) norm_type = float(norm_type) total_norm = 0.0 # Calculate norm. if norm_type == inf: total_norm = max(grad.abs().max() for grad in grads_for_norm) total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) # Take max across all model-parallel GPUs. torch.distributed.all_reduce( total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=mpu.get_model_parallel_group(), ) total_norm = total_norm_cuda[0].item() else: if norm_type == 2.0: dummy_overflow_buf = torch.cuda.IntTensor([0]) # Use apex's multi-tensor applier for efficiency reasons. # Multi-tensor applier takes a function and a list of list # and performs the operation on that list all in one kernel. grad_norm, _ = multi_tensor_applier( amp_C.multi_tensor_l2norm, dummy_overflow_buf, [grads_for_norm], False, # no per-parameter norm ) # Since we will be summing across data parallel groups, # we need the pow(norm-type). total_norm = grad_norm ** norm_type else: for grad in grads_for_norm: grad_norm = torch.norm(grad, norm_type) total_norm += grad_norm ** norm_type # Sum across all model-parallel GPUs. torch.distributed.all_reduce( total_norm, op=torch.distributed.ReduceOp.SUM, group=mpu.get_model_parallel_group(), ) total_norm = total_norm.item() ** (1.0 / norm_type) # Scale. clip_coeff = max_norm / (total_norm + 1.0e-6) if clip_coeff < 1.0: dummy_overflow_buf = torch.cuda.IntTensor([0]) multi_tensor_applier( amp_C.multi_tensor_scale, dummy_overflow_buf, [grads, grads], clip_coeff ) return total_norm
Zero out the gradient for a group of parameters. Note: copied from torch.optim.optimizer.
def _zero_grad_group_helper(group, set_to_none): """Zero out the gradient for a group of parameters. Note: copied from torch.optim.optimizer.""" for param in group: if param.grad is not None: if set_to_none: param.grad = None else: if param.grad.grad_fn is not None: param.grad.detach_() else: param.grad.requires_grad_(False) param.grad.zero_()
Use multi-tensor-applier to copy values from one list to another. We don't have a bfloat16 implementation, so for now, if the overflow_buf is not provided, we fall back to a simple loop copy to be compatible with bfloat16.
def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None):
    """Use multi-tensor-applier to copy values from one list to another.
    We don't have a bfloat16 implementation, so for now, if the overflow_buf
    is not provided, we fall back to a simple loop copy to be compatible
    with bfloat16."""
    if overflow_buf:
        overflow_buf.fill_(0)
        # Scaling with factor `1.0` is equivalent to copy.
        multi_tensor_applier(amp_C.multi_tensor_scale, overflow_buf, [this, that], 1.0)
    else:
        for this_, that_ in zip(this, that):
            that_.copy_(this_)
Divide params into with-weight-decay and without-weight-decay groups. Layernorms and biases will have no weight decay but the rest will.
def _get_params_for_weight_decay_optimization(modules):
    """Divide params into with-weight-decay and without-weight-decay groups.
    Layernorms and biases will have no weight decay but the rest will.
    """
    weight_decay_params = {"params": []}
    no_weight_decay_params = {"params": [], "weight_decay": 0.0}
    for module in modules:
        for module_ in module.modules():
            if isinstance(module_, LayerNorm):
                no_weight_decay_params["params"].extend(
                    [p for p in list(module_._parameters.values()) if p is not None]
                )
            else:
                weight_decay_params["params"].extend(
                    [
                        p
                        for n, p in list(module_._parameters.items())
                        if p is not None and n != "bias"
                    ]
                )
                no_weight_decay_params["params"].extend(
                    [
                        p
                        for n, p in list(module_._parameters.items())
                        if p is not None and n == "bias"
                    ]
                )

    return weight_decay_params, no_weight_decay_params
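A CPU-only sketch (illustrative only, assuming everything is pasted into a single script and that the module-level name `LayerNorm` resolves to torch.nn.LayerNorm; in Megatron it is the fused LayerNorm, but the grouping logic is the same).

import torch.nn as nn

LayerNorm = nn.LayerNorm  # assumption for this sketch
model = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
wd_group, no_wd_group = _get_params_for_weight_decay_optimization([model])
print(len(wd_group["params"]))     # 1 (the Linear weight)
print(len(no_wd_group["params"]))  # 3 (Linear bias + LayerNorm weight and bias)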
Returns list of utf-8 bytes and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
def bytes_to_unicode():
    """
    Returns list of utf-8 bytes and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings. This means you need a
    large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing
    around 5K for decent coverage. This is a significant percentage of your
    normal, say, 32K bpe vocab. To avoid that, we want lookup tables between
    utf-8 bytes and unicode strings. And avoids mapping to whitespace/control
    characters the bpe code barfs on.
    """
    _chr = unichr if sys.version_info[0] == 2 else chr
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [_chr(n) for n in cs]
    return dict(zip(bs, cs))
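A quick illustration (not part of the original file): every byte gets a printable, whitespace-free stand-in; the space byte (0x20) becomes 'Ġ', the marker commonly seen in GPT-2 BPE vocabularies.

byte_encoder = bytes_to_unicode()
print(len(byte_encoder))       # 256
print(byte_encoder[ord("A")])  # 'A' (printable ASCII maps to itself)
print(byte_encoder[ord(" ")])  # 'Ġ'
byte_decoder = {v: k for k, v in byte_encoder.items()}  # the mapping is reversible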