# DeepSpeed Team
import os
from typing import Union
from enum import Enum
import torch
import json
import hjson
import copy
import base64
from .constants import *
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
MIN_LOSS_SCALE,
)
from .config_utils import (
get_scalar_param,
dict_raise_error_on_duplicate_keys,
ScientificNotationEncoder,
)
from .zero.config import get_zero_config, ZeroStageEnum
from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from ..comm.config import DeepSpeedCommsConfig
from ..monitor.config import get_monitor_config
from deepspeed import comm as dist
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from ..git_version_info import version as __version__
from ..utils import logger
from ..elasticity import (
elasticity_enabled,
compute_elastic_config,
ensure_immutable_elastic_config,
)
from ..elasticity.config import ElasticityConfigError
from ..elasticity.constants import (
ELASTICITY,
IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT,
MODEL_PARLLEL_SIZE,
MODEL_PARLLEL_SIZE_DEFAULT,
NUM_GPUS_PER_NODE,
NUM_GPUS_PER_NODE_DEFAULT,
)
from ..profiling.config import DeepSpeedFlopsProfilerConfig
from ..autotuning.config import DeepSpeedAutotuningConfig
from ..nebula.config import DeepSpeedNebulaConfig
from ..compression.config import get_compression_config, get_quantize_enabled
from ..compression.constants import *
from .swap_tensor.aio_config import get_aio_config
from .data_pipeline.config import get_data_efficiency_enabled, get_data_efficiency_config, get_curriculum_enabled_legacy, get_curriculum_params_legacy
from .data_pipeline.constants import *
TENSOR_CORE_ALIGN_SIZE = 8
ADAGRAD_OPTIMIZER = 'adagrad'
ADAM_OPTIMIZER = 'adam'
ADAMW_OPTIMIZER = 'adamw'
LAMB_OPTIMIZER = 'lamb'
ONEBIT_ADAM_OPTIMIZER = 'onebitadam'
ZERO_ONE_ADAM_OPTIMIZER = 'zerooneadam'
ONEBIT_LAMB_OPTIMIZER = 'onebitlamb'
DEEPSPEED_OPTIMIZERS = [
ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER,
ZERO_ONE_ADAM_OPTIMIZER
]
# extra optimizer parameters for adam/adamw
TORCH_ADAM_PARAM = "torch_adam"
# default to adamw logic for adam/adamw optimizers unless user explicitly opts out
ADAM_W_MODE = "adam_w_mode"
ADAM_W_MODE_DEFAULT = True
class DeepSpeedConfigError(Exception):
pass
class DtypeEnum(Enum):
# The torch dtype must always be the first value (so we return torch.dtype)
fp16 = torch.float16, "torch.float16", "fp16", "float16", "half"
fp32 = torch.float32, "torch.float32", "fp32", "float32", "float"
int8 = torch.int8, "torch.int8", "int8"
bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16"
# Copied from https://stackoverflow.com/a/43210118
# Allows us to use multiple values for each Enum index and returns first
# listed value when Enum is called
def __new__(cls, *values):
obj = object.__new__(cls)
# first value is canonical value
obj._value_ = values[0]
for other_value in values[1:]:
cls._value2member_map_[other_value] = obj
obj._all_values = values
return obj
def __repr__(self):
return "<%s.%s: %s>" % (
self.__class__.__name__,
self._name_,
", ".join([repr(v) for v in self._all_values]),
)
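# Added usage sketch (not in the original file): because __new__ registers every
# alias in cls._value2member_map_, constructing the enum from any alias returns
# the same member, whose canonical value is the torch dtype, e.g.
#   DtypeEnum("fp16") is DtypeEnum(torch.float16) is DtypeEnum.fp16   # -> True
#   DtypeEnum("bfloat16").value                                       # -> torch.bfloat16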
def get_pld_enabled(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
return get_scalar_param(param_dict[PROGRESSIVE_LAYER_DROP], PLD_ENABLED, PLD_ENABLED_DEFAULT)
else:
return False
def get_pld_params(param_dict):
if PROGRESSIVE_LAYER_DROP in param_dict.keys():
pld_params = copy.copy(param_dict[PROGRESSIVE_LAYER_DROP])
pld_params.pop(PLD_ENABLED)
return pld_params
else:
return False
def get_amp_enabled(param_dict):
if AMP in param_dict.keys():
return get_scalar_param(param_dict[AMP], AMP_ENABLED, AMP_ENABLED_DEFAULT)
else:
return False
def get_amp_params(param_dict):
if AMP in param_dict.keys():
amp_params = copy.copy(param_dict[AMP])
amp_params.pop(AMP_ENABLED)
return amp_params
else:
return False
def get_fp16_enabled(param_dict):
if FP16 in param_dict.keys():
return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
else:
return False
def get_bfloat16_enabled(param_dict):
for key in [BFLOAT16, BFLOAT16_OLD]:
if key in param_dict.keys():
return get_scalar_param(param_dict[key], BFLOAT16_ENABLED, BFLOAT16_ENABLED_DEFAULT)
return False
def get_fp16_master_weights_and_grads_enabled(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_MASTER_WEIGHTS_AND_GRADS, FP16_MASTER_WEIGHTS_AND_GRADS_DEFAULT)
else:
return False
def get_fp16_auto_cast(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_AUTO_CAST, FP16_AUTO_CAST_DEFAULT)
def get_loss_scale(param_dict):
if get_fp16_enabled(param_dict):
return get_scalar_param(param_dict[FP16], FP16_LOSS_SCALE, FP16_LOSS_SCALE_DEFAULT)
elif get_bfloat16_enabled(param_dict):
return 1.0
else:
return FP16_LOSS_SCALE_DEFAULT
def get_initial_dynamic_scale(param_dict):
if get_fp16_enabled(param_dict):
initial_scale_power = get_scalar_param(param_dict[FP16], FP16_INITIAL_SCALE_POWER,
FP16_INITIAL_SCALE_POWER_DEFAULT)
elif get_bfloat16_enabled(param_dict):
initial_scale_power = 0
else:
initial_scale_power = FP16_INITIAL_SCALE_POWER_DEFAULT
return 2**initial_scale_power
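# Added note: assuming the stock default scale power of 16, this returns an initial
# dynamic loss scale of 2**16 = 65536 for fp16, while bf16 short-circuits to 2**0 = 1.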
def get_dynamic_loss_scale_args(param_dict):
loss_scale_args = None
if get_fp16_enabled(param_dict):
fp16_dict = param_dict[FP16]
dynamic_loss_args = [
FP16_INITIAL_SCALE_POWER,
FP16_LOSS_SCALE_WINDOW,
FP16_MIN_LOSS_SCALE,
FP16_HYSTERESIS,
]
if any(arg in list(fp16_dict.keys()) for arg in dynamic_loss_args):
init_scale = get_scalar_param(fp16_dict, FP16_INITIAL_SCALE_POWER, FP16_INITIAL_SCALE_POWER_DEFAULT)
scale_window = get_scalar_param(fp16_dict, FP16_LOSS_SCALE_WINDOW, FP16_LOSS_SCALE_WINDOW_DEFAULT)
delayed_shift = get_scalar_param(fp16_dict, FP16_HYSTERESIS, FP16_HYSTERESIS_DEFAULT)
min_loss_scale = get_scalar_param(fp16_dict, FP16_MIN_LOSS_SCALE, FP16_MIN_LOSS_SCALE_DEFAULT)
loss_scale_args = {
INITIAL_LOSS_SCALE: 2**init_scale,
SCALE_WINDOW: scale_window,
DELAYED_SHIFT: delayed_shift,
MIN_LOSS_SCALE: min_loss_scale,
}
return loss_scale_args
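# Added worked example (config keys assumed to match the "fp16" JSON section):
#   {"enabled": true, "initial_scale_power": 12, "loss_scale_window": 1000}
# yields {INITIAL_LOSS_SCALE: 2**12 = 4096, SCALE_WINDOW: 1000} plus the default
# hysteresis and min-loss-scale values; an fp16 section that sets none of the
# dynamic-scale keys leaves loss_scale_args as None.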
def get_gradient_accumulation_steps(param_dict):
return get_scalar_param(param_dict, GRADIENT_ACCUMULATION_STEPS, GRADIENT_ACCUMULATION_STEPS_DEFAULT)
def get_sparse_gradients_enabled(param_dict):
return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT)
def get_communication_data_type(param_dict):
val = get_scalar_param(param_dict, COMMUNICATION_DATA_TYPE, COMMUNICATION_DATA_TYPE_DEFAULT)
val = val.lower() if val is not None else val
if val is None:
return val # we must determine it by other parameters
elif val == "fp32":
return torch.float32
elif val == "fp16":
return torch.float16
elif val == "bfp16":
return torch.bfloat16
raise ValueError(f"Invalid communication_data_type. Supported data types: ['fp16', 'bfp16', 'fp32']. Got: {val}")
def get_prescale_gradients(param_dict):
return get_scalar_param(param_dict, PRESCALE_GRADIENTS, PRESCALE_GRADIENTS_DEFAULT)
def get_gradient_predivide_factor(param_dict):
return get_scalar_param(param_dict, GRADIENT_PREDIVIDE_FACTOR, GRADIENT_PREDIVIDE_FACTOR_DEFAULT)
def get_steps_per_print(param_dict):
return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT)
def get_disable_allgather(param_dict):
return get_scalar_param(param_dict, DISABLE_ALLGATHER, DISABLE_ALLGATHER_DEFAULT)
def get_dump_state(param_dict):
return get_scalar_param(param_dict, DUMP_STATE, DUMP_STATE_DEFAULT)
def get_gradient_clipping(param_dict):
return get_scalar_param(param_dict, GRADIENT_CLIPPING, GRADIENT_CLIPPING_DEFAULT)
def get_sparse_attention(param_dict):
if SPARSE_ATTENTION in param_dict.keys():
sparsity = param_dict[SPARSE_ATTENTION]
mode = get_sparse_attention_mode(sparsity)
if mode == SPARSE_DENSE_MODE:
return get_sparse_dense_config(sparsity)
elif mode == SPARSE_FIXED_MODE:
return get_sparse_fixed_config(sparsity)
elif mode == SPARSE_VARIABLE_MODE:
return get_sparse_variable_config(sparsity)
elif mode == SPARSE_BIGBIRD_MODE:
return get_sparse_bigbird_config(sparsity)
elif mode == SPARSE_BSLONGFORMER_MODE:
return get_sparse_bslongformer_config(sparsity)
else:
raise NotImplementedError(f"Given sparsity mode, {mode}, has not been implemented yet!")
else:
return None
def get_sparse_dense_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
return {SPARSE_MODE: SPARSE_DENSE_MODE, SPARSE_BLOCK: block}
def get_sparse_fixed_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_local_blocks = get_scalar_param(sparsity, SPARSE_NUM_LOCAL_BLOCKS, SPARSE_NUM_LOCAL_BLOCKS_DEFAULT)
num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
horizontal_global_attention = get_scalar_param(
sparsity,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
)
num_different_global_patterns = get_scalar_param(
sparsity,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_FIXED_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_LOCAL_BLOCKS: num_local_blocks,
SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
SPARSE_ATTENTION_TYPE: attention,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS: num_different_global_patterns,
}
def get_sparse_variable_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
local_window_blocks = get_scalar_param(sparsity, SPARSE_LOCAL_WINDOW_BLOCKS, SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT)
global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
global_block_end_indices = get_scalar_param(
sparsity,
SPARSE_GLOBAL_BLOCK_END_INDICES,
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
)
attention = get_scalar_param(sparsity, SPARSE_ATTENTION_TYPE, SPARSE_ATTENTION_TYPE_DEFAULT)
horizontal_global_attention = get_scalar_param(
sparsity,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_VARIABLE_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
SPARSE_LOCAL_WINDOW_BLOCKS: local_window_blocks,
SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
SPARSE_ATTENTION_TYPE: attention,
SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
}
def get_sparse_bigbird_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_random_blocks = get_scalar_param(sparsity, SPARSE_NUM_RANDOM_BLOCKS, SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
num_sliding_window_blocks = get_scalar_param(
sparsity,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
)
num_global_blocks = get_scalar_param(sparsity, SPARSE_NUM_GLOBAL_BLOCKS, SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
return {
SPARSE_MODE: SPARSE_BIGBIRD_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
}
def get_sparse_bslongformer_config(sparsity):
block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
different_layout_per_head = get_scalar_param(
sparsity,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT,
)
num_sliding_window_blocks = get_scalar_param(
sparsity,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT,
)
global_block_indices = get_scalar_param(sparsity, SPARSE_GLOBAL_BLOCK_INDICES, SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
global_block_end_indices = get_scalar_param(
sparsity,
SPARSE_GLOBAL_BLOCK_END_INDICES,
SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT,
)
return {
SPARSE_MODE: SPARSE_BSLONGFORMER_MODE,
SPARSE_BLOCK: block,
SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
}
def get_sparse_attention_mode(param_dict):
if SPARSE_MODE in param_dict.keys():
return param_dict[SPARSE_MODE]
else:
return SPARSE_MODE_DEFAULT
def get_sparse_attention_type(param_dict):
if SPARSE_ATTENTION_TYPE in param_dict.keys():
return param_dict[SPARSE_ATTENTION_TYPE]
else:
return SPARSE_ATTENTION_TYPE_DEFAULT
def get_pipeline_config(param_dict):
"""Parses pipeline engine configuration. """
default_pipeline = {
"stages": "auto",
"partition": "best",
"seed_layers": False,
"activation_checkpoint_interval": 0,
}
config = default_pipeline
for key, val in param_dict.get("pipeline", {}).items():
config[key] = val
return config
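# Added example: a user config of {"pipeline": {"stages": 4, "partition": "uniform"}}
# returns {"stages": 4, "partition": "uniform", "seed_layers": False,
# "activation_checkpoint_interval": 0}; keys not supplied keep the defaults above.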
def get_optimizer_name(param_dict):
if OPTIMIZER in param_dict.keys() and TYPE in param_dict[OPTIMIZER].keys():
return param_dict[OPTIMIZER][TYPE]
else:
return OPTIMIZER_TYPE_DEFAULT
def get_optimizer_params(param_dict):
if (get_optimizer_name(param_dict) is not None and OPTIMIZER_PARAMS in param_dict[OPTIMIZER].keys()):
return param_dict[OPTIMIZER][OPTIMIZER_PARAMS]
else:
return None
def get_optimizer_gradient_clipping(param_dict):
optimizer_params = get_optimizer_params(param_dict)
if optimizer_params is not None and MAX_GRAD_NORM in optimizer_params.keys():
return optimizer_params[MAX_GRAD_NORM]
else:
return None
def get_optimizer_legacy_fusion(param_dict):
if OPTIMIZER in param_dict.keys() and LEGACY_FUSION in param_dict[OPTIMIZER].keys():
return param_dict[OPTIMIZER][LEGACY_FUSION]
else:
return LEGACY_FUSION_DEFAULT
def get_zero_allow_untested_optimizer(param_dict):
return get_scalar_param(param_dict, ZERO_ALLOW_UNTESTED_OPTIMIZER, ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT)
def get_zero_force_ds_cpu_optimizer(param_dict):
return get_scalar_param(param_dict, ZERO_FORCE_DS_CPU_OPTIMIZER, ZERO_FORCE_DS_CPU_OPTIMIZER_DEFAULT)
def get_scheduler_name(param_dict):
if SCHEDULER in param_dict.keys() and TYPE in param_dict[SCHEDULER].keys():
return param_dict[SCHEDULER][TYPE]
else:
return SCHEDULER_TYPE_DEFAULT
def get_scheduler_params(param_dict):
if (get_scheduler_name(param_dict) is not None and SCHEDULER_PARAMS in param_dict[SCHEDULER].keys()):
return param_dict[SCHEDULER][SCHEDULER_PARAMS]
else:
return None
def get_train_batch_size(param_dict):
return get_scalar_param(param_dict, TRAIN_BATCH_SIZE, TRAIN_BATCH_SIZE_DEFAULT)
def get_train_micro_batch_size_per_gpu(param_dict):
return get_scalar_param(
param_dict,
TRAIN_MICRO_BATCH_SIZE_PER_GPU,
TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT,
)
def get_wall_clock_breakdown(param_dict):
return get_scalar_param(param_dict, WALL_CLOCK_BREAKDOWN, WALL_CLOCK_BREAKDOWN_DEFAULT)
def get_memory_breakdown(param_dict):
return get_scalar_param(param_dict, MEMORY_BREAKDOWN, MEMORY_BREAKDOWN_DEFAULT)
class HybridEngineConfig(DeepSpeedConfigModel):
enabled: bool = False
max_out_tokens: int = 512
inference_tp_size: int = 1
release_inference_cache: bool = False
pin_parameters: bool = True
tp_gather_partition_size: int = 8
def get_hybrid_engine_config(param_dict):
hybrid_engine_config_dict = param_dict.get("hybrid_engine", {})
hybrid_engine_config = HybridEngineConfig(**hybrid_engine_config_dict)
return hybrid_engine_config
def get_eigenvalue_config(param_dict):
if get_quantize_enabled(param_dict):
param_dict = param_dict[QUANTIZE_TRAINING]
assert not get_eigenvalue_enabled(param_dict), "Eigenvalue based MoQ is temporarily disabled"
return (
get_eigenvalue_enabled(param_dict),
get_eigenvalue_verbose(param_dict),
get_eigenvalue_max_iter(param_dict),
get_eigenvalue_tol(param_dict),
get_eigenvalue_stability(param_dict),
get_eigenvalue_gas_boundary_resolution(param_dict),
get_eigenvalue_layer_name(param_dict),
get_eigenvalue_layer_num(param_dict),
)
else:
return (
EIGENVALUE_ENABLED_DEFAULT,
EIGENVALUE_VERBOSE_DEFAULT,
EIGENVALUE_MAX_ITER_DEFAULT,
EIGENVALUE_TOL_DEFAULT,
EIGENVALUE_STABILITY_DEFAULT,
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
EIGENVALUE_LAYER_NAME_DEFAULT,
EIGENVALUE_LAYER_NUM_DEFAULT,
)
def get_eigenvalue_enabled(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_ENABLED, EIGENVALUE_ENABLED_DEFAULT)
else:
return EIGENVALUE_ENABLED_DEFAULT
def get_eigenvalue_verbose(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_VERBOSE, EIGENVALUE_VERBOSE_DEFAULT)
else:
return EIGENVALUE_VERBOSE_DEFAULT
def get_eigenvalue_max_iter(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_MAX_ITER, EIGENVALUE_MAX_ITER_DEFAULT)
else:
return EIGENVALUE_MAX_ITER_DEFAULT
def get_eigenvalue_tol(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_TOL, EIGENVALUE_TOL_DEFAULT)
else:
return EIGENVALUE_TOL_DEFAULT
def get_eigenvalue_stability(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_STABILITY, EIGENVALUE_STABILITY_DEFAULT)
else:
return EIGENVALUE_STABILITY_DEFAULT
def get_eigenvalue_gas_boundary_resolution(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(
param_dict[EIGENVALUE],
EIGENVALUE_GAS_BOUNDARY_RESOLUTION,
EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT,
)
else:
return EIGENVALUE_GAS_BOUNDARY_RESOLUTION_DEFAULT
def get_eigenvalue_layer_name(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NAME, EIGENVALUE_LAYER_NAME_DEFAULT)
else:
return EIGENVALUE_LAYER_NAME_DEFAULT
def get_eigenvalue_layer_num(param_dict):
if EIGENVALUE in param_dict.keys():
return get_scalar_param(param_dict[EIGENVALUE], EIGENVALUE_LAYER_NUM, EIGENVALUE_LAYER_NUM_DEFAULT)
else:
return EIGENVALUE_LAYER_NUM_DEFAULT
def get_checkpoint_params(param_dict):
return param_dict.get(CHECKPOINT, {})
def get_data_types_params(param_dict):
return param_dict.get(DATA_TYPES, {})
def get_checkpoint_tag_validation_mode(checkpoint_params):
tag_validation_mode = checkpoint_params.get(CHECKPOINT_TAG_VALIDATION, CHECKPOINT_TAG_VALIDATION_DEFAULT)
tag_validation_mode = tag_validation_mode.upper()
if tag_validation_mode in CHECKPOINT_TAG_VALIDATION_MODES:
return tag_validation_mode
else:
raise DeepSpeedConfigError(
"Checkpoint config contains invalid tag_validation "
f"value of {tag_validation_mode}, expecting one of {CHECKPOINT_TAG_VALIDATION_MODES}")
def get_checkpoint_parallel_write_pipeline(checkpoint_params):
par_write_params = checkpoint_params.get(CHECKPOINT_PARALLEL_WRITE, {})
par_write_pipeline = par_write_params.get(CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE,
CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT)
if par_write_pipeline in [True, False]:
return par_write_pipeline
else:
raise DeepSpeedConfigError("checkpoint::parallel_write::pipeline_stage "
f"value of '{par_write_pipeline}' is invalid, expecting: true or false")
def get_dataloader_drop_last(param_dict):
return get_scalar_param(param_dict, DATALOADER_DROP_LAST, DATALOADER_DROP_LAST_DEFAULT)
'''Write deepspeed config files by modifying basic templates.
Can be used for quickly changing parameters via command line parameters.'''
class DeepSpeedConfigWriter:
def __init__(self, data=None):
self.data = data if data is not None else {}
def add_config(self, key, value):
self.data[key] = value
def load_config(self, filename):
self.data = json.load(open(filename, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
def write_config(self, filename):
with open(filename, "w") as outfile:
json.dump(self.data, outfile)
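# Added usage sketch (file names are hypothetical):
#   writer = DeepSpeedConfigWriter()
#   writer.load_config("base_ds_config.json")
#   writer.add_config("train_micro_batch_size_per_gpu", 8)
#   writer.write_config("ds_config_run1.json")
# i.e. start from a template config and override a handful of keys before launch.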
class DeepSpeedConfig(object):
def __init__(self, config: Union[str, dict], mpu=None):
super(DeepSpeedConfig, self).__init__()
if isinstance(config, dict):
self._param_dict = config
elif os.path.exists(config):
self._param_dict = hjson.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
else:
try:
config_decoded = base64.urlsafe_b64decode(config).decode('utf-8')
self._param_dict = hjson.loads(config_decoded)
except (UnicodeDecodeError, AttributeError):
raise ValueError(
f"Expected a string path to an existing deepspeed config, or a dictionary or a valid base64. Received: {config}"
)
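# Added note: the base64 fallback lets callers pass an inline config instead of a
# file path, e.g. (sketch)
#   encoded = base64.urlsafe_b64encode(json.dumps({"train_batch_size": 8}).encode()).decode()
#   DeepSpeedConfig(encoded)  # decodes and parses the same dict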
try:
self.global_rank = dist.get_rank()
if mpu is None:
self.world_size = dist.get_world_size()
else:
self.world_size = mpu.get_data_parallel_world_size()
except:
self.global_rank = 0
self.world_size = 1
# If elastic-mode enabled, update compute + update _param_dict
self.elasticity_enabled = elasticity_enabled(self._param_dict)
if self.elasticity_enabled:
logger.info("DeepSpeed elasticity support enabled")
final_batch_size, valid_gpus, micro_batch_size = compute_elastic_config(
ds_config=self._param_dict,
target_deepspeed_version=__version__,
world_size=self.world_size,
)
elastic_dict = self._param_dict[ELASTICITY]
# Ensure the resource scheduler saw the same elastic config we are using at runtime
ensure_immutable_elastic_config(runtime_elastic_config_dict=elastic_dict)
self.elastic_model_parallel_size = elastic_dict.get(MODEL_PARLLEL_SIZE, MODEL_PARLLEL_SIZE_DEFAULT)
if self.elastic_model_parallel_size < 1:
raise ElasticityConfigError("Model-Parallel size cannot be less than 1, "
f"given model-parallel size: {self.elastic_model_parallel_size}")
self.num_gpus_per_node = elastic_dict.get(NUM_GPUS_PER_NODE, NUM_GPUS_PER_NODE_DEFAULT)
if self.num_gpus_per_node < 1:
raise ElasticityConfigError("NUmber of GPUs per node cannot be less than 1, "
f"given number of GPUs per node: {self.num_gpus_per_node}")
ignore_non_elastic_batch_info = elastic_dict.get(IGNORE_NON_ELASTIC_BATCH_INFO,
IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT)
if not ignore_non_elastic_batch_info:
batch_params = [
TRAIN_BATCH_SIZE,
TRAIN_MICRO_BATCH_SIZE_PER_GPU,
GRADIENT_ACCUMULATION_STEPS,
]
if any(map(lambda t: t in self._param_dict, batch_params)):
raise ElasticityConfigError("One or more batch related parameters were found in your " \
f"ds_config ({TRAIN_BATCH_SIZE}, {TRAIN_MICRO_BATCH_SIZE_PER_GPU}, and/or " \
f"{GRADIENT_ACCUMULATION_STEPS}). These parameters *will not be used* since " \
"elastic training is enabled, which takes control of these parameters. " \
"If you want to suppress this error (the parameters will be silently ignored) " \
f"please set {IGNORE_NON_ELASTIC_BATCH_INFO}':true in your elasticity config.")
# micro_bsz * world_size * gas = total_batch_size
# gas = total_batch_size // (micro_bsz * world_size)
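# Added worked example: final_batch_size=1024, micro_batch_size=8, world_size=32
# gives gradient_accu_steps = 1024 // (8 * 32) = 4.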
gradient_accu_steps = final_batch_size // (micro_batch_size * self.world_size)
if TRAIN_BATCH_SIZE in self._param_dict:
logger.warning("[Elasticity] overriding training_batch_size: "
f"{self._param_dict[TRAIN_BATCH_SIZE]} -> {final_batch_size}")
if TRAIN_MICRO_BATCH_SIZE_PER_GPU in self._param_dict:
logger.warning("[Elasticity] overriding train_micro_batch_size_per_gpu: "
f"{self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU]} -> {micro_batch_size}")
if GRADIENT_ACCUMULATION_STEPS in self._param_dict:
logger.warning("[Elasticity] overriding gradient_accumulation_steps: "
f"{self._param_dict[GRADIENT_ACCUMULATION_STEPS]} -> {gradient_accu_steps}")
logger.info(f"[Elasticity] valid GPU counts: {valid_gpus}")
self._param_dict[TRAIN_BATCH_SIZE] = final_batch_size
self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = micro_batch_size
self._param_dict[GRADIENT_ACCUMULATION_STEPS] = gradient_accu_steps
# Pass a copy so that user json is unmodified, e.g. for logging
self._initialize_params(copy.copy(self._param_dict))
self._configure_train_batch_size()
self._do_sanity_check()
def _initialize_params(self, param_dict):
self.train_batch_size = get_train_batch_size(param_dict)
#print(f"beginning get_train_batch_size = {get_train_batch_size}")
self.train_micro_batch_size_per_gpu = get_train_micro_batch_size_per_gpu(param_dict)
self.gradient_accumulation_steps = get_gradient_accumulation_steps(param_dict)
self.steps_per_print = get_steps_per_print(param_dict)
self.dump_state = get_dump_state(param_dict)
self.disable_allgather = get_disable_allgather(param_dict)
self.communication_data_type = get_communication_data_type(param_dict)
self.prescale_gradients = get_prescale_gradients(param_dict)
self.gradient_predivide_factor = get_gradient_predivide_factor(param_dict)
self.sparse_gradients_enabled = get_sparse_gradients_enabled(param_dict)
self.zero_config = get_zero_config(param_dict)
self.mics_shard_size = self.zero_config.mics_shard_size
self.mics_hierarchial_params_gather = self.zero_config.mics_hierarchical_params_gather
self.zero_optimization_stage = self.zero_config.stage
self.zero_enabled = self.zero_optimization_stage > 0
self.activation_checkpointing_config = DeepSpeedActivationCheckpointingConfig(param_dict)
self.comms_config = DeepSpeedCommsConfig(param_dict)
self.monitor_config = get_monitor_config(param_dict)
self.gradient_clipping = get_gradient_clipping(param_dict)
self.fp16_enabled = get_fp16_enabled(param_dict)
self.fp16_auto_cast = get_fp16_auto_cast(param_dict)
self.bfloat16_enabled = get_bfloat16_enabled(param_dict)
assert not (self.fp16_enabled
and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled'
self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled(param_dict)
self.amp_enabled = get_amp_enabled(param_dict)
self.amp_params = get_amp_params(param_dict)
self.loss_scale = get_loss_scale(param_dict)
self.initial_dynamic_scale = get_initial_dynamic_scale(param_dict)
self.dynamic_loss_scale_args = get_dynamic_loss_scale_args(param_dict)
self.compression_config = get_compression_config(param_dict)
self.optimizer_name = get_optimizer_name(param_dict)
if (self.optimizer_name is not None and self.optimizer_name.lower() in DEEPSPEED_OPTIMIZERS):
self.optimizer_name = self.optimizer_name.lower()
self.optimizer_params = get_optimizer_params(param_dict)
self.optimizer_legacy_fusion = get_optimizer_legacy_fusion(param_dict)
self.zero_allow_untested_optimizer = get_zero_allow_untested_optimizer(param_dict)
self.zero_force_ds_cpu_optimizer = get_zero_force_ds_cpu_optimizer(param_dict)
self.scheduler_name = get_scheduler_name(param_dict)
self.scheduler_params = get_scheduler_params(param_dict)
self.flops_profiler_config = DeepSpeedFlopsProfilerConfig(param_dict)
self.wall_clock_breakdown = (get_wall_clock_breakdown(param_dict) | self.flops_profiler_config.enabled)
self.memory_breakdown = get_memory_breakdown(param_dict)
self.autotuning_config = DeepSpeedAutotuningConfig(param_dict)
(
self.eigenvalue_enabled,
self.eigenvalue_verbose,
self.eigenvalue_max_iter,
self.eigenvalue_tol,
self.eigenvalue_stability,
self.eigenvalue_gas_boundary_resolution,
self.eigenvalue_layer_name,
self.eigenvalue_layer_num,
) = get_eigenvalue_config(param_dict)
self.hybrid_engine = get_hybrid_engine_config(param_dict)
self.sparse_attention = get_sparse_attention(param_dict)
self.pipeline = get_pipeline_config(param_dict)
self.pld_enabled = get_pld_enabled(param_dict)
self.pld_params = get_pld_params(param_dict)
self.curriculum_enabled_legacy = get_curriculum_enabled_legacy(param_dict)
self.curriculum_params_legacy = get_curriculum_params_legacy(param_dict)
self.data_efficiency_enabled = get_data_efficiency_enabled(param_dict)
self.data_efficiency_config = get_data_efficiency_config(param_dict)
checkpoint_params = get_checkpoint_params(param_dict)
validation_mode = get_checkpoint_tag_validation_mode(checkpoint_params)
self.checkpoint_tag_validation_enabled = (validation_mode != ValidationMode.IGNORE)
self.checkpoint_tag_validation_fail = validation_mode == ValidationMode.FAIL
self.load_universal_checkpoint = checkpoint_params.get(LOAD_UNIVERSAL_CHECKPOINT,
LOAD_UNIVERSAL_CHECKPOINT_DEFAULT)
self.use_node_local_storage = checkpoint_params.get(USE_NODE_LOCAL_STORAGE_CHECKPOINT,
USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT)
data_types_params = get_data_types_params(param_dict)
self.grad_accum_dtype = data_types_params.get(GRAD_ACCUM_DTYPE, GRAD_ACCUM_DTYPE_DEFAULT)
par_write_pipe = get_checkpoint_parallel_write_pipeline(checkpoint_params)
self.checkpoint_parallel_write_pipeline = par_write_pipe
self.aio_config = get_aio_config(param_dict)
self.dataloader_drop_last = get_dataloader_drop_last(param_dict)
self.nebula_config = DeepSpeedNebulaConfig(param_dict)
def _batch_assertion(self):
train_batch = self.train_batch_size
micro_batch = self.train_micro_batch_size_per_gpu
grad_acc = self.gradient_accumulation_steps
assert (train_batch > 0), f"Train batch size: {train_batch} has to be greater than 0"
assert (micro_batch > 0), f"Micro batch size per gpu: {micro_batch} has to be greater than 0"
assert (grad_acc > 0), f"Gradient accumulation steps: {grad_acc} has to be greater than 0"
assert train_batch == micro_batch * grad_acc * self.world_size, (
f"Check batch related parameters. train_batch_size is not equal "
"to micro_batch_per_gpu * gradient_acc_step * world_size "
f"{train_batch} != {micro_batch} * {grad_acc} * {self.world_size}")
def _set_batch_related_parameters(self):
train_batch = self.train_batch_size
micro_batch = self.train_micro_batch_size_per_gpu
grad_acc = self.gradient_accumulation_steps
#print(f"train_batch = {train_batch}, micro_batch={micro_batch}")
# all values are provided nothing needs to be set
if train_batch is not None and micro_batch is not None and grad_acc is not None:
return
# gradient_accumulation_steps needs to be set
elif train_batch is not None and micro_batch is not None:
grad_acc = train_batch // micro_batch
grad_acc //= self.world_size
self.gradient_accumulation_steps = grad_acc
# micro_batch_per_gpu needs to be set
elif train_batch is not None and grad_acc is not None:
micro_batch = train_batch // self.world_size
micro_batch //= grad_acc
self.train_micro_batch_size_per_gpu = micro_batch
# train_batch_size needs to be set
elif micro_batch is not None and grad_acc is not None:
train_batch_size = micro_batch * grad_acc
train_batch_size *= self.world_size
self.train_batch_size = train_batch_size
# only train_batch_size is provided: derive micro_batch_per_gpu and gradient_accumulation_steps
elif train_batch is not None:
self.gradient_accumulation_steps = 1
self.train_micro_batch_size_per_gpu = train_batch // self.world_size
# only train_micro_batch_size_per_gpu is provided: derive train_batch_size and gradient_accumulation_steps
elif micro_batch is not None:
self.train_batch_size = micro_batch * self.world_size
self.gradient_accumulation_steps = 1
# either none of the three parameters are provided or just gradient_accumulation_step is provided
else:
assert False, \
'Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided'
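# Added worked example: with world_size=4, a config that sets only train_batch_size=64
# and train_micro_batch_size_per_gpu=8 takes the second branch above and derives
# gradient_accumulation_steps = 64 // 8 // 4 = 2; _batch_assertion() then verifies
# 64 == 8 * 2 * 4.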
def _configure_train_batch_size(self):
self._set_batch_related_parameters()
self._batch_assertion()
def _do_sanity_check(self):
self._do_error_check()
self._do_warning_check()
def print_user_config(self):
logger.info(" json = {}".format(
json.dumps(
self._param_dict,
sort_keys=True,
indent=4,
cls=ScientificNotationEncoder,
separators=(",", ":"),
)))
def print(self, name):
logger.info("{}:".format(name))
for arg in sorted(vars(self)):
if arg != "_param_dict":
dots = "." * (29 - len(arg))
logger.info(" {} {} {}".format(arg, dots, getattr(self, arg)))
self.print_user_config()
def _do_error_check(self):
assert (self.train_micro_batch_size_per_gpu
), "DeepSpeedConfig: {} is not defined".format(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
assert (
self.gradient_accumulation_steps), "DeepSpeedConfig: {} is not defined".format(GRADIENT_ACCUMULATION_STEPS)
if self.zero_enabled:
assert (self.zero_optimization_stage <=
ZeroStageEnum.max_stage), "DeepSpeedConfig: Maximum supported ZeRO stage is {}".format(
ZeroStageEnum.max_stage)
if self.fp16_master_weights_and_gradients:
assert self.zero_enabled and self.zero_optimization_stage == ZeroStageEnum.gradients, "Fp16_master_weights_and_grads is only supported with ZeRO Stage 2 for now."
def _do_warning_check(self):
fp16_enabled = self.fp16_enabled
vocabulary_size = self._param_dict.get(VOCABULARY_SIZE, VOCABULARY_SIZE_DEFAULT)
if vocabulary_size and vocabulary_size % TENSOR_CORE_ALIGN_SIZE != 0:
logger.warning(
"DeepSpeedConfig: vocabulary size {} is not aligned to {}, may import tensor core utilization.".format(
vocabulary_size, TENSOR_CORE_ALIGN_SIZE))
if (self.optimizer_params is not None and MAX_GRAD_NORM in self.optimizer_params.keys()
and self.optimizer_params[MAX_GRAD_NORM] > 0):
if fp16_enabled:
if self.global_rank == 0:
logger.warning("DeepSpeedConfig: In FP16 mode, DeepSpeed will pass {}:{} to FP16 wrapper".format(
MAX_GRAD_NORM, self.optimizer_params[MAX_GRAD_NORM]))
else:
if self.global_rank == 0:
logger.warning(
"DeepSpeedConfig: In FP32 mode, DeepSpeed does not permit MAX_GRAD_NORM ({}) > 0, setting to zero"
.format(self.optimizer_params[MAX_GRAD_NORM]))
self.optimizer_params[MAX_GRAD_NORM] = 0.0
# --- end of deepspeed/runtime/config.py ---
# DeepSpeed Team
import torch
import math
from deepspeed.utils import logger
from deepspeed.ops.quantizer import ds_quantizer
TWO_D_PARAMS = 6
class Quantizer(object):
def __init__(self,
q_groups=1,
q_mixed_fp16=False,
q_change_ratio=0.01,
q_type=0,
q_rounding=0,
q_verbose=False,
q_eigenvalue=False,
use_quantizer_kernel=False,
layer_num=0):
self.q_groups = q_groups
self.q_mixed_fp16 = q_mixed_fp16
self.q_change_ratio = q_change_ratio
self.q_type = q_type
self.qsteps = 0
self.quantize_real_ratio = 1.000
self.q_verbose = q_verbose
self.q_eigenvalue = q_eigenvalue
self.use_quantizer_kernel = use_quantizer_kernel
self.q_rounding = q_rounding
self.layer_num = layer_num
def any_precision_switch(self):
# Temporarily disabled functionality
if self.layer_num == 0:
return True
result = False
for index in range(self.layer_num):
if self.q_start_bits[index] != self.q_target_bits:
next_step = self.qsteps + (TWO_D_PARAMS * (self.layer_num if self.layer_num != 0 else 1))
if next_step >= self.q_period[index]:
result = True
return result
def quantize(self, parameter_group, overflow, eigenvalue_enabled, block_eigenvalue={}):
if overflow and not eigenvalue_enabled:
return
self.step()
self.update_fp16_ratio()
for i in range(len(parameter_group)):
for p in parameter_group[i]:
if len(p.size()) > 1 and hasattr(p, "start_bits") and p.start_bits:
param_id = id(p)
if block_eigenvalue is None:
eigenvalue, layer_id = None, 0
else:
eigenvalue, layer_id = block_eigenvalue[param_id] if param_id in block_eigenvalue else (None, 0)
if eigenvalue is not None:
factor = 1 + math.floor(eigenvalue * 4)
p.data = self.compute_quantization(p.data, layer_id, factor)
else:
p.data = self.compute_quantization(p, layer_id)
def step(self):
self.qsteps += 1
def quantize_highbit(self, inputs, num_bits):
q_range = 2**num_bits
input_flat = inputs.reshape(self.q_groups, -1)
g_min = input_flat.amin(dim=-1, keepdim=True)
g_max = input_flat.amax(dim=-1, keepdim=True)
# Random number generator (Uniform)
if self.q_rounding == 'nearest':
p = 0.
else:
p = input_flat.new(input_flat.shape).uniform_(-0.5, 0.5)
if self.q_type == 'symmetric':
scale = 2 * torch.max(torch.abs(g_min), torch.abs(g_max)) / q_range
zero_point = 0.
input_flat = (input_flat / scale + p).round().clamp(-(q_range >> 1), (q_range >> 1) - 1) * scale
elif self.q_type == 'asymmetric':
scale = (g_max - g_min) / q_range
zero_point = (g_min / scale).round() * scale
input_flat = ((input_flat - zero_point) / scale + p).round().clamp(0, (q_range - 1)) * scale + zero_point
output = input_flat.reshape(inputs.shape).contiguous()
return output
def quantize_tenary(self, inputs):
input_flat = inputs.reshape(self.q_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1).div(n)
thres = (0.7 * m).view(-1, 1) #.expand_as(input_flat)
pos = (input_flat > thres).type(inputs.type())
neg = (input_flat < -thres).type(inputs.type())
mask = (input_flat.abs() > thres).type(inputs.type())
alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
output = alpha * pos - alpha * neg
output = output.reshape(inputs.shape).contiguous()
return output
def quantize_binary(self, inputs):
input_flat = inputs.reshape(self.q_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
output = input_flat.sign().mul(m)
output = output.reshape(inputs.shape).contiguous()
return output
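# Added note (sketch): this resembles binary-weight-network style quantization: each
# group becomes sign(x) scaled by its mean absolute value m = ||x||_1 / n, so a group
# [0.2, -0.6, 1.0, -0.2] has m = 0.5 and quantizes to [0.5, -0.5, 0.5, -0.5].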
def mixed_fp16_quantize(self, input, input_q, index):
if self.q_mixed_fp16 and self.q_start_bits[index] >= (self.q_target_bits - 1):
input_q = input * self.quantize_real_ratio + (1 - self.quantize_real_ratio) * input_q
return input_q
return input_q
def compute_quantization(self, input, index=0, factor=1):
# fixing the quantization bits based on the training steps
# when reducing 1 bit at each period, we increase the period
# to go slowly toward the target quantization bits
# the period and starting bit can be configured
if input.start_bits != input.target_bits:
if self.qsteps >= input.q_period:
self.quantize_real_ratio = 1.0
input.q_period <<= 1
input.q_period *= factor
input.start_bits -= 1
if self.q_verbose:
logger.info(
f'Quantization settings: current bit-precision = {input.start_bits}, step = {self.qsteps}, quantization period = {input.q_period}, index = {index}'
)
assert (input.start_bits >= input.target_bits), \
'Quantization bit is lower than target precision bits!'
if self.use_quantizer_kernel:
if input.start_bits <= 2:
raise ValueError('Quantization bit is too low, please do it without quantization kernel!')
input_q = ds_quantizer(input.data.clone(),
self.q_groups,
input.start_bits,
asym=False if self.q_type == 'symmetric' else True,
sr=False if self.q_rounding == 'nearest_neighbor' else True)
else:
if input.start_bits >= 3:
input_flat = self.quantize_highbit(input.data, input.start_bits)
elif input.start_bits == 2:
assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest!'
input_flat = self.quantize_tenary(input.data)
elif input.start_bits == 1:
assert self.q_type == 'symmetric', 'Quantization type is not symmetric!'
assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest!'
input_flat = self.quantize_binary(input.data)
if self.use_quantizer_kernel:
return self.mixed_fp16_quantize(input.data, input_q, index)
else:
if self.q_mixed_fp16 and input.start_bits >= input.target_bits - 1:
input_flat = self.quantize_real_ratio * input.data + \
(1 - self.quantize_real_ratio) * input_flat
return input_flat
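# Added schedule sketch: a parameter with start_bits=8, target_bits=4, q_period=64
# (and factor=1) drops 8->7 once qsteps reaches 64, after which q_period doubles to
# 128; it drops 7->6 at step 128, 6->5 at step 256, and stops shrinking once
# start_bits reaches target_bits.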
def update_fp16_ratio(self):
if self.q_mixed_fp16:
if self.quantize_real_ratio > 0:
self.quantize_real_ratio -= self.q_change_ratio
else:
self.quantize_real_ratio = 0.000
# --- end of deepspeed/runtime/quantize.py ---
# DeepSpeed Team
import os
import re
import stat
import torch
import hashlib
from collections import defaultdict, OrderedDict, deque
from shutil import copyfile
import gc
from torch.nn.modules import Module
from torch.nn.parameter import Parameter
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from typing import Callable, Dict, Union, Iterable
import deepspeed
from deepspeed import comm as dist
from deepspeed.runtime.utils import see_memory_usage, DummyOptim
from .zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
from deepspeed.runtime.zero.utils import is_zero_supported_optimizer, ZeRORuntimeException
from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload
from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION
from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer
from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer
from deepspeed.runtime.bf16_optimizer import BF16_Optimizer
from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \
ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \
TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT, ZERO_ONE_ADAM_OPTIMIZER
from deepspeed.runtime.dataloader import DeepSpeedDataLoader
from deepspeed.runtime.constants import \
ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \
PLD_THETA, PLD_GAMMA, BFLOAT16, FP16, AMP, GRADIENT_ACCUMULATION_STEPS, \
DATA_PARALLEL_GROUP, GLOBAL_RANK
from deepspeed.runtime.zero.config import ZeroStageEnum
from deepspeed.compression import compression_scheduler
from deepspeed.compression.constants import \
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED, \
WEIGHT_QUANTIZATION, SHARED_PARAMETERS, \
WEIGHT_QUANTIZE_ENABLED, \
WEIGHT_QUANTIZE_GROUPS, \
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE, \
WEIGHT_QUANTIZE_CHANGE_RATIO, \
WEIGHT_QUANTIZE_TYPE, \
WEIGHT_QUANTIZE_ROUNDING, \
WEIGHT_QUANTIZE_VERBOSE, \
WEIGHT_QUANTIZE_KERNEL
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FROZEN_PARAM_FRAGMENTS
from deepspeed.runtime.sparse_tensor import SparseTensor
from deepspeed.runtime import lr_schedules
from deepspeed.utils import groups
from deepspeed.utils import logger, log_dist, instrument_w_nvtx
from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer
from deepspeed.utils.debug import debug_extract_module_and_param_names
from deepspeed.monitor.monitor import MonitorMaster
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop
from deepspeed.runtime.utils import clip_grad_norm_
from deepspeed.runtime.eigenvalue import Eigenvalue
from deepspeed.runtime.data_pipeline.constants import DATA_SAMPLING, \
DATA_ROUTING, DATA_SAMPLING_ENABLED, CURRICULUM_LEARNING, \
CURRICULUM_LEARNING_ENABLED, DATA_SAMPLING_NUM_WORKERS, RANDOM_LTD, \
RANDOM_LTD_ENABLED, RANDOM_LTD_LAYER_ID, RANDOM_LTD_LAYER_NUM, \
RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE, RANDOM_LTD_LAYER_TOKEN_LR_ENABLED, \
RANDOM_LTD_GLOBAL_BATCH_SIZE, RANDOM_LTD_MICRO_BATCH_SIZE, DATA_EFFICIENCY
from deepspeed.runtime.data_pipeline.curriculum_scheduler import CurriculumScheduler
from deepspeed.runtime.data_pipeline.data_routing.scheduler import RandomLTDScheduler
from deepspeed.runtime.data_pipeline.data_routing.helper import remove_random_ltd_state_dict
from deepspeed.runtime.data_pipeline.data_routing.basic_layer import RandomLayerTokenDrop
from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine
from .pipe.module import PipelineModule
from .utils import get_ma_status
from ..ops.adam import FusedAdam
from ..moe.sharded_moe import TopKGate, MOELayer
from ..moe.layer import MoE
from ..moe.utils import is_moe_param
from ..git_version_info import version
from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler
from deepspeed.utils.logging import print_json_dist, print_configuration
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.runtime.config import DtypeEnum
MEMORY_OPT_ALLREDUCE_SIZE = 500000000
DeepSpeedOptimizerCallable = \
Callable[[Union[Iterable[Parameter], Dict[str, Iterable]]], Optimizer]
DeepSpeedSchedulerCallable = Callable[[Optimizer], _LRScheduler]
try:
import apex
from apex import amp
APEX_INSTALLED = True
except ImportError:
# Fail silently so we don't spam logs unnecessarily if user isn't using amp
APEX_INSTALLED = False
def split_half_float_double_sparse(tensors):
device_type = get_accelerator().device_name()
supported_types = [
"torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type),
"torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type),
SparseTensor.type()
]
for t in tensors:
assert t.type() in supported_types, f"attempting to reduce an unsupported grad type: {t.type()}"
buckets = []
for i, dtype in enumerate(supported_types):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append((dtype, bucket))
return buckets
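# Added example: on a CUDA accelerator, a mixed list of fp16 and fp32 gradients comes
# back as buckets such as [("torch.cuda.HalfTensor", [fp16 grads...]),
# ("torch.cuda.FloatTensor", [fp32 grads...])]; bucket order follows supported_types
# and dtypes with no tensors are skipped.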
FORWARD_MICRO_TIMER = 'forward_microstep'
FORWARD_GLOBAL_TIMER = 'forward'
BACKWARD_MICRO_TIMER = 'backward_microstep'
BACKWARD_GLOBAL_TIMER = 'backward'
BACKWARD_INNER_MICRO_TIMER = 'backward_inner_microstep'
BACKWARD_INNER_GLOBAL_TIMER = 'backward_inner'
BACKWARD_REDUCE_MICRO_TIMER = 'backward_allreduce_microstep'
BACKWARD_REDUCE_GLOBAL_TIMER = 'backward_allreduce'
STEP_MICRO_TIMER = 'step_microstep'
STEP_GLOBAL_TIMER = 'step'
class EngineTimers(object):
r"""Wallclock timers for DeepSpeedEngine"""
def __init__(self, enable_micro_timers, enable_global_timers):
self.forward_timers = []
self.backward_timers = []
self.backward_inner_timers = []
self.backward_reduce_timers = []
self.step_timers = []
self.global_timers = []
self.micro_timers = []
if enable_micro_timers:
self.forward_timers += [FORWARD_MICRO_TIMER]
self.backward_timers += [BACKWARD_MICRO_TIMER]
self.backward_inner_timers += [BACKWARD_INNER_MICRO_TIMER]
self.backward_reduce_timers += [BACKWARD_REDUCE_MICRO_TIMER]
self.step_timers += [STEP_MICRO_TIMER]
self.micro_timers += [
FORWARD_MICRO_TIMER, BACKWARD_MICRO_TIMER, BACKWARD_INNER_MICRO_TIMER, BACKWARD_REDUCE_MICRO_TIMER,
STEP_MICRO_TIMER
]
if enable_global_timers:
self.forward_timers += [FORWARD_GLOBAL_TIMER]
self.backward_timers += [BACKWARD_GLOBAL_TIMER]
self.backward_inner_timers += [BACKWARD_INNER_GLOBAL_TIMER]
self.backward_reduce_timers += [BACKWARD_REDUCE_GLOBAL_TIMER]
self.step_timers += [STEP_GLOBAL_TIMER]
self.global_timers += [
FORWARD_GLOBAL_TIMER, BACKWARD_GLOBAL_TIMER, BACKWARD_INNER_GLOBAL_TIMER, BACKWARD_REDUCE_GLOBAL_TIMER,
STEP_GLOBAL_TIMER
]
class DeepSpeedEngine(Module):
r"""DeepSpeed engine for training."""
def __init__(
self,
args,
model,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
mpu=None,
dist_init_required=None,
collate_fn=None,
config=None,
config_class=None,
dont_change_device=False,
):
super(DeepSpeedEngine, self).__init__()
self.dont_change_device = dont_change_device
self.client_optimizer = optimizer
self.client_lr_scheduler = lr_scheduler
self.training_data = training_data
self.collate_fn = collate_fn
self.mpu = mpu
self.data_parallel_group = None
self.global_steps = 0
self.global_samples = 0
self.micro_steps = 0
self.skipped_steps = 0
self.gradient_average = True
self.warn_unscaled_loss = True
self.config = config
self._config = config_class
self.loaded_checkpoint_mp_world_size = None
self.loaded_checkpoint_dp_world_size = None
self.enable_backward_allreduce = True
self.progressive_layer_drop = None
self.eigenvalue = None
self.block_eigenvalue = None
self.gas_boundary_ctr = 0
self.dist_backend = get_accelerator().communication_backend_name()
self.has_moe_layers = False
self.num_experts = []
self.gate_modules = []
self.moe_layers = []
self._step_applied = False
self._global_grad_norm = None
self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend.
self.checkpoint_engine = None
self._is_gradient_accumulation_boundary = None
self.scale_wrt_gas = None
# for debug purposes - can then debug print: debug_get_module_name(module)
debug_extract_module_and_param_names(model)
# needed for zero_to_fp32 weights reconstruction to remap nameless data to state_dict
self.param_names = {param: name for name, param in model.named_parameters()}
self._do_args_sanity_check(args)
self._configure_with_arguments(args, mpu)
self._do_sanity_check()
see_memory_usage(f"DeepSpeed Engine: After args sanity test", force=self.memory_breakdown())
if mpu is not None:
if self.elasticity_enabled():
if not self.is_elastic_model_parallel_supported():
assert not self.elasticity_enabled(), ("Elasticity is not currently supported"
" with model parallelism.")
self._set_distributed_vars(args)
dist.configure(self._config)
self.monitor = MonitorMaster(self._config.monitor_config)
see_memory_usage(
f"DeepSpeed Engine: Before configure distributed model",
force=self.memory_breakdown(),
)
self.pipeline_parallelism = isinstance(model, PipelineModule)
# Configure distributed model
self._configure_distributed_model(model)
self._get_model_parameters()
see_memory_usage(f"DeepSpeed Engine: After configure distributed model")
# Configure wall clock timers
self.timers = SynchronizedWallClockTimer()
# Throughput timer
self.tput_timer = ThroughputTimer(
batch_size=self.train_batch_size(),
steps_per_output=self.steps_per_print(),
monitor_memory=False,
)
log_dist(f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}", ranks=[0])
if self.flops_profiler_enabled():
self.flops_profiler = FlopsProfiler(self.module, self)
if training_data:
self.training_dataloader = self.deepspeed_io(training_data)
else:
self.training_dataloader = None
# Configure optimizer and scheduler
self.optimizer = None
self.basic_optimizer = None
self.lr_scheduler = None
has_optimizer = False
if optimizer or self.optimizer_name():
has_optimizer = True
# If no parameters given by init default to module parameters
if model_parameters is None:
model_parameters = self.module.parameters()
# Convert model parameters from generator to list
if not isinstance(model_parameters, list):
model_parameters = list(model_parameters)
if has_optimizer:
self._configure_optimizer(optimizer, model_parameters)
self._configure_lr_scheduler(lr_scheduler)
self._report_progress(0)
elif self.zero_optimization():
# no optim selected but zero is enabled
self.optimizer = self._configure_zero_optimizer(optimizer=None)
elif self.bfloat16_enabled():
self.optimizer = self._configure_bf16_optimizer(optimizer=None)
# Bookkeeping for sparse support
self.sparse_tensor_module_names = set()
# if self.sparse_gradients_enabled():
for name, module in self.module.named_modules():
if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)) and self.sparse_gradients_enabled():
self.sparse_tensor_module_names.add(name + ".weight")
logger.info("Will convert {} to sparse tensor during training".format(name))
self.save_non_zero_checkpoint = False
self.save_zero_checkpoint = False
if not isinstance(self.optimizer, DeepSpeedZeRoOffload):
self._configure_checkpointing(dist_init_required)
if self.eigenvalue_enabled():
self.eigenvalue = self._configure_eigenvalue()
if self.pld_enabled():
self.progressive_layer_drop = self._configure_progressive_layer_drop()
if self.curriculum_enabled_legacy():
self.curriculum_scheduler_legacy = self._configure_curriculum_scheduler_legacy()
if self.random_ltd_enabled():
random_ltd_config = self.random_ltd_config()
random_ltd_config[RANDOM_LTD_GLOBAL_BATCH_SIZE] = self.train_batch_size()
random_ltd_config[RANDOM_LTD_MICRO_BATCH_SIZE] = self.train_micro_batch_size_per_gpu()
self.random_ltd_scheduler = self._configure_random_ltd_scheduler(random_ltd_config)
# Engine timers
self.engine_timers = EngineTimers(enable_micro_timers=self.wall_clock_breakdown(),
enable_global_timers=self.wall_clock_breakdown()
or self.flops_profiler_enabled())
if self.global_rank == 0:
self._config.print("DeepSpeedEngine configuration")
if self.dump_state():
print_configuration(self, "DeepSpeedEngine")
# Load pre-installed or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
def destroy(self):
if self.optimizer is not None and hasattr(self.optimizer, 'destroy'):
self.optimizer.destroy()
def _get_model_parameters(self):
if self.autotuning_profile_model_info():
self.autotuning_model_info = {}
num_params = 0
trainable_num_params = 0
for p in self.module.parameters():
# since user code might call deepspeed.zero.Init() before deepspeed.initialize(), check the attribute to see whether the parameter is already partitioned in ZeRO stage 3
n = 0
if hasattr(p, "ds_tensor"): # if the parameter is partitioned in zero 3
n += p.ds_numel
else: # if the parameter is not partitioned in zero 3 yet
n += p.numel()
num_params += n
if p.requires_grad:
trainable_num_params += n
if self.global_rank == 0:
self.autotuning_model_info["num_params"] = num_params * self.mp_world_size
self.autotuning_model_info["trainable_num_params"] = trainable_num_params * self.mp_world_size
logger.info(f"model parameter = {num_params}")
def get_batch_info(self):
"""Get all training batch related settings.
Returns:
train_batch_size (int): The effective training batch size. This is the amount of data
samples that leads to one step of model update.
train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one
step (without gradient accumulation).
gradient_accumulation_steps (int): Number of training steps to accumulate gradients
before averaging and applying them.
"""
return (
self.train_batch_size,
self.train_micro_batch_size_per_gpu,
self.gradient_accumulation_steps,
)
def set_train_batch_size(self, train_batch_size):
"""Adjust the global batch size by increasing or decreasing the number of
micro-batches (i.e., gradient accumulation steps). The size of each micro-batch
(i.e., ``train_micro_batch_size_per_gpu``) is not changed.
Args:
train_batch_size (int): The new global batch size for training.
Raises:
ValueError: if ``train_batch_size`` is not divisible by the
configured micro-batch size and data parallelism.
"""
if train_batch_size % (self.train_micro_batch_size_per_gpu() * self.dp_world_size) != 0:
#print(f'{train_batch_size=} {self.train_micro_batch_size_per_gpu()=} {self.dp_world_size=}')
raise ValueError('Train batch size must be divisible by train_micro_batch_size_per_gpu * data parallel world size')
new_gas = train_batch_size // (self.train_micro_batch_size_per_gpu() * self.dp_world_size)
# overwrite config
self._config.train_batch_size = train_batch_size
self._config.gradient_accumulation_steps = new_gas
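# Added worked example: with train_micro_batch_size_per_gpu=4 and dp_world_size=8,
# engine.set_train_batch_size(64) sets gradient_accumulation_steps to 64 // (4 * 8) = 2,
# whereas engine.set_train_batch_size(48) raises ValueError since 48 % 32 != 0.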
def set_data_post_process_func(self, post_process_func):
if self.training_dataloader is not None:
self.training_dataloader.post_process_func = post_process_func
def set_custom_curriculum_learning_schedule(self, schedule_func_dict):
if self.training_dataloader is not None and self.curriculum_learning_enabled():
self.training_dataloader.data_sampler.set_custom_curriculum_learning_schedule(schedule_func_dict)
def get_global_grad_norm(self) -> float:
"""Return the 2-norm of all gradients. If there is model parallelism,
the norm will be global.
The computed norm will be cached and reused until the next step() pass.
.. note::
In the presence of model parallelism, this is a collective call
and acts as a barrier among ``mpu.get_model_parallel_group()``.
Returns:
float: norm
"""
return self._global_grad_norm
def __getattr__(self, name):
"""
Pass through attributes defined in the model if they are not overridden by ds-engine.
"""
_module = {}
if "module" in self.__dict__:
_module = self.__dict__['module']
if name in dir(self):
return getattr(self, name)
elif name in dir(_module):
return getattr(_module, name)
else:
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
def checkpoint_tag_validation_enabled(self):
return self._config.checkpoint_tag_validation_enabled
def checkpoint_tag_validation_fail(self):
return self._config.checkpoint_tag_validation_fail
def elasticity_enabled(self):
return self._config.elasticity_enabled
def is_elastic_model_parallel_supported(self):
if self.elasticity_enabled():
# Add code for finding number of GPUs per node automatically
if self._config.num_gpus_per_node % self._config.elastic_model_parallel_size == 0:
return True
else:
return False
def pld_enabled(self):
return self._config.pld_enabled
def pld_params(self):
return self._config.pld_params
def pld_theta(self):
return self.pld_params()[PLD_THETA]
def pld_gamma(self):
return self.pld_params()[PLD_GAMMA]
def eigenvalue_enabled(self):
return self._config.eigenvalue_enabled
def eigenvalue_verbose(self):
return self._config.eigenvalue_verbose
def eigenvalue_max_iter(self):
return self._config.eigenvalue_max_iter
def eigenvalue_tol(self):
return self._config.eigenvalue_tol
def eigenvalue_stability(self):
return self._config.eigenvalue_stability
def eigenvalue_gas_boundary_resolution(self):
return self._config.eigenvalue_gas_boundary_resolution
def eigenvalue_layer_name(self):
return self._config.eigenvalue_layer_name
def eigenvalue_layer_num(self):
return self._config.eigenvalue_layer_num
def curriculum_enabled_legacy(self):
return self._config.curriculum_enabled_legacy
def curriculum_params_legacy(self):
return self._config.curriculum_params_legacy
def data_efficiency_enabled(self):
return self._config.data_efficiency_enabled
def data_efficiency_config(self):
return self._config.data_efficiency_config
def data_sampling_enabled(self):
return self._config.data_efficiency_config[DATA_SAMPLING][DATA_SAMPLING_ENABLED]
def data_sampling_config(self):
return self._config.data_efficiency_config[DATA_SAMPLING]
def curriculum_learning_enabled(self):
return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]
def curriculum_learning_config(self):
return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING]
def random_ltd_enabled(self):
return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD][RANDOM_LTD_ENABLED]
def random_ltd_config(self):
return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD]
def random_ltd_initialize(self):
assert self.random_ltd_enabled()
random_ltd_config = self.random_ltd_config()
random_ltd_queue = deque([x for x in sorted(random_ltd_config[RANDOM_LTD_LAYER_ID])])
count = 0
for name, layer in self.module.named_modules():
if isinstance(layer, RandomLayerTokenDrop):
                if len(random_ltd_queue) != 0 and str(random_ltd_queue[0]) in name:  # e.g. layer ids [1, 2, 3]
layer.init_config(random_ltd_config, self.random_ltd_scheduler, count)
random_ltd_queue.popleft()
count += 1
        if random_ltd_config[RANDOM_LTD_LAYER_NUM] != count:
            raise ValueError(f'random_ltd_layer_num {random_ltd_config[RANDOM_LTD_LAYER_NUM]} must equal '
                             f'the number of configured random_ltd_layer_id entries ({count})')
        if random_ltd_config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]:
            assert self.client_lr_scheduler is None
            raise ValueError('layer token LR schedule is not yet supported')
#self.lr_scheduler = lr_schedules.WarmupLayerTokenDecayLR(self.optimizer, self.random_ltd_scheduler)
def wall_clock_breakdown(self):
return self._config.wall_clock_breakdown
def flops_profiler_enabled(self):
return self._config.flops_profiler_config.enabled or self.autotuning_enabled()
def flops_profiler_profile_step(self):
step = self._config.flops_profiler_config.profile_step
if self._config.autotuning_config.enabled:
step = self.autotuning_start_profile_step()
return step
def flops_profiler_module_depth(self):
return self._config.flops_profiler_config.module_depth
def flops_profiler_top_modules(self):
return self._config.flops_profiler_config.top_modules
def flops_profiler_detailed(self):
if self._config.autotuning_config.enabled:
return False
return self._config.flops_profiler_config.detailed
def flops_profiler_output_file(self):
return self._config.flops_profiler_config.output_file
def memory_breakdown(self):
return self._config.memory_breakdown
def autotuning_enabled(self):
return self._config.autotuning_config.enabled
def autotuning_start_profile_step(self):
return self._config.autotuning_config.start_profile_step
def autotuning_end_profile_step(self):
return self._config.autotuning_config.end_profile_step
def autotuning_metric_path(self):
path = self._config.autotuning_config.metric_path
if not path:
path = os.path.join(os.getcwd(), "autotuning_metric.json")
return path
def autotuning_model_info_path(self):
path = self._config.autotuning_config.model_info_path
if not path:
path = os.path.join(os.getcwd(), "autotuning_model_info.json")
return path
def autotuning_metric(self):
return self._config.autotuning_config.metric
def autotuning_profile_model_info(self):
return self.autotuning_enabled(
) and self._config.autotuning_config.model_info and self._config.autotuning_config.model_info.get(
"profile", False)
def sparse_gradients_enabled(self):
return self._config.sparse_gradients_enabled
def train_batch_size(self):
return self._config.train_batch_size
def train_micro_batch_size_per_gpu(self):
return self._config.train_micro_batch_size_per_gpu
def optimizer_name(self):
return (self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name)
def optimizer_params(self):
return self._config.optimizer_params
def optimizer_legacy_fusion(self):
return self._config.optimizer_legacy_fusion
def scheduler_name(self):
return self._config.scheduler_name
def scheduler_params(self):
return self._config.scheduler_params
def quantize_training(self):
return (
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS]
[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_GROUPS],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS]
[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_CHANGE_RATIO],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_TYPE],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ROUNDING],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_VERBOSE],
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_KERNEL],
)
def zero_optimization(self):
return self._config.zero_enabled
def zero_allow_untested_optimizer(self):
return self._config.zero_allow_untested_optimizer
def zero_force_ds_cpu_optimizer(self):
return self._config.zero_force_ds_cpu_optimizer
def zero_reduce_scatter(self):
return self._config.zero_config.reduce_scatter
def zero_overlap_comm(self):
return self._config.zero_config.overlap_comm
def zero_offload_optimizer(self):
return self._config.zero_config.offload_optimizer
def zero_offload_param(self):
return self._config.zero_config.offload_param
def zero_use_cpu_optimizer(self):
if self._config.zero_config.offload_optimizer is not None:
return self._config.zero_config.offload_optimizer.device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme]
return False
def zero_cpu_offload(self):
if self._config.zero_config.offload_optimizer is not None:
return self._config.zero_config.offload_optimizer.device == OffloadDeviceEnum.cpu
return False
def zero_sub_group_size(self):
return self._config.zero_config.sub_group_size
def zero_optimization_stage(self):
return self._config.zero_optimization_stage
def mics_shard_size(self):
return self._config.mics_shard_size
def zero_reduce_bucket_size(self):
return self._config.zero_config.reduce_bucket_size
def zero_allgather_bucket_size(self):
return self._config.zero_config.allgather_bucket_size
def zero_optimization_partition_gradients(self):
return self.zero_optimization_stage() >= ZeroStageEnum.gradients
def zero_optimization_partition_weights(self):
return self.zero_optimization_stage() >= ZeroStageEnum.weights
def zero_contiguous_gradients(self):
return self._config.zero_config.contiguous_gradients
def zero_load_from_fp32_weights(self):
return self._config.zero_config.load_from_fp32_weights
def zero_elastic_checkpoint(self):
return self._config.zero_config.elastic_checkpoint
def zero_max_live_parameters(self):
return self._config.zero_config.max_live_parameters
def zero_max_reuse_distance(self):
return self._config.zero_config.max_reuse_distance
def zero_prefetch_bucket_size(self):
return self._config.zero_config.prefetch_bucket_size
def zero_param_persistence_threshold(self):
return self._config.zero_config.param_persistence_threshold
def zero_model_persistence_threshold(self):
return self._config.zero_config.model_persistence_threshold
def zero_gather_16bit_weights_on_model_save(self):
return self._config.zero_config.gather_16bit_weights_on_model_save
def zero_grad_hooks(self):
return self._config.zero_config.grad_hooks
def zero_legacy_stage1(self):
return self._config.zero_config.legacy_stage1
def zero_ignore_unused_parameters(self):
return self._config.zero_config.ignore_unused_parameters
def fp16_enabled(self):
return self._config.fp16_enabled
def bfloat16_enabled(self):
return self._config.bfloat16_enabled
def fp16_master_weights_and_gradients(self):
return self._config.fp16_master_weights_and_gradients
def amp_enabled(self):
return self._config.amp_enabled
def amp_params(self):
return self._config.amp_params
def fp16_auto_cast(self):
return self._config.fp16_auto_cast
def loss_scale(self):
return self._config.loss_scale
def gradient_accumulation_steps(self):
return self._config.gradient_accumulation_steps
def use_node_local_storage(self):
return self._config.use_node_local_storage
def load_universal_checkpoint(self):
return self._config.load_universal_checkpoint
@property
def communication_data_type(self):
res = self._config.communication_data_type
if res is not None:
return res
if self.fp16_enabled():
return torch.float16
if self.bfloat16_enabled():
return torch.bfloat16
return torch.float32
def postscale_gradients(self):
return not self._config.prescale_gradients
def gradient_predivide_factor(self):
return self._config.gradient_predivide_factor
def steps_per_print(self):
return self._config.steps_per_print
def zero_allgather_partitions(self):
return self._config.zero_config.allgather_partitions
def zero_round_robin_gradients(self):
return self._config.zero_config.round_robin_gradients
def dump_state(self):
return self._config.dump_state
def gradient_clipping(self):
return self._config.gradient_clipping
def dynamic_loss_scale(self):
return self._config.loss_scale == 0
def initial_dynamic_scale(self):
return self._config.initial_dynamic_scale
def dynamic_loss_scale_args(self):
return self._config.dynamic_loss_scale_args
def swap_tensor_config(self):
return self._config.swap_tensor_config
def aio_config(self):
return self._config.aio_config
def get_data_types(self):
model_dtype = torch.float32
if self.fp16_enabled():
model_dtype = torch.float16
elif self.bfloat16_enabled():
model_dtype = torch.bfloat16
        if self._config.grad_accum_dtype is None:
if model_dtype == torch.bfloat16 and not self.zero_optimization():
grad_accum_dtype = torch.float32
else:
grad_accum_dtype = model_dtype
else:
grad_accum_dtype = DtypeEnum(self._config.grad_accum_dtype).value
return (model_dtype, grad_accum_dtype)
def _configure_lr_scheduler(self, client_lr_scheduler):
# First check for scheduler in json configuration
lr_scheduler = self._scheduler_from_config(self.optimizer)
if lr_scheduler:
log_dist(f"DeepSpeed using configured LR scheduler = {self.scheduler_name()}", ranks=[0])
self.lr_scheduler = lr_scheduler
else:
if isinstance(client_lr_scheduler, Callable):
log_dist('DeepSpeed using client callable to create LR scheduler', ranks=[0])
self.lr_scheduler = client_lr_scheduler(self.basic_optimizer)
else:
log_dist('DeepSpeed using client LR scheduler', ranks=[0])
self.lr_scheduler = client_lr_scheduler
log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0])
def _configure_checkpointing(self, dist_init_required):
self.checkpoint_engine = TorchCheckpointEngine()
if self._config is not None and self._config.nebula_config.enabled:
try:
from deepspeed.runtime.checkpoint_engine.nebula_checkpoint_engine import \
NebulaCheckpointEngine
self.checkpoint_engine = NebulaCheckpointEngine(config_params=self._config.nebula_config)
except ImportError as err:
logger.error(f"No torch_nebula was found! Will fall back to torch.save. Details: {err}")
self.checkpoint_engine = TorchCheckpointEngine()
dp_rank = self.global_rank
if self.mpu:
dp_rank = self.mpu.get_data_parallel_rank()
rank = self.local_rank if self.use_node_local_storage() else dp_rank
# only the first data parallel process needs to store the model checkpoint
# if you want to use node local storage this must be done by rank 0 on each
# node
self.save_non_zero_checkpoint = (rank == 0) or self.zero_optimization_partition_weights()
if self.zero_optimization() or self.bfloat16_enabled():
param_rank = dist.get_rank(group=self.optimizer.dp_process_group)
# Only the first parameter parallel process needs to store the
# optimizer state checkpoints for zero
self.save_zero_checkpoint = param_rank == dp_rank
def _scheduler_from_config(self, optimizer):
scheduler_name = self.scheduler_name()
if scheduler_name is not None:
if hasattr(lr_schedules, scheduler_name):
scheduler = getattr(lr_schedules, scheduler_name)
else:
assert hasattr(torch.optim.lr_scheduler,
scheduler_name), f"DeepSpeed does not recognize LR scheduler {scheduler_name}"
scheduler = getattr(torch.optim.lr_scheduler, scheduler_name)
scheduler_params = self.scheduler_params()
instantiated_scheduler = scheduler(optimizer, **scheduler_params)
return instantiated_scheduler
else:
return None
def _set_distributed_vars(self, args):
device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank
if device_rank >= 0:
get_accelerator().set_device(device_rank)
self.device = torch.device(get_accelerator().device_name(), device_rank)
self.world_size = dist.get_world_size()
self.global_rank = dist.get_rank()
else:
self.world_size = 1
self.global_rank = 0
self.device = torch.device(get_accelerator().device_name())
# Configure based on command line arguments
def _configure_with_arguments(self, args, mpu):
# After the distributed backend is initialized we are guaranteed the LOCAL_RANK
# environment variable is set. We must align args.local_rank to this value for
# backwards compatibility with scripts relying on [args|self].local_rank containing
# the correct local rank info. _do_args_sanity_check will ensure this is the case.
if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ:
ompi_local_rank = os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK")
local_rank = os.environ.get('LOCAL_RANK', ompi_local_rank)
assert ompi_local_rank == local_rank, f"LOCAL_RANK ({local_rank}) != OMPI_COMM_WORLD_LOCAL_RANK ({ompi_local_rank}), " \
"not sure how to proceed as we're seeing conflicting local rank info."
os.environ['LOCAL_RANK'] = local_rank
self.local_rank = int(os.environ['LOCAL_RANK'])
if hasattr(args, 'local_rank'):
args.local_rank = self.local_rank
# Validate command line arguments
def _do_args_sanity_check(self, args):
assert "LOCAL_RANK" in os.environ or "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment " \
"variable, it is set by the deepspeed launcher, deepspeed.init_distributed, or the torch's launcher. If using a " \
"different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed."
        if hasattr(args, 'local_rank') and args.local_rank is not None:
assert isinstance(args.local_rank,
int), f"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}"
if args.local_rank >= 0:
env_local_rank = int(os.environ.get("LOCAL_RANK"))
assert (
env_local_rank == args.local_rank
), f"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}."
def _is_supported_optimizer(self, optimizer_name):
return (optimizer_name in DEEPSPEED_OPTIMIZERS or getattr(torch.optim, optimizer_name, None) is not None)
def _supported_optims(self):
FairseqOptimizer = None
try:
from fairseq.optim.fairseq_optimizer import FairseqOptimizer
except ImportError:
pass
expected_optim_types = [Optimizer]
if FairseqOptimizer:
# fairseq optims are not torch.optim objects
expected_optim_types.append(FairseqOptimizer)
return expected_optim_types
# Validate configuration based on command line arguments
def _do_sanity_check(self):
expected_optim_types = self._supported_optims()
expected_optim_types += [type(None), Callable]
assert isinstance(self.client_optimizer, tuple(expected_optim_types)), \
f'Client Optimizer is of unexpected type {type(self.client_optimizer)}'
if not self.client_optimizer:
if self.optimizer_name() is not None:
assert self._is_supported_optimizer(
self.optimizer_name()), "{} is not a supported DeepSpeed Optimizer".format(self.optimizer_name())
if (self.optimizer_name() == LAMB_OPTIMIZER or self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER):
assert (self.dynamic_loss_scale()), "DeepSpeed {} optimizer requires dynamic loss scaling".format(
self.optimizer_name())
# Detect invalid combinations of client optimizer and client scheduler
if isinstance(self.client_lr_scheduler, _LRScheduler):
assert isinstance(self.client_optimizer, Optimizer), \
                f'Client Optimizer (type = {type(self.client_optimizer)}) is not instantiated but Client LR Scheduler is instantiated'
def _broadcast_model(self):
def is_replicated(p):
if hasattr(p, "ds_status") and p.ds_status is not ZeroParamStatus.AVAILABLE:
return False
return True
for p in self.module.parameters():
# Broadcast the model for different parameters
if is_moe_param(p):
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p,
groups._get_expert_broadcast_src_rank(p.group_name),
group=self.expert_data_parallel_group[p.group_name])
else:
if torch.is_tensor(p) and is_replicated(p):
dist.broadcast(p, groups._get_broadcast_src_rank(), group=self.data_parallel_group)
@staticmethod
    def __check_params(model: Module, dtype: torch.dtype) -> None:
        # NOTE: this validation is currently disabled via the early return below;
        # the remaining body is kept for reference.
        return
if not all(param.dtype == dtype for param in model.parameters()) and dist.get_rank() == 0:
raise ValueError(f"{dtype} is enabled but the following parameters have dtype that is "
f"not {dtype}: "
f"{[(n, p.dtype) for n, p in model.named_parameters() if p.dtype != dtype]}")
def _set_client_model(self, model):
# register client model in _modules so that nn.module methods work correctly
modules = self.__dict__.get('_modules')
modules['module'] = model
# register module attribute in engine but avoid getattr
self.__dict__['module'] = model
def _configure_distributed_model(self, model):
self._set_client_model(model)
if self.fp16_enabled():
if self.zero_optimization_partition_weights() and any(
[hasattr(param, "ds_id") for param in self.module.parameters()]):
self.__check_params(self.module, torch.half)
self.module.half()
elif self.bfloat16_enabled():
if self.zero_optimization_partition_weights() and any(
hasattr(param, 'ds_id') for param in self.module.parameters()):
self.__check_params(self.module, torch.bfloat16)
self.module.bfloat16()
else:
self.__check_params(self.module, torch.float)
if not self.dont_change_device:
self.module.to(self.device)
# MoE related initialization
for _, module in self.module.named_modules():
if isinstance(module, MoE):
self.has_moe_layers = True
self.num_experts.append(module.num_experts)
if self.has_moe_layers:
for _, module in self.module.named_modules():
if isinstance(module, TopKGate):
self.gate_modules.append(module)
if self.wall_clock_breakdown():
module.wall_clock_breakdown = True
if isinstance(module, MOELayer):
self.moe_layers.append(module)
if self.wall_clock_breakdown():
module.wall_clock_breakdown = True
# Pass the mpu from here to groups. For subsequent use, just query groups
if self.mpu is not None:
groups.mpu = self.mpu
# Set deepspeed parallelism spec. for the model including expert parallelism
for _, module in self.module.named_modules():
if hasattr(module, 'set_deepspeed_parallelism'):
module.set_deepspeed_parallelism()
# Query the groups module to get information about various parallel groups
self.data_parallel_group = groups._get_data_parallel_group()
self.dp_world_size = groups._get_data_parallel_world_size()
self.mp_world_size = groups._get_model_parallel_world_size()
self.expert_parallel_group = groups._get_expert_parallel_group_dict()
self.expert_data_parallel_group = groups._get_expert_data_parallel_group_dict()
if not self.amp_enabled():
self._broadcast_model()
# check if parameters are duplicated in optimizer param_groups
def _check_for_duplicates(self, optimizer):
for name, param in self.module.named_parameters():
param_id = id(param)
def ids_list(group):
return [id(param) for param in group]
occurrence = sum([
ids_list(group['params']).count(param_id) if param_id in ids_list(group['params']) else 0
for group in optimizer.param_groups
])
assert occurrence <= 1, f"Parameter with name: {name} occurs multiple times in optimizer.param_groups. Make sure it only appears once to prevent undefined behaviour."
def _do_optimizer_sanity_check(self, basic_optimizer):
model_dtype, grad_accum_dtype = self.get_data_types()
zero_enabled = self.zero_optimization()
amp_enabled = self.amp_enabled()
# config based assertions
assert (
not (amp_enabled and zero_enabled)
), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2"
if zero_enabled:
if not is_zero_supported_optimizer(basic_optimizer):
assert (
self.zero_allow_untested_optimizer()
), 'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.'
if self.global_rank == 0:
logger.warning("**** You are using ZeRO with an untested optimizer, proceed with caution *****")
if model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32 and self.zero_optimization_stage(
) == 1:
return BFLOAT16
if model_dtype != grad_accum_dtype:
raise NotImplementedError(
"Model data type and gradient accumulation data type must be equal to use ZeRO")
return ZERO_OPTIMIZATION
elif amp_enabled:
if model_dtype != grad_accum_dtype:
raise NotImplementedError(
"Model data type and gradient accumulation data type must be equal to use Amp")
if model_dtype == torch.bfloat16 or model_dtype == torch.float16:
raise NotImplementedError("Cannot enable both amp with (legacy) fp16 or bfloat16 mode")
try:
logger.info("Initializing Apex amp from: {}".format(amp.__path__))
except NameError:
# If apex/amp is available it will be imported above
raise RuntimeError("Unable to import apex/amp, please make sure it is installed")
return AMP
# data type checks
elif model_dtype == grad_accum_dtype:
if model_dtype == torch.bfloat16:
raise NotImplementedError(
"Bfloat16 wrapper must use a gradient accumulation type of fp32, enable ZeRO to use Bfloat16 gradient accumulation"
)
if model_dtype == torch.float16:
return FP16
# else optimizer_wrapper = None
elif model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32:
return BFLOAT16
else:
raise NotImplementedError("unsupported mix of model dtype and gradient accummulation type")
return None
# Configure optimizer
def _configure_optimizer(self, client_optimizer, model_parameters):
if client_optimizer is not None:
if isinstance(client_optimizer, tuple(self._supported_optims())):
client_optimizer.param_groups[:] = [
pg for pg in client_optimizer.param_groups if len(pg["params"]) != 0
]
log_dist("Removing param_group that has no 'params' in the client Optimizer", ranks=[0])
basic_optimizer = client_optimizer
log_dist('Using client Optimizer as basic optimizer', ranks=[0])
else:
basic_optimizer = client_optimizer(model_parameters)
log_dist('Using client callable to create basic optimizer', ranks=[0])
if self.zero_use_cpu_optimizer() and not isinstance(basic_optimizer, deepspeed.ops.adam.DeepSpeedCPUAdam):
if self.zero_force_ds_cpu_optimizer():
msg = f'You are using ZeRO-Offload with a client provided optimizer ({type(basic_optimizer)}) which in most cases will yield poor performance. Please either use deepspeed.ops.adam.DeepSpeedCPUAdam or set an optimizer in your ds-config (https://www.deepspeed.ai/docs/config-json/#optimizer-parameters). If you really want to use a custom optimizer w. ZeRO-Offload and understand the performance impacts you can also set <"zero_force_ds_cpu_optimizer": false> in your configuration file.'
raise ZeRORuntimeException(msg)
else:
basic_optimizer = self._configure_basic_optimizer(model_parameters)
log_dist(f"Using DeepSpeed Optimizer param name {self.optimizer_name()} as basic optimizer", ranks=[0])
self._check_for_duplicates(basic_optimizer)
self.basic_optimizer = basic_optimizer
log_dist("DeepSpeed Basic Optimizer = {}".format(basic_optimizer.__class__.__name__), ranks=[0])
optimizer_wrapper = self._do_optimizer_sanity_check(basic_optimizer)
if optimizer_wrapper == ZERO_OPTIMIZATION:
self.optimizer = self._configure_zero_optimizer(basic_optimizer)
elif optimizer_wrapper == AMP:
amp_params = self.amp_params()
log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0])
model, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params)
self._set_client_model(model)
self._broadcast_model()
# TODO: maybe need to broadcast experts differently?
elif optimizer_wrapper == FP16:
self.optimizer = self._configure_fp16_optimizer(basic_optimizer)
elif optimizer_wrapper == BFLOAT16:
self.optimizer = self._configure_bf16_optimizer(basic_optimizer)
else:
self.optimizer = basic_optimizer
log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer_name()), ranks=[0])
self.compression_scheduler = self._configure_compression_scheduler()
self.quantizer = self._configure_quantization()
def _configure_basic_optimizer(self, model_parameters):
optimizer_parameters = self.optimizer_params()
if optimizer_parameters is None:
optimizer_parameters = {}
# print(optimizer_parameters.keys())
if "max_grad_norm" in optimizer_parameters.keys():
raise ValueError(
"'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details"
)
if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]:
torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False)
adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)
# Optimizer name of Adam forces AdamW logic unless adam_w_mode is explicitly set
effective_adam_w_mode = self.optimizer_name() == ADAMW_OPTIMIZER or adam_w_mode
if torch_adam:
if not effective_adam_w_mode:
optimizer = torch.optim.Adam(model_parameters, **optimizer_parameters)
else:
optimizer = torch.optim.AdamW(model_parameters, **optimizer_parameters)
else:
if self.zero_use_cpu_optimizer():
from deepspeed.ops.adam import DeepSpeedCPUAdam
optimizer = DeepSpeedCPUAdam(model_parameters,
**optimizer_parameters,
adamw_mode=effective_adam_w_mode)
else:
from deepspeed.ops.adam import FusedAdam
optimizer = FusedAdam(
model_parameters,
**optimizer_parameters,
adam_w_mode=effective_adam_w_mode,
)
elif self.optimizer_name() == ADAGRAD_OPTIMIZER:
if self.zero_use_cpu_optimizer():
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
optimizer = DeepSpeedCPUAdagrad(model_parameters, **optimizer_parameters)
else:
optimizer = torch.optim.Adagrad(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == LAMB_OPTIMIZER:
from deepspeed.ops.lamb import FusedLamb
optimizer = FusedLamb(model_parameters, **optimizer_parameters)
elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER:
assert not self.zero_optimization(), "1bit-Adam is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.adam import OnebitAdam
optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(f"Currently the convergence of 1-bit Adam is only verified under FP16")
elif self.optimizer_name() == ZERO_ONE_ADAM_OPTIMIZER:
assert not self.zero_optimization(), "0/1 Adam is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.zoadam import ZeroOneAdam
optimizer = ZeroOneAdam(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(f'Currently the convergence of 0/1 Adam is only verified under FP16')
elif self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER:
assert not self.zero_optimization(), "1bit-Lamb is not compatible with ZeRO"
from deepspeed.runtime.fp16.onebit.lamb import OnebitLamb
optimizer = OnebitLamb(model_parameters, self, **optimizer_parameters)
if not self.fp16_enabled():
logger.warning(f"Currently the convergence of 1-bit Lamb is only verified under FP16")
else:
torch_optimizer = getattr(torch.optim, self.optimizer_name())
optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
return optimizer
def _configure_compression_scheduler(self):
return compression_scheduler(self.module, self._config.compression_config)
def _configure_random_ltd_scheduler(self, configs):
return RandomLTDScheduler(configs)
def _configure_quantization(self):
(
quantize_weight_in_forward,
quantize_enabled,
q_groups,
q_mixed_fp16,
q_change_ratio,
q_type,
q_rounding,
q_verbose,
use_quantizer_kernel,
) = self.quantize_training()
if quantize_enabled and not quantize_weight_in_forward:
assert self.fp16_enabled(
), "MoQ (quantize in optimization step) weight quantization is only supported for FP16"
quantizer = None
if quantize_enabled and not quantize_weight_in_forward:
from deepspeed.runtime.quantize import Quantizer
quantizer = Quantizer(
q_groups,
q_mixed_fp16,
q_change_ratio,
q_type,
q_rounding,
q_verbose,
self.eigenvalue_enabled(),
use_quantizer_kernel,
self.eigenvalue_layer_num() if self.eigenvalue_enabled() else 0,
)
return quantizer
def _configure_fp16_optimizer(self, optimizer):
initial_dynamic_scale = self.initial_dynamic_scale()
dynamic_loss_args = self.dynamic_loss_scale_args()
clip_grad = self.gradient_clipping()
if APEX_INSTALLED:
fused_opts = (apex.optimizers.FusedAdam, FusedAdam)
else:
fused_opts = FusedAdam
if isinstance(optimizer, fused_opts) \
or self.optimizer_name() in [ONEBIT_ADAM_OPTIMIZER, ZERO_ONE_ADAM_OPTIMIZER]:
if self.dynamic_loss_scale():
log_dist(f'Creating fp16 optimizer with dynamic loss scale', ranks=[0])
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = FP16_Optimizer(
optimizer,
deepspeed=self,
dynamic_loss_scale=True,
initial_dynamic_scale=initial_dynamic_scale,
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
timers=timers,
has_moe_layers=self.has_moe_layers,
)
else:
log_dist(f'Creating fp16 optimizer with static loss scale: {self.loss_scale()}', ranks=[0])
optimizer = FP16_Optimizer(
optimizer,
deepspeed=self,
static_loss_scale=self.loss_scale(),
mpu=self.mpu,
clip_grad=clip_grad,
fused_adam_legacy=self.optimizer_legacy_fusion(),
has_moe_layers=self.has_moe_layers,
)
else:
log_dist(f'Creating fp16 unfused optimizer with dynamic loss scale', ranks=[0])
optimizer = FP16_UnfusedOptimizer(
optimizer,
deepspeed=self,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=dynamic_loss_args,
mpu=self.mpu,
clip_grad=clip_grad,
fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER,
)
return optimizer
def _configure_bf16_optimizer(self, optimizer):
clip_grad = self.gradient_clipping()
if optimizer is None:
optimizer = DummyOptim(list(self.module.parameters()))
log_dist('Creating BF16 optimizer', ranks=[0])
timers = self.timers if self.wall_clock_breakdown() else None
optimizer = BF16_Optimizer(optimizer,
self.param_names,
mpu=self.mpu,
clip_grad=clip_grad,
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
timers=timers)
return optimizer
def _configure_zero_optimizer(self, optimizer):
zero_stage = self.zero_optimization_stage()
mics_shard_size = self.mics_shard_size()
model_dtype, grad_accum_dtype = self.get_data_types()
timers = self.timers if self.wall_clock_breakdown() else None
if optimizer is None:
optimizer = DummyOptim(list(self.module.parameters()))
if self.zero_legacy_stage1():
raise Exception(
"The deprecated version of ZeRO Stage 1 is not supported in deepspeed >= 0.5.9. Please downgrade to a version less than 0.5.9 if you need to use this deprecated version of ZeRO."
)
if zero_stage <= ZeroStageEnum.gradients:
overlap_comm = self.zero_overlap_comm()
contiguous_gradients = self.zero_contiguous_gradients()
round_robin_gradients = self.zero_round_robin_gradients()
assert not isinstance(optimizer, DummyOptim), "zero stage {} requires an optimizer".format(zero_stage)
log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0])
# Overlap and contiguous grads are meaningless in stage 1 and are ignored
if zero_stage == ZeroStageEnum.optimizer_states:
overlap_comm = False
round_robin_gradients = False
# Non-MoE requires contiguous grads to be disabled w. stage 1
if not self.has_moe_layers:
contiguous_gradients = False
if isinstance(self.module, PipelineModule):
if overlap_comm:
logger.warning("Pipeline parallelism does not support overlapped communication, will be disabled.")
overlap_comm = False
optimizer = DeepSpeedZeroOptimizer(
optimizer,
self.param_names,
timers=timers,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=contiguous_gradients,
reduce_bucket_size=self.zero_reduce_bucket_size(),
allgather_bucket_size=self.zero_allgather_bucket_size(),
dp_process_group=self.data_parallel_group,
expert_parallel_group=self.expert_parallel_group if self.has_moe_layers else None,
expert_data_parallel_group=self.expert_data_parallel_group if self.has_moe_layers else None,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=overlap_comm,
cpu_offload=self.zero_cpu_offload(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
ignore_unused_parameters=self.zero_ignore_unused_parameters(),
partition_grads=zero_stage == ZeroStageEnum.gradients,
round_robin_gradients=round_robin_gradients,
has_moe_layers=self.has_moe_layers,
fp16_master_weights_and_gradients=self.fp16_master_weights_and_gradients(),
communication_data_type=self.communication_data_type,
elastic_checkpoint=self.zero_elastic_checkpoint())
elif zero_stage == ZeroStageEnum.weights:
assert not self.has_moe_layers, "MoE not supported with Stage 3"
if isinstance(optimizer, DummyOptim):
log_dist("Creating ZeRO Offload", ranks=[0])
optimizer = DeepSpeedZeRoOffload(self.module,
timers=timers,
ds_config=self.config,
overlap_comm=self.zero_overlap_comm(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
model_persistence_threshold=self.zero_model_persistence_threshold(),
offload_param_config=self.zero_offload_param(),
mpu=self.mpu)
else:
log_dist(
f'Creating fp16 ZeRO stage {zero_stage} optimizer,'
f' MiCS is enabled {mics_shard_size>0},'
f' Hierarchical params gather {self._config.mics_hierarchial_params_gather}',
ranks=[0])
if mics_shard_size > 0:
return self._return_mics_optimizer(optimizer, timers)
log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0])
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
optimizer = DeepSpeedZeroOptimizer_Stage3(
self.module,
optimizer,
timers=timers,
ds_config=self.config,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
model_persistence_threshold=self.zero_model_persistence_threshold(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
offload_optimizer_config=self.zero_offload_optimizer(),
offload_param_config=self.zero_offload_param(),
sub_group_size=self.zero_sub_group_size(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
aio_config=self.aio_config(),
communication_data_type=self.communication_data_type)
else:
raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage))
return optimizer
def _return_mics_optimizer(self, basic_optimizer, timers):
from deepspeed.runtime.zero.mics import MiCS_Optimizer
optimizer = MiCS_Optimizer(self.module,
basic_optimizer,
timers=timers,
ds_config=self.config,
static_loss_scale=self.loss_scale(),
dynamic_loss_scale=self.dynamic_loss_scale(),
dynamic_loss_args=self.dynamic_loss_scale_args(),
clip_grad=self.gradient_clipping(),
contiguous_gradients=self.zero_contiguous_gradients(),
reduce_bucket_size=self.zero_reduce_bucket_size(),
prefetch_bucket_size=self.zero_prefetch_bucket_size(),
max_reuse_distance=self.zero_max_reuse_distance(),
max_live_parameters=self.zero_max_live_parameters(),
param_persistence_threshold=self.zero_param_persistence_threshold(),
model_persistence_threshold=self.zero_model_persistence_threshold(),
dp_process_group=self.data_parallel_group,
reduce_scatter=self.zero_reduce_scatter(),
overlap_comm=self.zero_overlap_comm(),
offload_optimizer_config=self.zero_offload_optimizer(),
offload_param_config=self.zero_offload_param(),
sub_group_size=self.zero_sub_group_size(),
mpu=self.mpu,
postscale_gradients=self.postscale_gradients(),
gradient_predivide_factor=self.gradient_predivide_factor(),
gradient_accumulation_steps=self.gradient_accumulation_steps(),
aio_config=self.aio_config(),
communication_data_type=self.communication_data_type)
return optimizer
def _configure_eigenvalue(self):
eigenvalue = Eigenvalue(
verbose=self.eigenvalue_verbose(),
max_iter=self.eigenvalue_max_iter(),
tol=self.eigenvalue_tol(),
stability=self.eigenvalue_stability(),
gas_boundary_resolution=self.eigenvalue_gas_boundary_resolution(),
layer_name=self.eigenvalue_layer_name(),
layer_num=self.eigenvalue_layer_num(),
)
return eigenvalue
def _configure_progressive_layer_drop(self):
pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma())
return pld
def _configure_curriculum_scheduler_legacy(self):
scheduler = CurriculumScheduler(self.curriculum_params_legacy())
return scheduler
@staticmethod
def is_map_style_dataset(obj):
return hasattr(obj, "__getitem__") and hasattr(obj, "__len__")
@staticmethod
def is_iterable_style_dataset(obj):
return isinstance(obj, torch.utils.data.IterableDataset) # hasattr(obj, "__iter__") should work as well
def dataloader_drop_last(self):
return self._config.dataloader_drop_last
def was_step_applied(self) -> bool:
"""Returns True if the latest ``step()`` produced in parameter updates.
Note that a ``False`` return is not an error condition. Steps are frequently
no-ops, such as between gradient accumulation boundaries or when overflows
occur.
Returns:
bool: Whether the latest ``step()`` modified model parameters.
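        A usage sketch (``engine`` is an initialized DeepSpeed engine and
        ``log_learning_rate`` is a hypothetical helper shown only for illustration):

        .. code-block:: python

            engine.backward(loss)
            engine.step()
            if engine.was_step_applied():
                # parameters changed: not an accumulation no-op and no overflow
                log_learning_rate(engine.get_lr()[0])  # hypothetical logging helper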
"""
return self._step_applied
def deepspeed_io(self,
dataset,
batch_size=None,
route=ROUTE_TRAIN,
pin_memory=True,
data_sampler=None,
collate_fn=None,
num_local_io_workers=None):
if not (self.is_map_style_dataset(dataset) or self.is_iterable_style_dataset(dataset)):
raise ValueError("Training data must be a torch Dataset")
if batch_size is None:
batch_size = self.train_micro_batch_size_per_gpu()
if collate_fn is None:
collate_fn = self.collate_fn
# Currently we only use timer in train route
deepspeed_io_timer = None
if route == ROUTE_TRAIN:
deepspeed_io_timer = self.tput_timer
# If mpu is provided, forward world size and parallel rank to sampler.
data_parallel_world_size = self.dp_world_size
data_parallel_rank = self.global_rank
if self.mpu is not None:
data_parallel_world_size = self.mpu.get_data_parallel_world_size()
data_parallel_rank = self.mpu.get_data_parallel_rank()
if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL):
data_sampler = torch.utils.data.DistributedSampler(
dataset,
num_replicas=data_parallel_world_size,
rank=data_parallel_rank,
shuffle=False,
)
deepspeed_dataloader_config = {}
if self.curriculum_learning_enabled():
deepspeed_dataloader_config = {
CURRICULUM_LEARNING: self.curriculum_learning_enabled(),
DATA_EFFICIENCY: self.data_efficiency_config(),
DATA_PARALLEL_GROUP: self.data_parallel_group,
GRADIENT_ACCUMULATION_STEPS: self.gradient_accumulation_steps(),
GLOBAL_RANK: self.global_rank,
DATA_SAMPLING_NUM_WORKERS: self.data_sampling_config()[DATA_SAMPLING_NUM_WORKERS]
}
return DeepSpeedDataLoader(dataset=dataset,
batch_size=batch_size,
pin_memory=pin_memory,
collate_fn=collate_fn,
local_rank=self.local_rank,
tput_timer=deepspeed_io_timer,
num_local_io_workers=num_local_io_workers,
data_sampler=data_sampler,
data_parallel_world_size=data_parallel_world_size,
data_parallel_rank=data_parallel_rank,
dataloader_drop_last=self.dataloader_drop_last(),
deepspeed_dataloader_config=deepspeed_dataloader_config)
def train(self, mode=True):
r""""""
self.warn_unscaled_loss = True
self.module.train(mode)
def eval(self):
r""""""
self.warn_unscaled_loss = True
self.module.train(False)
def _scale_loss_by_gas(self, prescaled_loss):
if isinstance(prescaled_loss, torch.Tensor):
scaled_loss = prescaled_loss / self.gradient_accumulation_steps()
elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list):
scaled_loss = []
for l in prescaled_loss:
if isinstance(l, torch.Tensor):
scaled_loss.append(l / self.gradient_accumulation_steps())
else:
scaled_loss.append(l)
else:
scaled_loss = prescaled_loss
if self.warn_unscaled_loss:
logger.warning(f"DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}")
self.warn_unscaled_loss = False
return scaled_loss
@instrument_w_nvtx
def forward(self, *inputs, **kwargs):
r"""Execute forward propagation
Arguments:
*inputs: Variable length input list
**kwargs: variable length keyword arguments
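        A minimal call sketch (the engine forwards all arguments to the wrapped
        ``nn.Module``; the ``input_ids``/``labels`` keyword names are illustrative):

        .. code-block:: python

            loss = engine(input_ids=batch["input_ids"], labels=batch["labels"])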
"""
if self.autotuning_profile_model_info():
ma = get_ma_status()
else:
see_memory_usage("Engine before forward", force=self.memory_breakdown())
flops_profiler_active = (self.flops_profiler_enabled()
and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0)
# used to check quantization happens at step 0!
if self.global_steps == 0 and hasattr(self, "compression_scheduler"):
self.compression_scheduler.step(step_zero_check=True)
if self.quantizer:
tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage(
) == 2 else self.optimizer.fp16_groups
if self.compression_scheduler.weight_quantization_enabled:
self.quantizer.quantize(
tensor_to_quantize,
(self.optimizer.overflow if self.fp16_enabled() else False),
self.eigenvalue_enabled(),
None,
)
if flops_profiler_active:
self.flops_profiler.start_profile(ignore_list=None)
if self.module.training:
if self.progressive_layer_drop:
kwargs.update(self.progressive_layer_drop.get_state())
if self.__class__.__name__ != "PipelineEngine":
# TODO: The above if condition is a HACK since for PipelineEngine
# it's difficult to inject argument in forward pass.
if self.module.training and self.curriculum_enabled_legacy():
self.curriculum_scheduler_legacy.update_difficulty(self.global_steps + 1)
if self.curriculum_params_legacy()["curriculum_type"] == "seqlen":
kwargs.update({"curriculum_seqlen": self.curriculum_scheduler_legacy.get_current_difficulty()})
if self.module.training and self.random_ltd_enabled():
self.random_ltd_scheduler.update_seq(self.global_steps)
if self.zero_optimization_partition_weights():
# Enable automated discovery of external parameters by indicating that
# we are in a forward pass.
for module in self.module.modules():
module._parameters._in_forward = True
self._start_timers(self.engine_timers.forward_timers)
if self.training_dataloader is None:
self.tput_timer.start()
if self.fp16_auto_cast():
inputs = self._cast_inputs_half(inputs)
loss = self.module(*inputs, **kwargs)
if self.zero_optimization_partition_weights():
# Disable automated discovery of external parameters
for module in self.module.modules():
module._parameters._in_forward = False
self._stop_timers(self.engine_timers.forward_timers)
if flops_profiler_active:
self.flops_profiler.stop_profile()
if self.autotuning_profile_model_info():
activation_mem = get_ma_status() - ma
self.autotuning_model_info["activation_mem_per_gpu"] = activation_mem
print_json_dist(self.autotuning_model_info, [0], path=self.autotuning_model_info_path())
exit()
else:
see_memory_usage("Engine after forward", force=self.memory_breakdown())
return loss
def _cast_inputs_half(self, inputs):
if isinstance(inputs, (list, tuple)):
new_inputs = []
for v in inputs:
new_inputs.append(self._cast_inputs_half(v))
return inputs.__class__(new_inputs)
elif isinstance(inputs, dict):
new_inputs = {}
for k, v in inputs.items():
new_inputs[k] = self._cast_inputs_half(v)
return new_inputs
elif hasattr(inputs, 'half'):
return inputs.half()
else:
return inputs
def print_forward_breakdown(self, fwd_time):
gate_time = 0.0
moe_time = 0.0
falltoall = 0.0
salltoall = 0.0
for gate in self.gate_modules:
#logger.info(f"Individual TopK gate time: {gate.gate_time:.2f} ms")
gate_time += gate.gate_time
for l in self.moe_layers:
#logger.info(f"MoE layer; total: {l.time_moe:.2f} ms, first alltoall: {l.time_falltoall:.2f}, second alltoall: {l.time_salltoall:.2f}")
moe_time += l.time_moe
falltoall += l.time_falltoall
salltoall += l.time_salltoall
# TODO: Allreduce/average them across ranks for more accurate timing.
# if deepspeed.comm.get_rank() == 0:
log_dist(
f"rank={dist.get_rank()} time (ms) | forward: {fwd_time:.2f} (forward_moe: {moe_time:.2f}, 1st alltoall: {falltoall:.2f}, 2nd alltoall: {salltoall:.2f}, top-k: {gate_time:.2f})",
ranks=[0])
@instrument_w_nvtx
def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE):
assert not (self.bfloat16_enabled() and self.pipeline_parallelism), \
f'allreduce_gradients() is not valid when bfloat+pipeline_parallelism is enabled'
# Pass (PP) gas boundary flag to optimizer (required for zero)
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()
# ZeRO stage >= 2 communicates during non gradient accumulation boundaries as well
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
# Communicate only at gradient accumulation boundaries
elif self.is_gradient_accumulation_boundary():
if self.zero_optimization_stage() == ZeroStageEnum.optimizer_states and hasattr(
self.optimizer, 'reduce_gradients'):
self.optimizer.reduce_gradients(pipeline_parallel=self.pipeline_parallelism)
else:
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size)
@instrument_w_nvtx
def backward(self, loss, allreduce_gradients=True, release_loss=False, retain_graph=False, scale_wrt_gas=True):
r"""Execute backward pass on the loss
Arguments:
loss: Torch tensor on which to execute backward propagation
            allreduce_gradients: deprecated and ignored; it will soon be removed
            retain_graph: bool, default: False
                forward the user-specified ``retain_graph`` flag to the underlying backward call
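        A typical micro-batch loop (a hedged sketch; ``engine`` and ``data_loader``
        are provided by the caller, e.g. via ``deepspeed.initialize``):

        .. code-block:: python

            for micro_batch in data_loader:
                loss = engine(micro_batch)  # forward
                engine.backward(loss)       # backward (+ gradient reduction)
                engine.step()               # optimizer step at accumulation boundaries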
"""
see_memory_usage("Engine before backward", force=self.memory_breakdown())
if self.scale_wrt_gas is not None:
scale_wrt_gas = self.scale_wrt_gas
if not allreduce_gradients:
logger.warning(f"Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed")
# scale loss w.r.t. gradient accumulation if needed
if self.gradient_accumulation_steps() > 1 and scale_wrt_gas:
loss = self._scale_loss_by_gas(loss.float())
# Log training Loss
if self.monitor.enabled:
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(
f"Train/Samples/train_loss",
loss.mean().item() * self.gradient_accumulation_steps(),
self.global_samples,
)]
self.monitor.write_events(self.summary_events)
self._start_timers(self.engine_timers.backward_timers)
assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \
"must provide optimizer during init in order to use backward"
self._start_timers(self.engine_timers.backward_inner_timers)
if self.zero_optimization():
self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary()
self.optimizer.backward(loss, retain_graph=retain_graph)
elif self.amp_enabled():
# AMP requires delaying unscale when inside gradient accumulation boundaries
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
delay_unscale = not self.is_gradient_accumulation_boundary()
with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss:
scaled_loss.backward(retain_graph=retain_graph)
elif self.fp16_enabled():
if self.eigenvalue_enabled():
self.optimizer.backward(loss, create_graph=True, retain_graph=True)
else:
self.optimizer.backward(loss, retain_graph=retain_graph)
elif self.bfloat16_enabled():
self.optimizer.backward(loss)
else:
if self.eigenvalue_enabled():
loss.backward(create_graph=True, retain_graph=True)
else:
loss.backward(retain_graph=retain_graph)
self._stop_timers(self.engine_timers.backward_inner_timers)
self._start_timers(self.engine_timers.backward_reduce_timers)
if allreduce_gradients and self.enable_backward_allreduce:
# Traditional code path that allreduces the module parameter grads
self.allreduce_gradients()
self._stop_timers(self.engine_timers.backward_reduce_timers)
self._stop_timers(self.engine_timers.backward_timers)
if release_loss:
# loss.data = None
pass
see_memory_usage("Engine after backward", force=self.memory_breakdown())
return loss
def is_gradient_accumulation_boundary(self):
"""
Query whether the current micro-batch is at the boundary of
gradient accumulation, and thus will trigger gradient reductions and
an optimizer step.
Returns:
bool: if the current step is a gradient accumulation boundary.
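        A sketch of how this interacts with gradient accumulation (values are
        illustrative; ``engine`` is an initialized DeepSpeed engine):

        .. code-block:: python

            # with gradient_accumulation_steps == 4, only every 4th micro-step
            # reduces gradients and applies an optimizer update
            loss = engine(micro_batch)
            engine.backward(loss)
            if engine.is_gradient_accumulation_boundary():
                pass  # the following engine.step() will update parameters
            engine.step()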
"""
if self._is_gradient_accumulation_boundary is None:
return (self.micro_steps + 1) % \
self.gradient_accumulation_steps() == 0
else:
return self._is_gradient_accumulation_boundary
def set_gradient_accumulation_boundary(self, is_boundary):
"""
        Manually overrides the DeepSpeed engine's gradient accumulation boundary state. This is an optional
        feature and should be used with care. The state should be set to the intended
        value before each forward/backward pass. The final forward/backward pass should have the
        boundary state set to True. This style allows client code to call engine.step() only once after all
        the gradient accumulation passes are complete. See the example below:
.. code-block:: python
engine.set_gradient_accumulation_boundary(False)
for _ in range(gradient_accumulation_steps - 1):
micro_batch = next(data_loader)
loss = engine(micro_batch)
engine.backward(loss)
engine.set_gradient_accumulation_boundary(True)
micro_batch = next(data_loader)
loss = engine(micro_batch)
engine.backward(loss)
engine.step()
Arguments:
is_boundary (bool): are we at a gradient accumulation boundary or not?
"""
self._is_gradient_accumulation_boundary = is_boundary
self.optimizer.is_gradient_accumulation_boundary = is_boundary
def zero_grad(self):
"""
Zero parameter grads.
"""
for param_name, param in self.module.named_parameters():
param.grad = None
def clip_fp32_gradients(self):
clip_grad_norm_(parameters=self.module.parameters(), max_norm=self.gradient_clipping(), mpu=self.mpu)
def _take_model_step(self, lr_kwargs, block_eigenvalue={}):
if self.gradient_clipping() > 0.0:
if not (self.fp16_enabled() or self.bfloat16_enabled() or self.amp_enabled() or self.zero_optimization()):
self.clip_fp32_gradients()
elif self.amp_enabled():
# AMP's recommended way of doing clipping
# https://nvidia.github.io/apex/advanced.html#gradient-clipping
master_params = amp.master_params(self.optimizer)
clip_grad_norm_(parameters=master_params, max_norm=self.gradient_clipping(), mpu=self.mpu)
self.optimizer.step()
if hasattr(self.optimizer, '_global_grad_norm'):
self._global_grad_norm = self.optimizer._global_grad_norm
# Quantize the updated parameter if there is no overflow
if self.quantizer:
tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage(
) == 2 else self.optimizer.fp16_groups
if self.compression_scheduler.weight_quantization_enabled:
self.quantizer.quantize(
tensor_to_quantize,
(self.optimizer.overflow if self.fp16_enabled() else False),
self.eigenvalue_enabled(),
block_eigenvalue,
)
# zero grad in basic optimizer could be unreliable and may not exhibit
# the behaviour that we want
if self.bfloat16_enabled():
# TODO: Temporary until bf16_optimizer and zero_optimizer are integrated
if self.zero_optimization() and hasattr(self.optimizer, "zero_grad"):
self.optimizer.zero_grad()
else:
pass
elif self.zero_optimization() or self.fp16_enabled() or self.amp_enabled():
self.optimizer.zero_grad()
else:
self.zero_grad()
        report_progress = self.global_rank == 0
# Check overflow here since in DS fp16 optimizer, the overflow is updated in above step() function.
overflow = False
if hasattr(self.optimizer, "overflow"):
overflow = self.optimizer.overflow
self._step_applied = not overflow
if overflow:
self.skipped_steps += 1
else:
self.compression_scheduler.step()
if self.lr_scheduler is not None:
try:
self.lr_scheduler.step(**(lr_kwargs or {}))
except TypeError:
# XXX Hack to work with Megatron 2.0 and DeepSpeed pipelines.
# We don't currently have a way to specify lr_kwargs from
# pipe_engine.train_batch()
self.lr_scheduler.step(increment=self.train_batch_size())
if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0:
self._report_progress(self.global_steps + 1)
self.global_steps += 1
self.global_samples += self.train_batch_size()
def step(self, lr_kwargs=None):
r"""Execute the weight update step after forward and backward propagation
        on an effective train batch (i.e., after gradient accumulation completes).
"""
see_memory_usage("Engine before step", force=self.memory_breakdown())
# Check early because self.global_steps is incremented at some point here.
# TODO: Delay self.global_steps increment until very end of this function.
flops_profiler_active = self.flops_profiler_enabled(
) and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0
self._start_timers(self.engine_timers.step_timers)
assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \
"must provide optimizer during init in order to use step"
report_progress = False
self._step_applied = False # assume False, will flip to True
# Update the model when we reach gradient accumulation boundaries
if self.is_gradient_accumulation_boundary():
self.gas_boundary_ctr += 1
if (self.eigenvalue_enabled() and (self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution() == 0)
and self.quantizer.any_precision_switch()):
log_dist(f"computing eigenvalue...", ranks=[0])
self.block_eigenvalue = self.eigenvalue.compute_eigenvalue(self.module, self.device,
self.optimizer.cur_scale)
if self.progressive_layer_drop:
self.progressive_layer_drop.update_state(self.global_steps)
if (self.eigenvalue_enabled() and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution()
and self.quantizer.any_precision_switch()):
self._take_model_step(lr_kwargs, self.block_eigenvalue)
else:
self._take_model_step(lr_kwargs)
            report_progress = self.global_rank == 0
self.tput_timer.stop(global_step=self.is_gradient_accumulation_boundary(), report_speed=report_progress)
self._stop_timers(self.engine_timers.step_timers)
# Log learning rate
if self.monitor.enabled:
if self.is_gradient_accumulation_boundary():
if self.global_rank == 0:
self.summary_events = [(f"Train/Samples/lr", self.get_lr()[0], self.global_samples)]
if self.fp16_enabled() and hasattr(self.optimizer, "cur_scale"):
self.summary_events.append((
f"Train/Samples/loss_scale",
self.optimizer.cur_scale,
self.global_samples,
))
if (self.eigenvalue_enabled()
and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution()):
                        ev_values = list(self.block_eigenvalue.values())
for i in range(len(ev_values)):
self.summary_events.append((
f"Train/Eigenvalues/ModelBlockParam_{i}",
                                ev_values[i][0],
self.global_samples,
))
self.monitor.write_events(self.summary_events)
# Check flops profiling
if flops_profiler_active:
if self.autotuning_enabled():
self.flops = self.flops_profiler.get_total_flops() * 3
else:
self.flops_profiler.print_model_profile(
profile_step=self.global_steps,
module_depth=self.flops_profiler_module_depth(),
top_modules=self.flops_profiler_top_modules(),
detailed=self.flops_profiler_detailed(),
output_file=self.flops_profiler_output_file(),
)
self.flops_profiler.end_profile()
if self.autotuning_enabled() and self.global_steps == (self.autotuning_end_profile_step() + 1):
self._autotuning_exit()
if self.wall_clock_breakdown():
# Log micro timing and reset
self.timers.log(names=self.engine_timers.micro_timers, memory_breakdown=self.memory_breakdown())
if self.wall_clock_breakdown() or self.flops_profiler_enabled():
# Log global timing and reset
if self.is_gradient_accumulation_boundary():
if self.monitor.enabled:
self._write_monitor()
if self.has_moe_layers:
fwd_time = self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False)
self.print_forward_breakdown(fwd_time=fwd_time)
self.timers.log(self.engine_timers.global_timers)
self.micro_steps += 1
see_memory_usage("Engine after step", force=self.memory_breakdown())
def _start_timers(self, timer_names):
for name in timer_names:
self.timers(name).start()
def _stop_timers(self, timer_names):
record = self.is_gradient_accumulation_boundary() and \
self.flops_profiler_enabled() and \
(self.global_steps >= self.flops_profiler_profile_step())
for name in timer_names:
self.timers(name).stop(record=record)
def _autotuning_exit(self):
if self.global_rank == 0:
msg = self.timers.get_mean([
FORWARD_GLOBAL_TIMER,
BACKWARD_GLOBAL_TIMER,
STEP_GLOBAL_TIMER,
], reset=False)
titer = msg[FORWARD_GLOBAL_TIMER] + msg[BACKWARD_GLOBAL_TIMER] + msg[STEP_GLOBAL_TIMER]
msg["latency"] = titer
msg["FLOPS_per_gpu"] = self.flops * 1_000_000 * self.gradient_accumulation_steps() / titer
msg["throughput"] = self.train_batch_size() * 1_000_000 / \
msg["latency"]
print_json_dist(msg, [0], path=self.autotuning_metric_path())
log_dist(
f"Wrote metrics to {self.autotuning_metric_path()}, {os.path.abspath(self.autotuning_metric_path())}",
ranks=[0])
import atexit
atexit.register(print, "Autotuning: done with running current ds config.")
exit()
def _write_monitor(self):
if self.global_rank == 0:
self.summary_events = [
(
f"Train/Samples/elapsed_time_ms_forward",
self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_backward",
self.timers(BACKWARD_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_backward_inner",
self.timers(BACKWARD_INNER_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_backward_allreduce",
self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
(
f"Train/Samples/elapsed_time_ms_step",
self.timers(STEP_GLOBAL_TIMER).elapsed(reset=False),
self.global_samples,
),
]
self.monitor.write_events(self.summary_events)
def _get_optimizer_param(self, param_name):
result = []
if not self.optimizer:
return result
for group in self.optimizer.param_groups:
if param_name in group:
result.append(group[param_name])
else:
result.append(0.0)
return result
def get_lr(self):
return self._get_optimizer_param("lr")
def get_type(self):
return self._get_optimizer_param("type")
def get_mom(self):
if self.optimizer_name() in ["SGD", "RMSprop"]:
return self._get_optimizer_param("momentum")
else:
return self._get_optimizer_param("betas")
def get_pld_theta(self):
if self.progressive_layer_drop:
return self.progressive_layer_drop.get_theta()
else:
return None
def _report_progress(self, step):
lr = self.get_lr()
mom = self.get_mom()
log_dist(f"step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}", ranks=[0])
def allreduce_bucket(self, bucket, dp_group):
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if self.communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(self.communication_data_type)
if self.postscale_gradients():
if self.gradient_predivide_factor() != 1.0:
tensor_to_allreduce.mul_(1.0 / self.gradient_predivide_factor())
dist.all_reduce(tensor_to_allreduce, group=dp_group)
if self.gradient_average:
if self.gradient_predivide_factor() != dist.get_world_size(group=dp_group):
tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group))
else:
tensor_to_allreduce.mul_(1. / dist.get_world_size(group=dp_group))
dist.all_reduce(tensor_to_allreduce, group=dp_group)
if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
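    # Numeric sketch of the post-scaling branch above (illustrative): with a data
    # parallel world size of 4 and gradient_predivide_factor() == 2, each bucket is
    # scaled by 1/2 before the all_reduce and by 2/4 afterwards, giving the usual
    # 1/4 average while keeping intermediate fp16 values in a safer range.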
def allreduce_and_copy(self, small_bucket, dp_group):
allreduced = self.allreduce_bucket(small_bucket, dp_group)
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, dp_group)
small_bucket = []
numel = 0
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, dp_group)
def _get_gradients_for_reduction(self):
non_expert_grads = []
expert_grads = {}
if self.has_moe_layers:
for key in self.expert_data_parallel_group.keys():
expert_grads[key] = []
for param_name, param in self.module.named_parameters():
if param.grad is None:
                # In cases where there is an imbalance of empty grads across
                # ranks we must create zero-filled grads so that every rank
                # reduces buffers of the same size. In the future it may make
                # sense to support averaging w.r.t. a value other than the
                # world size.
param.grad = torch.zeros(param.size(), dtype=param.dtype, device=param.device)
grad_data = param.grad.data
if param_name in self.sparse_tensor_module_names or grad_data.is_sparse:
                # Wrap param.grad itself (not grad.data) so that later updates to the grad are reflected
grad_data = SparseTensor(param.grad)
if is_moe_param(param):
expert_grads[param.group_name].append(grad_data)
else:
non_expert_grads.append(grad_data)
return non_expert_grads, expert_grads
def _reduce_non_expert_gradients(self, grads, elements_per_buffer):
split_buckets = split_half_float_double_sparse(grads)
for _, bucket_tuple in enumerate(split_buckets):
bucket_type, bucket = bucket_tuple
if self.pipeline_parallelism:
dp_group = self.mpu.get_data_parallel_group()
else:
dp_group = groups._get_data_parallel_group()
if bucket_type == SparseTensor.type():
self.sparse_allreduce_no_retain(bucket, dp_group=dp_group)
else:
self.allreduce_no_retain(bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer)
def _reduce_expert_gradients(self, expert_grads, elements_per_buffer):
for ep_name, expert_grads_group in expert_grads.items():
expert_split_buckets = split_half_float_double_sparse(expert_grads_group)
for i, bucket_tuple in enumerate(expert_split_buckets):
bucket_type, bucket = bucket_tuple
if bucket_type == SparseTensor.type():
self.sparse_allreduce_no_retain(bucket, groups._get_expert_data_parallel_group(ep_name))
else:
# Separate between diff groups
self.allreduce_no_retain(bucket,
dp_group=groups._get_expert_data_parallel_group(ep_name),
numel_per_bucket=elements_per_buffer)
def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000):
if grads is None:
non_expert_grads, expert_grads = self._get_gradients_for_reduction()
else:
assert not self.has_moe_layers, "attempting to reduce grads in unsupported way w.r.t. MoE"
non_expert_grads = grads
self._reduce_non_expert_gradients(non_expert_grads, elements_per_buffer)
if self.has_moe_layers:
self._reduce_expert_gradients(expert_grads, elements_per_buffer)
def sparse_allreduce_no_retain(self, bucket, dp_group):
allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group)
# Densify sparse tensor and copy back to original location
for tensor in allreduced_sparses:
if tensor.is_sparse:
tensor.orig_dense_tensor.data = tensor.to_coo_tensor()
else:
tensor.orig_dense_tensor.copy_(tensor.to_dense())
def sparse_allreduce_bucket(self, bucket, dp_group):
sparse_list = []
for sparse in bucket:
sparse_list.append(self.sparse_allreduce(sparse, dp_group))
return sparse_list
def sparse_allreduce(self, sparse, dp_group):
original_data_type = sparse.values.dtype
if self.communication_data_type != sparse.values.dtype:
if self.communication_data_type in (torch.float16, torch.bfloat16):
indices = sparse.indices.to(torch.int32)
else:
indices = sparse.indices
values = sparse.values.to(self.communication_data_type)
else:
indices = sparse.indices
values = sparse.values
if self.postscale_gradients():
if self.gradient_average:
values.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group))
else:
values.mul_(1. / dist.get_world_size(group=dp_group))
indices_device_list = self.sparse_all_gather(indices, dp_group)
values_device_list = self.sparse_all_gather(values, dp_group)
sparse.indices = torch.cat(indices_device_list).to(torch.long)
sparse.values = torch.cat(values_device_list).to(original_data_type)
return sparse
def sparse_all_gather(self, value, dp_group):
my_size = torch.LongTensor([value.size()[0]]).to(self.device)
all_sizes = self.all_gather_scalar(my_size, dp_group)
max_size = torch.cat(all_sizes).max()
fill_size = max_size - my_size
assert value.dim() in [1, 2]
if value.dim() == 1:
if fill_size > 0:
value = torch.cat([value, value.new_empty(fill_size)])
tensor_list = [value.new_empty(max_size) for _ in range(dist.get_world_size(group=dp_group))]
else:
if fill_size > 0:
value = torch.cat([value, value.new_empty(fill_size, value.size()[1])])
tensor_list = [
value.new_empty(max_size,
value.size()[1]) for _ in range(dist.get_world_size(group=dp_group))
]
dist.all_gather(tensor_list, value, group=dp_group)
tensors = []
for dev_idx, t in enumerate(tensor_list):
size = all_sizes[dev_idx][0]
tensors.append(t.index_select(0, torch.arange(size, dtype=torch.long, device=self.device)))
return tensors
def all_gather_scalar(self, value, dp_group):
tensor_list = [value.new_zeros(value.size()) for _ in range(dist.get_world_size(group=dp_group))]
dist.all_gather(tensor_list, value, group=dp_group)
return tensor_list
def module_state_dict(self, destination=None, prefix="", keep_vars=False):
sd = self.module.state_dict(destination, prefix, keep_vars)
if self.random_ltd_enabled():
sd = remove_random_ltd_state_dict(sd)
return sd
@staticmethod
def load_moe_state_dict(checkpoint_path,
tag,
state_dict,
old_moe_load,
model=None,
mpu=None,
num_experts=1,
checkpoint_engine=TorchCheckpointEngine()):
if old_moe_load:
expp_rank = groups._get_expert_data_parallel_rank(groups._get_max_expert_size_name())
num_local_experts = max(num_experts) // groups._get_expert_parallel_world_size(
groups._get_max_expert_size_name())
for local_expert_id in range(num_local_experts):
global_expert_id = expp_rank * num_local_experts + local_expert_id
expert_state_dict = checkpoint_engine.load(
DeepSpeedEngine._get_expert_ckpt_name(
checkpoint_path,
-1, # -1 means ignore layer_id
global_expert_id,
tag,
mpu),
map_location=torch.device('cpu'))
# Updating global -> local expert ids
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
for key in list(expert_state_dict.keys()):
local_key = key.replace(f'{moe_str_prefix}{global_expert_id}',
f'{moe_str_prefix}{local_expert_id}')
expert_state_dict[local_key] = expert_state_dict.pop(key)
state_dict.update(expert_state_dict)
else:
moe_layer_id = 0
for n_module, module in model.named_modules():
if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0:
group_name = module.expert_group_name
num_local_experts = module.num_local_experts
expp_rank = groups._get_expert_parallel_rank(group_name)
# loop all local_experts
for local_expert_id in range(num_local_experts):
global_expert_id = expp_rank * num_local_experts + local_expert_id
expert_state_dict = checkpoint_engine.load(DeepSpeedEngine._get_expert_ckpt_name(
checkpoint_path, moe_layer_id, global_expert_id, tag, mpu),
map_location=torch.device('cpu'))
# print(expert_state_dict.keys())
# Updating global -> local expert ids
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
for key in list(expert_state_dict.keys()):
local_key = key.replace(f'{moe_str_prefix}{global_expert_id}',
f'{moe_str_prefix}{local_expert_id}')
expert_state_dict[local_key] = expert_state_dict.pop(key)
state_dict.update(expert_state_dict)
moe_layer_id += 1
def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None):
module_state_dict = checkpoint['module']
if custom_load_fn:
custom_load_fn(src=module_state_dict, dst=self.module)
else:
self.module.load_state_dict(
module_state_dict, # TODO
strict=strict)
if checkpoint.get(FROZEN_PARAM_FRAGMENTS, None) is not None:
saved_frozen_params = checkpoint[FROZEN_PARAM_FRAGMENTS]
for param in self.module.parameters():
if param.requires_grad:
continue
if param not in self.param_names:
raise ValueError(f"failed to find frozen {param} in named params")
name = self.param_names[param]
if hasattr(param, 'ds_id'):
param.ds_tensor.data.copy_(saved_frozen_params[name].data)
else:
param.data.copy_(saved_frozen_params[name].data)
def _get_zero_ckpt_prefix(self, dp_rank, bf16_mode):
return f'{"bf16_" if bf16_mode else ""}zero_pp_rank_{dp_rank}'
def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank, bf16_mode):
file_prefix = self._get_zero_ckpt_prefix(dp_rank, bf16_mode=bf16_mode)
zero_ckpt_name = os.path.join(
checkpoints_path,
str(tag),
f"{file_prefix}_mp_rank_{mp_rank:02d}_optim_states.pt",
)
return zero_ckpt_name
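    # Example of the file name produced above (illustrative): with tag "global_step100",
    # mp_rank 0, dp_rank 3 and bf16_mode False, the returned path ends in
    # "global_step100/zero_pp_rank_3_mp_rank_00_optim_states.pt".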
def _get_zero_ckpt_name(self, checkpoints_path, tag):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
pp_rank = dist.get_rank(group=self.optimizer.dp_process_group)
bf16_mode = self.bfloat16_enabled()
return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank, bf16_mode)
def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None):
if mp_placeholder is not None:
mp_rank_str = mp_placeholder
else:
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
mp_rank_str = f"{mp_rank:02d}"
if self.zero_optimization_partition_weights():
filename = "zero_pp_rank_{}".format(dist.get_rank(group=self.optimizer.dp_process_group))
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
f"{filename}_mp_rank_{mp_rank_str}_model_states.pt",
)
else:
ckpt_name = os.path.join(
checkpoints_path,
str(tag),
"mp_rank_" + mp_rank_str + "_model_states.pt",
)
return ckpt_name
def _get_optimizer_ckpt_name(self, checkpoints_path, tag, expp_rank):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
ckpt_name = os.path.join(checkpoints_path, str(tag),
f'expp_rank_{expp_rank}_mp_rank_{mp_rank:02d}_optim_states.pt')
return ckpt_name
@staticmethod
def _get_expert_ckpt_name(checkpoints_path, layer_id, expert_id, tag, mpu=None):
mp_rank = 0 if mpu is None else mpu.get_model_parallel_rank()
if layer_id <= -1:
# Used to support old checkpoint loading
ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag),
f'expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt')
else:
# Used to support new checkpoint loading
ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag),
f'layer_{layer_id}_expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt')
return ckpt_name
def _get_all_ckpt_names(self, checkpoints_path, tag):
# It is required that (checkpoints_path, tag) are consistent among all ranks.
ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*")
import glob
ckpt_files = glob.glob(ckpt_file_pattern)
ckpt_files.sort()
return ckpt_files
def load_checkpoint(self,
load_dir,
tag=None,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True,
load_module_only=False,
custom_load_fn=None):
"""
Load training checkpoint
Arguments:
load_dir: Required. Directory to load the checkpoint from
tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file
load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match.
load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance
load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint.
load_module_only: Optional. Boolean to load only the model weights from the checkpoint. Ex. warmstarting.
custom_load_fn: Optional. Custom model load function.
Returns:
A tuple of ``load_path`` and ``client_state``.
*``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed.
*``client_state``: State dictionary used for loading required training states in the client code.
Important: under ZeRO3, one cannot load checkpoint with ``engine.load_checkpoint()`` right
after ``engine.save_checkpoint()``. It is because ``engine.module`` is partitioned, and
``load_checkpoint()`` wants a pristine model. If insisting to do so, please reinitialize engine
before ``load_checkpoint()``.
"""
if tag is None:
latest_tag = "latest_universal" if self.load_universal_checkpoint() else "latest"
latest_path = os.path.join(load_dir, latest_tag)
if os.path.isfile(latest_path):
with open(latest_path, "r") as fd:
tag = fd.read().strip()
else:
if self.load_universal_checkpoint():
raise ValueError(f'Invalid for universal checkpoint: {latest_path} does not exist')
else:
logger.warning(
f"Unable to find latest file at {latest_path}, if trying to load latest "
"checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint."
)
return None, None
if self.zero_optimization_partition_weights():
# Prepare for checkpoint load by ensuring all parameters are partitioned
self.optimizer.checkpoint_event_prologue()
load_path, client_states = self._load_checkpoint(load_dir,
tag,
load_module_strict=load_module_strict,
load_optimizer_states=load_optimizer_states,
load_lr_scheduler_states=load_lr_scheduler_states,
load_module_only=load_module_only,
custom_load_fn=custom_load_fn)
load_zero_checkpoint = self.zero_optimization() or self.bfloat16_enabled()
if load_zero_checkpoint and load_path is not None:
success = self._load_zero_checkpoint(load_dir, tag, load_optimizer_states=load_optimizer_states)
if not success:
self.optimizer._restore_from_bit16_weights()
if self.zero_optimization_partition_weights():
self.optimizer.checkpoint_event_epilogue()
return load_path, client_states
def _load_checkpoint(self,
load_dir,
tag,
load_module_strict=True,
load_optimizer_states=True,
load_lr_scheduler_states=True,
load_module_only=False,
custom_load_fn=None):
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
ckpt_list = self._get_all_ckpt_names(load_dir, tag)
sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, checkpoint_engine=self.checkpoint_engine)
is_pipe_parallel = isinstance(self.module, PipelineModule)
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
load_path, checkpoint, _ = sd_loader.load(self.mp_world_size, mp_rank, is_pipe_parallel=is_pipe_parallel)
if checkpoint is None:
return None, None
if is_pipe_parallel:
# Pipeline parallelism uses this to load its own checkpoint files.
self._curr_ckpt_path = os.path.join(load_dir, tag)
if self.has_moe_layers:
# print(checkpoint.keys())
old_moe_load = False
if not isinstance(checkpoint['num_experts'], list):
old_moe_load = True
DeepSpeedEngine.load_moe_state_dict(load_dir,
tag,
state_dict=checkpoint['module'],
old_moe_load=old_moe_load,
model=self.module,
mpu=self.mpu,
num_experts=self.num_experts,
checkpoint_engine=self.checkpoint_engine)
if not self.load_universal_checkpoint():
self.load_module_state_dict(checkpoint=checkpoint,
strict=load_module_strict,
custom_load_fn=custom_load_fn)
self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size']
if load_module_only:
deepspeed_states = ['module']
if self.optimizer is not None and self.fp16_enabled():
self.optimizer.refresh_fp32_params()
else:
if self.has_moe_layers:
largest_group_name = groups._get_max_expert_size_name()
expp_rank = groups._get_expert_parallel_rank(largest_group_name)
optim_load_path = self._get_optimizer_ckpt_name(load_dir, tag, expp_rank)
optim_checkpoint = self.checkpoint_engine.load(optim_load_path, map_location=torch.device('cpu'))
else:
optim_checkpoint = checkpoint
has_zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled()
if load_optimizer_states and self.optimizer is not None and not has_zero_optimizer_state:
if self.fp16_enabled():
self.optimizer.load_state_dict(optim_checkpoint['optimizer'],
load_optimizer_states=load_optimizer_states)
else:
self.optimizer.load_state_dict(optim_checkpoint['optimizer'])
if load_lr_scheduler_states and self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
if self.random_ltd_enabled() and self.random_ltd_scheduler is not None and 'random_ltd' in checkpoint:
self.random_ltd_scheduler.load_state_dict(checkpoint['random_ltd'])
if self.training_dataloader is not None and self.curriculum_learning_enabled(
) and 'data_sampler' in checkpoint:
self.training_dataloader.data_sampler.load_state_dict(checkpoint['data_sampler'])
def get_sparse_tensor_module_names(original_set, loaded_set, original_parameters, loaded_parameters):
result = set()
for name in original_set:
if name in loaded_parameters and name not in loaded_set:
continue # parameter existed in previous model and was not sparse
result.add(name)
for name in loaded_set:
if name in original_parameters:
result.add(name) # parameter exists in both configs and it was sparse
return result
if 'sparse_tensor_module_names' in checkpoint:
sparse_tensor_module_names = checkpoint['sparse_tensor_module_names']
elif 'csr_tensor_module_names' in checkpoint:
sparse_tensor_module_names = checkpoint['csr_tensor_module_names']
else:
sparse_tensor_module_names = None
if sparse_tensor_module_names is not None:
if load_module_strict:
self.sparse_tensor_module_names = sparse_tensor_module_names
else:
self.sparse_tensor_module_names = get_sparse_tensor_module_names(
self.sparse_tensor_module_names, sparse_tensor_module_names,
dict(self.module.named_parameters()), checkpoint["module"])
self.global_steps = checkpoint['global_steps']
self.global_samples = checkpoint.get('global_samples', self.global_steps * self.train_batch_size())
self.skipped_steps = checkpoint['skipped_steps']
self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size']
deepspeed_states = [
'module', 'sparse_tensor_module_names', 'skipped_steps', 'global_steps', 'dp_world_size',
'mp_world_size', 'data_sampler', 'random_ltd'
]
client_state = {}
if load_lr_scheduler_states:
deepspeed_states.append('lr_scheduler')
if load_optimizer_states:
deepspeed_states.append('optimizer')
        client_state = {key: value for key, value in checkpoint.items() if key not in deepspeed_states}
if not load_optimizer_states and not load_module_only:
client_state['optimizer'] = optim_checkpoint['optimizer']
return load_path, client_state
def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True):
if self.load_universal_checkpoint():
zero_sd_list = None
checkpoint_folder = f'{os.path.join(load_dir, tag)}'
else:
if load_optimizer_states and self.dp_world_size != self.loaded_checkpoint_dp_world_size:
raise ZeRORuntimeException("The checkpoint being loaded used a DP " \
f"world size of {self.loaded_checkpoint_dp_world_size} but the " \
f"current world size is {self.dp_world_size}. Automatic adjustment " \
"of ZeRO's optimizer state partitioning with a new world size is not " \
"currently supported.")
checkpoint_folder = None
zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag)
if zero_sd_list is None:
return False
self.optimizer.load_state_dict(state_dict_list=zero_sd_list,
load_optimizer_states=load_optimizer_states,
load_from_fp32_weights=self.zero_load_from_fp32_weights(),
checkpoint_folder=checkpoint_folder)
if self.load_universal_checkpoint():
logger.info(f'loaded universal zero checkpoints from {checkpoint_folder} for rank {self.global_rank}')
else:
logger.info(f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}")
return True
def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size, bf16_mode):
zero_ckpt_names = []
for dp_rank in range(dp_world_size):
ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_rank=dp_rank,
bf16_mode=bf16_mode)
zero_ckpt_names.append(ckpt_name)
return zero_ckpt_names
def _get_all_zero_checkpoint_names(self, load_dir, tag, bf16_mode):
mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank()
zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(load_dir=load_dir,
tag=tag,
mp_rank=mp_rank,
dp_world_size=self.loaded_checkpoint_dp_world_size,
bf16_mode=bf16_mode)
for i, ckpt_name in enumerate(zero_ckpt_names):
if not os.path.exists(ckpt_name):
# transparently handle the old file pattern for optim_states
if "optim_states.pt" in ckpt_name:
ckpt_name_try = ckpt_name.replace("_optim_states.pt", "optim_states.pt")
if os.path.exists(ckpt_name_try):
zero_ckpt_names[i] = ckpt_name_try
continue
return zero_ckpt_names
def _get_all_zero_checkpoint_state_dicts(self, zero_ckpt_names):
zero_sd_list = []
for i, ckpt_name in enumerate(zero_ckpt_names):
_state = None
if ckpt_name is None:
_state = {OPTIMIZER_STATE_DICT: None}
# Fully load state for current rank
elif self.zero_elastic_checkpoint() or dist.get_rank(group=self.optimizer.dp_process_group) == i:
_state = self.checkpoint_engine.load(
ckpt_name,
map_location='cpu',
)
else:
_state = {OPTIMIZER_STATE_DICT: None}
zero_sd_list.append(_state)
zero_optimizer_sd = [sd[OPTIMIZER_STATE_DICT] for sd in zero_sd_list]
logger.info(f"successfully read {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}")
return zero_optimizer_sd
def _get_all_zero_checkpoints(self, load_dir, tag):
for bf16_mode in [self.bfloat16_enabled(), not self.bfloat16_enabled()]:
zero_ckpt_names = self._get_all_zero_checkpoint_names(load_dir, tag, bf16_mode)
if zero_ckpt_names is not None:
# Warn if loading checkpoint of different bit16 type
if bf16_mode is not self.bfloat16_enabled():
checkpoint_bit16 = BFLOAT16 if bf16_mode else FP16
engine_bit16 = BFLOAT16 if self.bfloat16_enabled() else FP16
logger.warn(f'Loading {checkpoint_bit16} zero checkpoints into {engine_bit16} training engine')
return self._get_all_zero_checkpoint_state_dicts(zero_ckpt_names)
return None
def _checkpoint_tag_validation(self, tag):
if self.checkpoint_tag_validation_enabled():
s_hash = hashlib.sha1(tag.encode())
bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device)
max_bhash = bhash.clone()
min_bhash = bhash.clone()
dist.all_reduce(max_bhash, op=dist.ReduceOp.MAX)
dist.all_reduce(min_bhash, op=dist.ReduceOp.MIN)
valid = all(min_bhash == bhash) and all(max_bhash == bhash)
msg = (f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across "
"all ranks. Including rank unique information in checkpoint tag could cause issues when "
"restoring with different world sizes.")
if self.checkpoint_tag_validation_fail():
assert valid, msg
elif not valid:
logger.warning(msg)
def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True):
"""Save training checkpoint
Arguments:
save_dir: Required. Directory for saving the checkpoint
tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is
used if not provided. Tag name must be the same across all ranks.
client_state: Optional. State dictionary used for saving required training states in the client code.
save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint.
Important: all processes must call this method and not just the process with rank 0. It is
because each process needs to save its master weights and scheduler+optimizer states. This
method will hang waiting to synchronize with other processes if it's called just for the
process with rank 0.
"""
if self.zero_optimization_partition_weights():
# Prepare for checkpoint save by ensuring all parameters are partitioned
self.optimizer.checkpoint_event_prologue()
rank = self.local_rank if self.use_node_local_storage() else self.global_rank
# This is to make sure the checkpoint names are created without collision
# There seems to be issue creating them in parallel
# Ensure save_dir directory exists
self.checkpoint_engine.makedirs(save_dir, exist_ok=True)
dist.barrier()
if tag is None:
tag = f"global_step{self.global_steps}"
# Ensure tag is a string
tag = str(tag)
self.checkpoint_engine.create(tag)
# Ensure checkpoint tag is consistent across ranks
self._checkpoint_tag_validation(tag)
if self.has_moe_layers:
self.save_non_zero_checkpoint = False
self._create_checkpoint_file(save_dir, tag, False)
self._save_moe_checkpoint(save_dir, tag, client_state=client_state)
# We distribute the task of saving layer checkpoint files among
# data parallel instances, so all procs should call _save_checkpoint.
# All procs then call module_state_dict(), but only procs of data
# parallel rank 0 save the general model params.
if not self.has_moe_layers:
self._create_checkpoint_file(save_dir, tag, False)
self._save_checkpoint(save_dir, tag, client_state=client_state)
if self.save_zero_checkpoint:
self._create_zero_checkpoint_files(save_dir, tag)
self._save_zero_checkpoint(save_dir, tag)
if self.zero_optimization_partition_weights():
self.optimizer.checkpoint_event_epilogue()
# Save latest checkpoint tag
self.checkpoint_engine.commit(tag)
if save_latest and rank == 0:
with open(os.path.join(save_dir, 'latest'), 'w') as fd:
fd.write(tag)
dist.barrier()
return True
def _get_non_moe_state_dict(self, full_state_dict):
"""
Get the state dict of the non-moe layers
"""
for key in list(full_state_dict.keys()):
if 'expert' in key and 'moe.gate.wg.weight' not in key:
full_state_dict.pop(key)
return full_state_dict
def _save_moe_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None.
        # Using layer_#_expert_# to save the model's expert state_dict
moe_layer_id = 0
for n_module, module in self.module.named_modules():
if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0:
group_name = module.expert_group_name
num_local_experts = module.num_local_experts
expp_rank = groups._get_expert_parallel_rank(group_name)
exp_dp_rank = groups._get_expert_data_parallel_rank(group_name)
# print(expp_rank, exp_dp_rank)
if exp_dp_rank != 0:
moe_layer_id += 1
continue
# get all moe parameters
moe_state_dict = {}
for n, p in module.state_dict().items():
if 'expert' in n and 'moe.gate.wg.weight' not in n:
moe_state_dict[n_module + '.' + n] = p
moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'
                # Remap local expert ids to global expert ids so that each checkpoint file holds exactly one expert
experts_state_dict = defaultdict(dict)
for key in list(moe_state_dict.keys()):
m = re.match(f".*{moe_str_prefix}([0-9]+).*", key)
local_expert_id = None
if not m:
logger.warn(f'No expert found in key {key}.')
else:
local_expert_id = m.group(1)
global_expert_id = expp_rank * \
num_local_experts + int(local_expert_id)
expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}',
f'{moe_str_prefix}{global_expert_id}')
# truncating extra tensor (shared) storage
truncated = moe_state_dict.pop(key).clone().detach()
experts_state_dict[str(global_expert_id)][expert_key] = truncated
                # Save the MoE parameters, one checkpoint file per global expert
                for global_expert_id, expert_state_dict in experts_state_dict.items():
moe_save_path = self._get_expert_ckpt_name(save_dir, moe_layer_id, global_expert_id, tag, self.mpu)
if self.random_ltd_enabled():
expert_state_dict = remove_random_ltd_state_dict(expert_state_dict)
self.checkpoint_engine.save(expert_state_dict, moe_save_path)
moe_layer_id += 1
self._curr_ckpt_path = os.path.join(save_dir, tag)
largest_group_name = groups._get_max_expert_size_name()
expp_rank = groups._get_expert_parallel_rank(largest_group_name)
exp_dp_rank = groups._get_expert_data_parallel_rank(largest_group_name)
# In the case of E + D parallelism, only the
# first expert parallel group should save the expert weights
# since each expert parallel group is a copy of the model's experts
if exp_dp_rank != 0:
return
# Save optimizer states. They are different across each exp parallel rank.
optimizer_state = {
'optimizer': self.optimizer.state_dict() if self.optimizer and not self.zero_optimization() else None
}
# TODO: why use BufferedWriter not the path
file_path = self._get_optimizer_ckpt_name(save_dir, tag, expp_rank)
self.checkpoint_engine.save(optimizer_state, file_path)
# get non-moe parameters
model_state_dict = self._get_non_moe_state_dict(self.module_state_dict())
if expp_rank == 0:
# TODO: update num experts info,.. in checkpoint
state = {
'module':
model_state_dict,
'lr_scheduler':
self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
'data_sampler':
self.training_dataloader.data_sampler.state_dict() if
(self.training_dataloader is not None and self.curriculum_learning_enabled()) else None,
'random_ltd':
self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None,
'sparse_tensor_module_names':
self.sparse_tensor_module_names,
'skipped_steps':
self.skipped_steps,
'global_steps':
self.global_steps,
'global_samples':
self.global_samples,
'dp_world_size':
self.dp_world_size,
'mp_world_size':
self.mp_world_size,
'num_experts':
self.num_experts
}
state.update(client_state)
logger.info(f'Saving model checkpoint: {save_path}')
self.checkpoint_engine.save(state, save_path)
self._curr_save_path = None
def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint):
name_function = (self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name)
try:
checkpoint_name = name_function(save_dir, tag)
path = os.path.dirname(checkpoint_name)
self.checkpoint_engine.makedirs(path, exist_ok=True)
        except Exception as err:
            logger.error(f"Failed saving model checkpoint to {save_dir} with tag {tag}: {err}")
return False
return True
def _create_zero_checkpoint_files(self, save_dir, tag):
success = True
# zero checkpoint files are created sequentially
for rank in range(self.world_size):
if rank == self.global_rank:
success = self._create_checkpoint_file(save_dir, tag, True)
dist.barrier()
return success
def _save_checkpoint(self, save_dir, tag, client_state={}):
save_path = self._get_ckpt_name(save_dir, tag)
zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled()
save_frozen_param = self.zero_optimization_partition_gradients()
# A hack to save the checkpointing directory. Pipeline parallelism overrides
# module_state_dict() and uses this path to save the model. module_state_dict()
# then instead just returns None. The module_state_dict() implementation in
# PipelineEngine expects the save path to be set in self._curr_ckpt_path.
self._curr_ckpt_path = os.path.join(save_dir, tag)
module = self.module_state_dict()
self._curr_ckpt_path = None
state = dict(module=module,
buffer_names=self._get_buffer_names(),
optimizer=self.optimizer.state_dict() if self.optimizer and not zero_optimizer_state else None,
param_shapes=self._get_zero_param_shapes() if self.optimizer and zero_optimizer_state else None,
frozen_param_shapes=self._get_zero_frozen_param_attributes(self._get_param_shape_func)
if save_frozen_param else None,
shared_params=self._get_shared_params() if self.optimizer and zero_optimizer_state else None,
frozen_param_fragments=self._get_zero_frozen_param_attributes(self._get_param_fragment_func)
if save_frozen_param else None,
lr_scheduler=self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
data_sampler=self.training_dataloader.data_sampler.state_dict() if
(self.training_dataloader is not None and self.curriculum_learning_enabled()) else None,
random_ltd=self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None,
sparse_tensor_module_names=self.sparse_tensor_module_names,
skipped_steps=self.skipped_steps,
global_steps=self.global_steps,
global_samples=self.global_samples,
dp_world_size=self.dp_world_size,
mp_world_size=self.mp_world_size,
ds_config=self.config,
ds_version=version)
state.update(client_state)
if self.save_non_zero_checkpoint:
log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1])
self.checkpoint_engine.save(state, save_path)
def _get_buffer_names(self):
buffer_names = []
# we save buffer names so that we could extract later the real buffers from the saved
# state_dict["module"] in the non-zero checkpoint - the buffers are already there but they
# are intermixed with param placeholders
# have to traverse the tree to be able to skip non-persistent buffers
def get_layer_named_buffers(module, prefix=""):
for name, buf in module.named_buffers(recurse=False):
if buf is not None and name not in module._non_persistent_buffers_set:
buffer_names.append(prefix + name)
for name, child in module.named_children():
if child is not None:
get_layer_named_buffers(child, prefix + name + ".")
get_layer_named_buffers(self.module, prefix="")
return buffer_names
def _get_param_shape_func(self, param):
return param.ds_shape if hasattr(param, 'ds_id') else param.shape
def _get_param_fragment_func(self, param):
return param.ds_tensor.detach().cpu() if hasattr(param, 'ds_id') else param.detach().cpu()
def _get_zero_frozen_param_attributes(self, attr_func):
frozen_param_fragments = OrderedDict()
for param in self.module.parameters():
if param.requires_grad:
continue
if param not in self.param_names:
raise ValueError(f"failed to find frozen {param} in named params")
name = self.param_names[param]
frozen_param_fragments[name] = attr_func(param)
return frozen_param_fragments
def _get_zero_param_shapes(self):
"""Returns a dict of name to shape mapping, only for the flattened fp32 weights saved by the
optimizer. the names are exactly as in state_dict. The order is absolutely important, since
the saved data is just flattened data with no identifiers and requires reconstruction in the
same order it was saved.
        We can't rely on self.module.named_parameters() to get the saved tensors, because some
        params may be missing and others not saved, which would make it impossible to reconstruct
        the state_dict from the flattened weights.
optimizer.bit16_groups seems to be the easiest to use as it's in all zeroX versions.
"""
param_group_shapes = []
cnt = 0
numel = 0
# zero2 started using a round_robin_bit16_groups which is a shuffled version of bit16_groups -
# if we don't use it, we get parameters ordered incorrectly
if hasattr(self.optimizer, "round_robin_bit16_groups"):
bit16_groups = self.optimizer.round_robin_bit16_groups
elif self.bfloat16_enabled() and not self.zero_optimization():
bit16_groups = self.optimizer.bf16_groups
else:
bit16_groups = self.optimizer.bit16_groups if self.zero_optimization_stage(
) == 2 else self.optimizer.fp16_groups
for bit16_group in bit16_groups:
param_shapes = OrderedDict()
for param in bit16_group:
cnt += 1
numel += param.ds_numel if hasattr(param, "ds_numel") else param.numel()
shape = param.ds_shape if hasattr(param, "ds_shape") else param.shape
if param not in self.param_names:
raise ValueError(f"failed to find optimizer param in named params")
name = self.param_names[param]
param_shapes[name] = shape
# uncomment to debug zero_to_fp32.py problems
# if self.global_rank == 0: print(f"saving param {name} {shape} (numel={shape.numel()})")
param_group_shapes.append(param_shapes)
# if self.global_rank == 0: print(f"Total saved {numel} numels in {cnt} params")
return param_group_shapes
def _get_shared_params(self):
"""
Returns a dict of shared params, which can later be used to reconstruct the original state dict,
e.g. in `zero_to_fp32`. Each dict entry is a pair of param names, where the key is the name
of the variable that isn't stored and the value is the actual param holding data.
"""
shared_ds_ids = {}
shared_params_by_full_name = {}
def get_layer_state_dict(module, prefix=""):
# handle params
for name, param in module.named_parameters(recurse=False):
if param is None or not hasattr(param, "ds_id"):
continue
key = prefix + name
# can't rely on param.data_ptr() as it will be reused as weights gets
# gathered and reduced, but param.ds_id is unique across all zero weights
# (and shared params will have the same param.ds_id)
if param.ds_id in shared_ds_ids:
# shared weights
#print(f"`{key}` is shared with `{shared_ds_ids[param.ds_id]}`")
shared_params_by_full_name[key] = shared_ds_ids[param.ds_id]
else:
shared_ds_ids[param.ds_id] = key
for name, child in module.named_children():
if child is not None:
get_layer_state_dict(child, prefix + name + ".")
if dist.get_rank() == 0:
get_layer_state_dict(self.module, prefix="")
return shared_params_by_full_name
def _copy_recovery_script(self, save_path):
base_dir = os.path.dirname(os.path.dirname(__file__))
script = "zero_to_fp32.py"
src = os.path.join(base_dir, "utils", script)
dst = os.path.join(save_path, script)
#logger.info(f"creating recovery script {dst}")
copyfile(src, dst)
# make executable
os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC)
def _save_zero_checkpoint(self, save_path, tag):
zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag)
zero_sd = dict(optimizer_state_dict=self.optimizer.state_dict(), ds_config=self.config, ds_version=version)
self.checkpoint_engine.save(zero_sd, zero_checkpoint_name)
if self.global_rank == 0:
self._copy_recovery_script(save_path)
ckpt_type = 'zero' if self.zero_optimization() else 'bf16_zero'
logger.info(f'{ckpt_type} checkpoint saved {zero_checkpoint_name}')
def _zero3_consolidated_16bit_state_dict(self):
"""
Get a full non-partitioned state_dict with fp16 weights on cpu.
Important: this function must be called on all ranks and not just rank 0.
This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but:
1. consolidates the weights from different partitions on gpu0
2. works on one layer at a time to require as little gpu0 memory as possible, by
moving the already consolidated weights to cpu
3. takes care to keep the shared params shared when gradually copying the params to cpu
Returns:
a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks
"""
if not self.zero_optimization_partition_weights():
raise ValueError("this function requires ZeRO-3 mode")
state_dict = OrderedDict() if dist.get_rank() == 0 else None
shared_params = {}
def get_layer_state_dict(module, prefix=""):
# gather one layer at a time to be memory-efficient
# must use modifier_rank=0 to release GPU memory after each layer gathered
#see_memory_usage("before GatheredParameters", force=True)
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
if dist.get_rank() == 0:
# handle params
for name, param in module.named_parameters(recurse=False):
if param is None:
continue
key = prefix + name
# can't rely on param.data_ptr() as it will be reused as weights gets
# gathered and reduced, but param.ds_id is unique across all zero weights
# (and shared params will have the same param.ds_id)
if param.ds_id in shared_params:
# shared weights
#print(f"`{key}` is shared with `{shared_params[param.ds_id]}`")
state_dict[key] = state_dict[shared_params[param.ds_id]]
else:
state_dict[key] = param.detach().cpu()
shared_params[param.ds_id] = key
#print(f"param {param.ds_id} {param.shape} {key} ")
# now buffers - not sure if need to take care of potentially shared weights here
for name, buf in module.named_buffers(recurse=False):
if (buf is not None and name not in module._non_persistent_buffers_set):
state_dict[prefix + name] = buf.detach().cpu()
#see_memory_usage("after GatheredParameters", force=True)
for name, child in module.named_children():
if child is not None:
get_layer_state_dict(child, prefix + name + ".")
# Prepare for checkpoint save by ensuring all parameters are partitioned
self.optimizer.checkpoint_event_prologue()
see_memory_usage("before get_layer_state_dict", force=False)
get_layer_state_dict(self.module, prefix="")
see_memory_usage("after get_layer_state_dict", force=False)
self.optimizer.checkpoint_event_epilogue()
return state_dict
def save_fp16_model(self, save_dir, save_filename="pytorch_model.bin"):
"""has been renamed to save_16bit_model, keeping this around for backwards
compatibility"""
return self.save_16bit_model(save_dir, save_filename)
def save_16bit_model(self, save_dir, save_filename="pytorch_model.bin"):
"""
Save 16bit model weights
This method saves the 16bit model weights at the desired destination.
Arguments:
save_dir: Required. Directory for saving the model
save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin``
Returns:
``True`` when a model has been saved, ``False`` otherwise. It will not be saved if
stage3_gather_16bit_weights_on_model_save is ``False``.
Important: all processes must call this method and not just the process with rank 0. It is
because the processes need to work in sync to gather the weights. This method will hang
waiting to synchronize with other processes if it's called just for the process with rank 0.
"""
path = os.path.join(save_dir, save_filename)
if self.zero_optimization_partition_weights():
if self.zero_gather_16bit_weights_on_model_save():
# consolidation is expensive in time and memory and therefore isn't a default
state_dict = self._zero3_consolidated_16bit_state_dict()
else:
# the model will be bogus if not consolidated so don't confuse the user by saving it
logger.info(
f"Did not save the model {path} because `stage3_gather_16bit_weights_on_model_save` is False")
return False
else:
state_dict = self.module.state_dict()
tag = f"global_step{self.global_steps}"
tag = str(tag)
self.checkpoint_engine.create(tag)
if dist.get_rank() == 0:
self.checkpoint_engine.makedirs(save_dir, exist_ok=True)
logger.info(f"Saving model weights to {path}, tag: {tag}")
self.checkpoint_engine.save(state_dict, path)
self.checkpoint_engine.commit(tag)
return True
def empty_partition_cache(self):
"""
Release GPU memory consumed by offloaded model parameters.
"""
if hasattr(self.optimizer, 'empty_partition_cache'):
self.optimizer.empty_partition_cache()
gc.collect()
            get_accelerator().empty_cache()
# DeepSpeed Team
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from deepspeed.accelerator import get_accelerator
from deepspeed.runtime.data_pipeline.data_sampling.data_sampler import DeepSpeedDataSampler
from deepspeed.runtime.data_pipeline.constants import CURRICULUM_LEARNING, \
DATA_EFFICIENCY, DATA_SAMPLING_NUM_WORKERS
from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, \
DATA_PARALLEL_GROUP, GLOBAL_RANK
class RepeatingLoader:
def __init__(self, loader):
"""Wraps an iterator to allow for infinite iteration. This is especially useful
for DataLoader types that we wish to automatically restart upon completion.
Args:
loader (iterator): The data loader to repeat.
"""
self.loader = loader
self.data_iter = iter(self.loader)
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.data_iter)
except StopIteration:
self.data_iter = iter(self.loader)
batch = next(self.data_iter)
return batch
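# Example usage of RepeatingLoader (illustrative sketch; dataset and step count are hypothetical):
#   loader = RepeatingLoader(DataLoader(dataset, batch_size=8))
#   data_iter = iter(loader)
#   for _ in range(num_training_steps):  # may exceed one pass over the dataset
#       batch = next(data_iter)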
class DeepSpeedDataLoader(object):
def __init__(self,
dataset,
batch_size,
pin_memory,
local_rank,
tput_timer,
collate_fn=None,
num_local_io_workers=None,
data_sampler=None,
data_parallel_world_size=None,
data_parallel_rank=None,
dataloader_drop_last=False,
deepspeed_dataloader_config={}):
self.deepspeed_dataloader_config = deepspeed_dataloader_config
self.tput_timer = tput_timer
self.batch_size = batch_size
self.curriculum_learning_enabled = False
if CURRICULUM_LEARNING in deepspeed_dataloader_config:
self.curriculum_learning_enabled = deepspeed_dataloader_config[CURRICULUM_LEARNING]
if self.curriculum_learning_enabled:
data_sampler = DeepSpeedDataSampler(self.deepspeed_dataloader_config[DATA_EFFICIENCY],
len(dataset),
self.batch_size,
data_parallel_rank,
data_parallel_world_size,
self.deepspeed_dataloader_config[DATA_PARALLEL_GROUP],
self.deepspeed_dataloader_config[GRADIENT_ACCUMULATION_STEPS],
self.deepspeed_dataloader_config[GLOBAL_RANK],
drop_last=dataloader_drop_last)
device_count = get_accelerator().device_count()
num_local_io_workers = self.deepspeed_dataloader_config[DATA_SAMPLING_NUM_WORKERS]
else:
if local_rank >= 0:
if data_sampler is None:
data_sampler = DistributedSampler(dataset=dataset,
num_replicas=data_parallel_world_size,
rank=data_parallel_rank)
device_count = 1
else:
if data_sampler is None:
data_sampler = RandomSampler(dataset)
device_count = get_accelerator().device_count()
batch_size *= device_count
if num_local_io_workers is None:
num_local_io_workers = 2 * device_count
self.num_local_io_workers = num_local_io_workers
self.data_sampler = data_sampler
self.dataset = dataset
self.collate_fn = collate_fn
self.device_count = device_count
self.batch_size = batch_size
self.pin_memory = pin_memory
self.data = None
self.dataloader_drop_last = dataloader_drop_last
self.post_process_func = None
if self.dataloader_drop_last:
self.len = len(self.data_sampler) // self.batch_size
else:
from math import ceil
self.len = ceil(len(self.data_sampler) / self.batch_size)
def __iter__(self):
self._create_dataloader()
return self
def __len__(self):
return self.len
def __next__(self):
if self.tput_timer:
self.tput_timer.start()
if self.curriculum_learning_enabled:
data = next(self.data_iterator)
if self.post_process_func is not None:
data = self.post_process_func(data, self.data_sampler.state_dict())
return data
else:
return next(self.data)
def _create_dataloader(self):
if self.curriculum_learning_enabled:
if self.collate_fn is None:
self.dataloader = DataLoader(self.dataset,
pin_memory=self.pin_memory,
batch_sampler=self.data_sampler,
num_workers=self.num_local_io_workers)
else:
self.dataloader = DataLoader(self.dataset,
pin_memory=self.pin_memory,
batch_sampler=self.data_sampler,
collate_fn=self.collate_fn,
num_workers=self.num_local_io_workers)
self.data_iterator = iter(self.dataloader)
return self.dataloader
else:
if self.collate_fn is None:
self.dataloader = DataLoader(self.dataset,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
sampler=self.data_sampler,
num_workers=self.num_local_io_workers,
drop_last=self.dataloader_drop_last)
else:
self.dataloader = DataLoader(self.dataset,
batch_size=self.batch_size,
pin_memory=self.pin_memory,
sampler=self.data_sampler,
collate_fn=self.collate_fn,
num_workers=self.num_local_io_workers,
drop_last=self.dataloader_drop_last)
self.data = (x for x in self.dataloader)
return self.dataloader
# DeepSpeed Team
"""
Copyright NVIDIA/Megatron
Helper functions and classes from multiple sources.
"""
from collections.abc import Iterable
from deepspeed.moe.utils import is_moe_param
import os
import psutil
import gc
from math import sqrt
from math import floor
from bisect import bisect_left
import torch
from deepspeed import comm as dist
try:
from torch._six import inf
except ModuleNotFoundError:
from torch import inf
from deepspeed.utils import groups, logger
from deepspeed.runtime.constants import PIPE_REPLICATED
from numpy import prod
from deepspeed.accelerator import get_accelerator
from deepspeed.module_inject.policy import transpose
from torch.nn import functional as F
torch_memory_reserved = get_accelerator().memory_reserved
torch_max_memory_reserved = get_accelerator().max_memory_reserved
class DummyOptim():
"""
Dummy optimizer presents model parameters as a param group, this is
primarily used to allow ZeRO-3 without an optimizer
"""
def __init__(self, params):
self.param_groups = []
self.param_groups.append({'params': params})
def noop_decorator(func):
return func
def ensure_directory_exists(filename):
"""Create the directory path to ``filename`` if it does not already exist.
Args:
filename (str): A file path.
"""
dirname = os.path.dirname(filename)
os.makedirs(dirname, exist_ok=True)
def set_random_seed(seed):
"""Set the random seed for common PRNGs used during training: random, numpy, and torch.
Args:
seed (int): the seed to use
"""
import numpy
import random
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
def is_model_parallel_parameter(p) -> bool:
if hasattr(p, 'model_parallel') and p.model_parallel:
return True
if hasattr(p, 'tensor_model_parallel') and p.tensor_model_parallel:
return True
return False
def bwc_tensor_model_parallel_rank(mpu=None):
"""Backwards-compatible way of querying the tensor model parallel rank from
an ``mpu`` object.
*Tensor* model parallelism means that tensors are physically split across
processes. This contrasts with *pipeline* model parallelism, in which the
layers are partitioned but tensors left intact.
The API for tensor model parallelism has changed across versions and this
helper provides a best-effort implementation across versions of ``mpu``
objects. The preferred mechanism is
``mpu.get_tensor_model_parallel_rank()``.
This should "just work" with both Megatron-LM and DeepSpeed's pipeline
parallelism.
Args:
        mpu (model parallel unit, optional): The model parallel unit object to
            query for the tensor model parallel rank. If ``mpu=None``, 0 is
            returned. Defaults to ``None``.
Returns:
int: the rank
"""
if mpu is None:
        # No model parallelism, so the tensor model parallel rank is trivially 0 :)
return 0
if hasattr(mpu, 'get_tensor_model_parallel_rank'):
# New Megatron and DeepSpeed convention (post pipeline-parallelism release)
return mpu.get_tensor_model_parallel_rank()
elif hasattr(mpu, 'get_slice_parallel_rank'):
# Some DeepSpeed + pipeline parallelism versions
return mpu.get_slice_parallel_rank()
else:
# Deprecated Megatron and DeepSpeed convention
return mpu.get_model_parallel_rank()
def copy_to_device(item, device, criterion_func):
"""
Return a copy of tensor on specified device.
Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
Parameters:
item: tensor to copy or (possibly nested) container of tensors to copy.
device: target device
        criterion_func: Function restricting the copy to items that meet the criterion
    Returns:
        A copy of ``item`` with qualifying tensors placed on ``device``
"""
if criterion_func(item):
return item.to(device)
elif isinstance(item, list):
return [copy_to_device(v, device, criterion_func) for v in item]
elif isinstance(item, tuple):
return tuple([copy_to_device(v, device, criterion_func) for v in item])
elif isinstance(item, dict):
return {k: copy_to_device(v, device, criterion_func) for k, v in item.items()}
else:
return item
def move_to_device(item, device, criterion_func):
"""
Move tensor on to specified device by changing the storage.
Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.
Parameters:
item: tensor to move or (possibly nested) container of tensors to move.
device: target device
        criterion_func: Function restricting the move to items that meet the criterion
    Returns:
        ``item`` with the storage of qualifying tensors moved to ``device``
"""
if criterion_func(item):
device_copy = item.to(device)
item.data = device_copy.data
return item
elif isinstance(item, list):
return [move_to_device(v, device, criterion_func) for v in item]
elif isinstance(item, tuple):
return tuple([move_to_device(v, device, criterion_func) for v in item])
elif isinstance(item, dict):
return {k: move_to_device(v, device, criterion_func) for k, v in item.items()}
else:
return item
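# Example (illustrative sketch): move every tensor in a nested batch onto the CPU
#   batch = {"input_ids": torch.ones(2, 8, dtype=torch.long), "masks": [torch.ones(2, 8)]}
#   batch = move_to_device(batch, torch.device("cpu"), torch.is_tensor)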
class CheckOverflow(object):
'''Checks for overflow in gradient across parallel process'''
def __init__(self, param_groups=None, mpu=None, zero_reduce_scatter=False, deepspeed=None):
self.mpu = mpu
self.params = [] if param_groups else None
self.zero_reduce_scatter = zero_reduce_scatter
self.deepspeed = deepspeed
self.has_moe_params = False
if param_groups:
for group in param_groups:
for param in group:
self.params.append(param)
if is_moe_param(param):
self.has_moe_params = True
def check_using_norm(self, norm_group, reduce_overflow=True):
# TODO: I don't think reduce_overflow is needed if mpu is None
overflow = -1 in norm_group
overflow_gpu = get_accelerator().FloatTensor([overflow])
if self.has_moe_params:
# In this case, we need to do an all_reduce across
# the expert_parallel_group, so that if there was
# an overflow due to expert weights, we detect it
# Only need to check groups.get_largest_expert_parallel_group()
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group())
if self.mpu is not None:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group())
elif reduce_overflow:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX)
dist.barrier()
overflow = overflow_gpu[0].item()
return bool(overflow)
def check(self, param_groups=None):
params = []
has_moe_params = False
if param_groups is None:
params = self.params
has_moe_params = self.has_moe_params
else:
assert param_groups is not None, \
"self.params and param_groups both cannot be none"
for group in param_groups:
for param in group:
params.append(param)
if is_moe_param(param):
has_moe_params = True
return self.has_overflow(params, has_moe_params=has_moe_params)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params):
for i, p in enumerate(params):
if p.grad is not None and self._has_inf_or_nan(p.grad.data, i):
return True
return False
def has_overflow(self, params, has_moe_params=None):
if has_moe_params is None:
has_moe_params = self.has_moe_params
overflow = self.has_overflow_serial(params)
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
overflow_gpu = get_accelerator().ByteTensor([overflow])
        # deepspeed.comm.all_reduce(overflow_gpu,
        #                           op=deepspeed.comm.ReduceOp.MAX,
        #                           group=mpu.get_model_parallel_group())
if has_moe_params:
# All reduce this across expert_parallel_group, so that if an expert
# overflows, we detect it here
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group())
if self.zero_reduce_scatter:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group())
elif self.mpu is not None:
if self.deepspeed is not None:
using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
if (using_pipeline and self.deepspeed.pipeline_enable_backward_allreduce is False) or (
not using_pipeline and self.deepspeed.enable_backward_allreduce is False):
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_data_parallel_group())
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group())
elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False:
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group())
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, i):
try:
            # if x is half, the .float() incurs an additional deep copy, but it's necessary
            # because PyTorch's .sum() may create a one-element tensor of the same type as x
            # (which is true for some recent versions of PyTorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
def _handle_overflow(cpu_sum, x, i):
import math
rank = dist.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}")
def get_global_norm(norm_list):
""" Compute total from a list of norms
"""
total_norm = 0.0
for norm in norm_list:
total_norm += norm**2.0
# logger.info(f'norm_list = {norm_list} global = {sqrt(total_norm)}')
return sqrt(total_norm)
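

# Quick illustration of get_global_norm: per-group norms are combined as an
# L2 norm, e.g. [3.0, 4.0] -> sqrt(3**2 + 4**2) == 5.0 (hypothetical values).
def _example_get_global_norm():
    return get_global_norm([3.0, 4.0])  # -> 5.0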
def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None):
"""Clips gradient norm of an iterable of parameters.
    This has been adapted from NVIDIA Megatron and from
    torch.nn.utils.clip_grad.clip_grad_norm_, with added support for model
    parallel parameters. Norm averaging across data-parallel ranks is added so
    that MoE parameters, which can have different norms on different ranks, are
    handled consistently. Note that the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
# Take max across all GPUs.
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0
for p in parameters:
if mpu is not None:
if (mpu.get_model_parallel_rank() == 0) or is_model_parallel_parameter(p):
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item()**norm_type
else:
param_norm = p.grad.data.float().norm(norm_type)
total_norm += param_norm.item()**norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
# Need to average total_norm across different GPUs due to the presence of moe params
pg = groups._get_data_parallel_group()
scaled_norm = total_norm * 1.0 / float(dist.get_world_size(group=pg))
scaled_norm_tensor = get_accelerator().FloatTensor([float(scaled_norm)])
dist.all_reduce(scaled_norm_tensor, group=pg)
total_norm = scaled_norm_tensor.item()
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
def get_grad_norm(parameters, norm_type=2, mpu=None):
"""Get grad norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place. Taken from Nvidia Megatron.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
# Take max across all GPUs.
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
for p in parameters:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
# Filter to avoid over-counting replicated tensors from tensor
# model parallelism
if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
continue
param_norm = p.grad.data.float().norm(norm_type)
total_norm += param_norm.item()**norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
def get_grad_zeros(parameters, mpu=None):
"""Compute the number of grads with zero values.
This is adapted from get_grad_norm
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
Returns:
Total number of params with zero values (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
total_zeros = 0.
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
for p in parameters:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
# Filter to avoid over-counting replicated tensors from tensor
# model parallelism
if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
continue
count_zeros = p.grad.numel() - torch.count_nonzero(p.grad)
total_zeros += count_zeros.item()
# Sum across all model parallel GPUs.
total_zeros_cuda = get_accelerator().FloatTensor([float(total_zeros)])
if mpu is not None:
dist.all_reduce(total_zeros_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_zeros = total_zeros_cuda[0].item()
return total_zeros
def get_weight_norm(parameters, norm_type=2, mpu=None):
"""Get norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place. Taken from Nvidia Megatron.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.data.abs().max() for p in parameters)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
# Take max across all GPUs.
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu)
for p in parameters:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
# Filter to avoid over-counting replicated tensors from tensor
# model parallelism
if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p):
continue
param_norm = p.data.float().norm(norm_type)
total_norm += param_norm**norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
def prefix_sum_inc(weights):
""" Compute an inclusive prefix sum.
Example:
>>> prefix_sum_inc([3,4,5])
[3, 7, 12]
"""
weights_ = [w for w in weights]
for x in range(1, len(weights_)):
weights_[x] += weights_[x - 1]
return weights_
def partition_uniform(num_items, num_parts):
parts = [0] * (num_parts + 1)
# First check for the trivial edge case
if num_items <= num_parts:
for p in range(num_parts + 1):
parts[p] = min(p, num_items)
return parts
chunksize = floor(num_items / num_parts)
for p in range(num_parts):
parts[p] = min(chunksize * p, num_items)
parts[num_parts] = num_items
return parts
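

# Illustration of partition_uniform: the result holds num_parts + 1 boundaries,
# so part p owns items parts[p]:parts[p + 1]. The sizes below are hypothetical.
def _example_partition_uniform():
    return partition_uniform(num_items=10, num_parts=3)  # -> [0, 3, 6, 10]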
def _lprobe(weights, num_parts, bottleneck):
num_items = len(weights)
total_weight = weights[-1]
# initialize partitioning
parts = [0] * (num_parts + 1)
for p in range(1, num_parts + 1):
parts[p] = num_items
bsum = bottleneck # running sum of target weight for pth partition
chunksize = num_items // num_parts
step = chunksize
for p in range(1, num_parts):
# Jump to the next bucket
while (step < num_items) and (weights[step] < bsum):
step += chunksize
# Find the end index of partition p
parts[p] = bisect_left(weights, bsum, lo=step - chunksize, hi=min(step, num_items))
# Nothing more to partition, return early
if parts[p] == num_items:
# See if the current partition is overweight.
part_size = weights[-1] - weights[parts[p - 1]]
return parts, part_size < bottleneck
# Next partition target
bsum = weights[parts[p] - 1] + bottleneck
return parts, bsum >= total_weight
def _rb_partition_balanced(weights, num_parts, eps):
total_weight = weights[-1]
lower = total_weight / num_parts # best case heaviest partition
upper = total_weight # worst case heaviest partition
# Do a binary search for the best partitioning
while upper > lower + eps:
mid = lower + ((upper - lower) / 2)
parts, success = _lprobe(weights, num_parts, mid)
if success:
upper = mid
else:
lower = mid + eps
return upper
def partition_balanced(weights, num_parts, eps=1e-3):
num_items = len(weights)
# First check for the trivial edge case
if num_items <= num_parts:
return partition_uniform(num_items, num_parts)
weights_ = prefix_sum_inc(weights)
# Find the smallest bottleneck (weight of heaviest partition)
bottleneck = _rb_partition_balanced(weights_, num_parts, eps=eps)
# Now compute that partitioning
parts, success = _lprobe(weights_, num_parts, bottleneck)
assert success
return parts
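

# Illustration of partition_balanced with toy weights: grouping the first two
# items minimizes the heaviest part (7 vs. 5), giving boundaries [0, 2, 3].
def _example_partition_balanced():
    return partition_balanced(weights=[3, 4, 5], num_parts=2)  # -> [0, 2, 3]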
class PartitionedTensor:
def __init__(self, tensor, group, partition_meta=None):
super().__init__()
self.group = group
self.num_parts = dist.get_world_size(group=self.group)
self.rank = dist.get_rank(group=self.group)
self.orig_size = list(tensor.size())
self.orig_device = tensor.device
self.local_data, self.partition = self._partition_tensor(tensor)
@classmethod
def from_meta(cls, meta, local_part, group, device=get_accelerator().device_name()):
assert meta.dtype == torch.long
dummy = torch.ones(dist.get_world_size(group=group))
part_obj = cls(tensor=dummy, group=group)
meta = meta.tolist()
# [N, list0, ..., listN-1]
part_obj.orig_size = meta[1:(1 + meta[0])]
meta = meta[1 + meta[0]:]
part_obj.orig_device = device
part_obj.local_data = local_part.detach()
part_obj.group = group
# Partition is encoded like the rowptr of a CSR matrix:
# [num_parts, rank, 0, part_1, ..., part_num_parts]
# TODO: support shuffle between different partition granularities
assert part_obj.num_parts == meta[0]
assert part_obj.rank == meta[1]
part_obj.partition = meta[2:] # length num_parts+1
return part_obj
def _partition_tensor(self, tensor):
partition = partition_uniform(num_items=tensor.numel(), num_parts=self.num_parts)
start = partition[self.rank]
length = partition[self.rank + 1] - start
tensor_part = tensor.detach().contiguous().view(-1).narrow(0, start=start, length=length).clone()
return tensor_part, partition
def full(self, device=None):
if device is None:
device = self.orig_device
# Allocate the full tensor as a flat buffer.
full_numel = prod(self.full_size())
flat_tensor = torch.zeros([full_numel], dtype=self.local_data.dtype, device=device)
# Prepare all-gather buffer
partition_tensors = []
for part_id in range(self.num_parts):
part_size = self.partition[part_id + 1] - self.partition[part_id]
buf = flat_tensor.narrow(0, start=self.partition[part_id], length=part_size)
if part_id == self.rank:
buf.copy_(self.local_data)
partition_tensors.append(buf)
# Collect the full tensor
dist.all_gather(partition_tensors, partition_tensors[self.rank], group=self.group)
for i in range(len(partition_tensors)):
partition_tensors[i].data = torch.zeros(1)
partition_tensors[i] = None
return flat_tensor.view(self.full_size()).clone().detach()
def to_meta(self):
"""Returns a torch.LongTensor that encodes partitioning information.
Can be used along with ``data()`` to serialize a ``PartitionedTensor`` for
communication.
Returns:
torch.LongTensor: a tensor encoding the meta-information for the partitioning
"""
meta = []
meta.append(len(self.orig_size))
meta += list(self.orig_size)
meta.append(self.num_parts)
meta.append(self.rank)
meta += self.partition
return torch.LongTensor(data=meta).to(self.orig_device)
def data(self):
return self.local_data
def local_size(self):
return self.local_data.size()
def full_size(self):
return self.orig_size
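

# A hedged round-trip sketch for PartitionedTensor. It needs an initialized
# deepspeed.comm process group, so the `tensor` and `group` arguments are
# placeholders supplied by the caller rather than something runnable here.
def _sketch_partitioned_tensor_roundtrip(tensor, group):
    part = PartitionedTensor(tensor, group=group)
    # to_meta()/data() are enough to ship a partition to another rank ...
    meta, local = part.to_meta(), part.data()
    rebuilt = PartitionedTensor.from_meta(meta, local, group=group)
    # ... and full() all-gathers the pieces back into the original shape.
    return rebuilt.full(device=tensor.device)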
mem_alloced = 0
mem_cached = 0
def memory_status(msg, print_rank=-1, reset_max=False):
global mem_alloced, mem_cached
rank = dist.get_rank()
if print_rank != -1 and rank != print_rank:
return
get_accelerator().synchronize()
if reset_max:
get_accelerator().reset_max_memory_cached()
get_accelerator().reset_max_memory_allocated()
new_alloced = get_accelerator().memory_allocated()
new_cached = get_accelerator().memory_cached()
delta_alloced = new_alloced - mem_alloced
delta_cached = new_cached - mem_cached
mem_cached = new_cached
mem_alloced = new_alloced
max_alloced = get_accelerator().max_memory_allocated()
max_cached = get_accelerator().max_memory_cached()
# convert to GB for printing
new_alloced /= 1024**3
new_cached /= 1024**3
delta_alloced /= 1024**3
delta_cached /= 1024**3
max_alloced /= 1024**3
max_cached /= 1024**3
print(
f'RANK={rank} MEMSTATS', msg, f'device={get_accelerator().current_device_name()} '
f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)')
def get_ma_status():
if dist.is_initialized() and not dist.get_rank() == 0:
return 0
return get_accelerator().memory_allocated()
def empty_cache():
get_accelerator().empty_cache()
get_accelerator().reset_peak_memory_stats()
def see_memory_usage(message, force=False):
if not force:
return
if dist.is_initialized() and not dist.get_rank() == 0:
return
# python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports
gc.collect()
# Print message except when distributed but not rank 0
logger.info(message)
logger.info(f"MA {round(get_accelerator().memory_allocated() / (1024 * 1024 * 1024),2 )} GB \
Max_MA {round(get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \
CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \
Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 1024))} GB ")
vm_stats = psutil.virtual_memory()
used_GB = round(((vm_stats.total - vm_stats.available) / (1024**3)), 2)
logger.info(f'CPU Virtual Memory: used = {used_GB} GB, percent = {vm_stats.percent}%')
# get the peak memory to report correct data, so reset the counter for the next call
get_accelerator().reset_peak_memory_stats()
def call_to_str(base, *args, **kwargs):
"""Construct a string representation of a call.
Args:
base (str): name of the call
args (tuple, optional): args to ``base``
kwargs (dict, optional): kwargs supplied to ``base``
Returns:
str: A string representation of base(*args, **kwargs)
"""
name = f'{base}('
if args:
name += ', '.join(repr(arg) for arg in args)
if kwargs:
name += ', '
if kwargs:
name += ', '.join(f'{key}={repr(arg)}' for key, arg in kwargs.items())
name += ')'
return name
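

# Illustration of call_to_str, e.g. for logging pipeline instructions; the
# call name and arguments below are made up.
def _example_call_to_str():
    return call_to_str('forward', 1, 2, checkpoint=True)  # -> "forward(1, 2, checkpoint=True)"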
def get_only_unique_item(items):
item_set = set(items)
if len(item_set) != 1:
raise RuntimeError(f"expected there to be only one unique element in {items}")
unique_item, = item_set
return unique_item
def clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, eps=1e-6):
"""Clip the gradient of a list of parameters.
Args:
parameters: List of parameters whose .grad will be clipped.
global_grad_norm (float, optional): Precomputed gradient norm. Defaults to None.
mpu (optional): model parallelism unit. Defaults to None.
eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6
Returns:
float: the global gradient norm
"""
if global_grad_norm is None:
global_grad_norm = get_grad_norm(parameters, mpu=mpu)
clip_coef = max_norm / (global_grad_norm + eps)
if clip_coef < 1:
for p in parameters:
p.grad.detach().mul_(clip_coef)
return global_grad_norm
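

# A minimal sketch of clip_gradients: when a precomputed global_grad_norm is
# supplied, only the rescaling path runs, so plain CPU tensors suffice. The
# norm value 4.0 below is hypothetical.
def _example_clip_gradients():
    p = torch.nn.Parameter(torch.ones(4))
    p.grad = torch.full((4,), 2.0)
    clip_gradients([p], max_norm=1.0, global_grad_norm=4.0)
    return p.grad  # scaled by ~1.0 / 4.0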
def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None):
"""Get norm of an iterable of tensors.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Taken from Nvidia Megatron.
Arguments:
input_tensors (Iterable[Tensor]): an iterable of Tensors will have norm computed
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the tensors (viewed as a single vector).
"""
assert isinstance(input_tensors, Iterable), f'expected Iterable type not {type(input_tensors)}'
assert all([torch.is_tensor(t) for t in input_tensors]), f'expected list of only tensors'
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(t.data.abs().max() for t in input_tensors)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = sum([t.data.float().norm(norm_type).item()**norm_type for t in input_tensors])
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
if mpu is not None:
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group())
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
def clip_tensors_by_global_norm(input_tensors, max_norm=1.0, global_norm=None, mpu=None, eps=1e-6):
"""Clip list of tensors by global norm.
Args:
input_tensors: List of tensors to be clipped
global_norm (float, optional): Precomputed norm. Defaults to None.
mpu (optional): model parallelism unit. Defaults to None.
eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6
Returns:
float: the global norm
"""
if global_norm is None:
global_norm = get_global_norm_of_tensors(input_tensors, mpu=mpu)
clip_coef = max_norm / (global_norm + eps)
if clip_coef < 1:
for t in input_tensors:
t.detach().mul_(clip_coef)
return global_norm
def align_dense_tensors(tensor_list, alignment):
num_elements = sum(t.numel() for t in tensor_list)
remaining = num_elements % alignment
if remaining:
elements_to_add = alignment - remaining
pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype)
padded_tensor_list = tensor_list + [pad_tensor]
else:
padded_tensor_list = tensor_list
return padded_tensor_list
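

# Illustration of align_dense_tensors: the tensor list is padded so the total
# element count becomes a multiple of the alignment (sizes are hypothetical).
def _example_align_dense_tensors():
    tensors = [torch.ones(3), torch.ones(2)]  # 5 elements in total
    padded = align_dense_tensors(tensors, alignment=4)
    # A 3-element zero tensor is appended so that 5 + 3 is divisible by 4.
    return sum(t.numel() for t in padded)  # -> 8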
def all_gather_dp_groups(partitioned_param_groups, dp_process_group, start_alignment_factor, allgather_bucket_size):
for group_id, partitioned_params in enumerate(partitioned_param_groups):
# Sequential AllGather Best of both worlds
partition_id = dist.get_rank(group=dp_process_group[group_id])
dp_world_size = dist.get_world_size(group=dp_process_group[group_id])
num_shards = max(1, partitioned_params[partition_id].numel() * dp_world_size // allgather_bucket_size)
shard_size = partitioned_params[partition_id].numel() // num_shards
# Enforce nccl/rccl alignment of start location of each shard
shard_size = shard_size - (shard_size % start_alignment_factor)
num_elements = shard_size
assert shard_size * num_shards <= partitioned_params[partition_id].numel()
for shard_id in range(num_shards):
if shard_id == (num_shards - 1):
num_elements = partitioned_params[partition_id].numel() - shard_id * shard_size
shard_list = []
for dp_id in range(dp_world_size):
curr_shard = partitioned_params[dp_id].narrow(0, shard_id * shard_size, num_elements).detach()
shard_list.append(curr_shard)
dist.all_gather(shard_list, shard_list[partition_id], dp_process_group[group_id])
class TLinear(torch.nn.Linear):
def __init__(self, orig_layer, name=""):
self.name = name
super().__init__(orig_layer.weight.shape[1], orig_layer.weight.shape[0], bias=(orig_layer.bias is not None))
self.weight.data = transpose(orig_layer.weight.data)
self.bias = orig_layer.bias
self._fwd_func = self._fwd_bias_add if self.bias is not None else self._fwd
def _fwd(self, input):
return F.linear(input, self.weight)
def _fwd_bias_add(self, input):
return F.linear(input, self.weight, bias=self.bias)
def forward(self, input):
return self._fwd_func(input)
def get_inactive_params(param_list):
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
return [param for param in param_list if (hasattr(param, 'ds_id') and \
            param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]


# End of file: deepspeed/runtime/utils.py (package: Adeepspeed-0.9.2)
# DeepSpeed Team
import torch
from deepspeed.utils import log_dist
import numpy as np
import logging
class Eigenvalue(object):
def __init__(self,
verbose=False,
max_iter=100,
tol=1e-2,
stability=0,
gas_boundary_resolution=1,
layer_name='',
layer_num=0):
super().__init__()
self.verbose = verbose
self.max_iter = max_iter
self.tol = tol
self.stability = stability
self.gas_boundary_resolution = gas_boundary_resolution
self.layer_name = layer_name
self.layer_num = layer_num
assert len(self.layer_name) > 0 and layer_num > 0
log_dist(
f'enabled eigenvalue with verbose={verbose}, max_iter={max_iter}, tol={tol}, stability={stability}, gas_boundary_resolution={gas_boundary_resolution}, layer_name={layer_name}, layer_num={layer_num}',
ranks=[0])
    # Replace all nan/pos-inf/neg-inf values with zero.
    # TODO: newer PyTorch versions may provide this natively (torch.nan_to_num); switch to it then.
def nan_to_num(self, x):
device = x.device
x = x.cpu().numpy()
x = np.nan_to_num(x=x, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
return torch.from_numpy(x).to(device)
def normalize(self, v):
norm_squared = self.inner_product(v, v)
norm = norm_squared**0.5 + self.stability
normalized_vectors = [vector / norm for vector in v]
normalized_vectors = [self.nan_to_num(vector) for vector in normalized_vectors]
return normalized_vectors
def inner_product(self, xs, ys):
return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])
def get_layers(self, module):
scope_names = self.layer_name.split('.')
assert len(scope_names) > 0
m = module
for name in scope_names:
assert hasattr(m, name), "layer_name configuration is invalid."
m = getattr(m, name)
return m
def compute_eigenvalue(self, module, device=None, scale=1.0):
block_eigenvalue = []
param_keys = []
layers = self.get_layers(module)
for block in range(self.layer_num):
model_block = layers[block]
            # We found that this randn() call can noticeably affect accuracy in some cases,
            # so save and restore the RNG state around it.
rng_state = torch.random.get_rng_state()
if device is None:
v = [
torch.randn(p.size()) for p in model_block.parameters()
if p.grad is not None and p.grad.grad_fn is not None
]
else:
v = [
torch.randn(p.size(), device=device) for p in model_block.parameters()
if p.grad is not None and p.grad.grad_fn is not None
]
torch.random.set_rng_state(rng_state)
grads = [
param.grad for param in model_block.parameters()
if param.grad is not None and param.grad.grad_fn is not None
]
params = [
param for param in model_block.parameters()
if param.grad is not None and param.grad.grad_fn is not None
]
layer_keys = [id(p) for p in model_block.parameters()]
param_keys.append(layer_keys)
v = self.normalize(v)
            # Disable eigenvalue computation if the model doesn't support second-order
            # gradient computation, e.g. when the DeepSpeed transformer kernel is enabled.
if len(grads) == 0 or len(params) == 0:
log_dist(f'The model does NOT support eigenvalue computation.', ranks=[0], level=logging.WARNING)
return []
i = 0
eigenvalue_current, eigenvalue_previous = 1., 0.
while (i < self.max_iter) and abs(eigenvalue_current) > 0 and (abs(
(eigenvalue_current - eigenvalue_previous) / eigenvalue_current) >=
self.tol): # test convergence criteria
eigenvalue_previous = eigenvalue_current
Hv = torch.autograd.grad(grads, params, grad_outputs=v, only_inputs=True, retain_graph=True)
#Hv = [hv.float() for hv in Hv]
Hv = [self.nan_to_num(hv).float() for hv in Hv]
eigenvalue_current = self.inner_product(Hv, v).item()
v = self.normalize(Hv)
v = [x / scale for x in v]
i += 1
eigenvalue_current *= scale
block_eigenvalue.append(eigenvalue_current)
if self.verbose:
log_dist(f'block: {block}, power iteration: {i}, eigenvalue: {eigenvalue_current}', ranks=[0])
block_eigenvalue = self.post_process(block_eigenvalue)
if self.verbose:
log_dist(f'post processed block_eigenvalue: {block_eigenvalue}', ranks=[0])
# {param_id: (eigenvalue, layer_id)}
ev_dict = {}
for i, (layer_keys, value) in enumerate(zip(param_keys, block_eigenvalue)):
ev_dict.update(dict.fromkeys(layer_keys, (value, i)))
return ev_dict
# 1. Map all eigenvalues to [0, 1.0].
# 2. Some layers can't generate valid eigenvalues on fp16 precision, use 1.0 instead.
def post_process(self, value_list):
max_value = abs(max(value_list, key=abs))
        return [abs(v) / max_value if v != 0.0 else 1.0 for v in value_list]


# End of file: deepspeed/runtime/eigenvalue.py (package: Adeepspeed-0.9.2)
# DeepSpeed Team
from collections import OrderedDict
import torch
import sys
import os
from deepspeed import comm as dist
from deepspeed.runtime.constants import PIPE_REPLICATED
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.runtime import ZeROOptimizer
from packaging import version as pkg_version
from deepspeed.git_version_info import version
from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim,
align_dense_tensors, all_gather_dp_groups, bwc_tensor_model_parallel_rank,
is_model_parallel_parameter, see_memory_usage)
from deepspeed.utils import link_hp_params, fragment_address
from deepspeed.checkpoint import enable_universal_checkpoint
from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE,
SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS,
PARAM_SLICE_MAPPINGS)
setattr(sys.modules[__name__], 'fragment_address', fragment_address)
class BF16_Optimizer(ZeROOptimizer):
def __init__(self,
init_optimizer,
param_names,
mpu=None,
clip_grad=0.0,
norm_type=2,
allgather_bucket_size=5000000000,
dp_process_group=None,
timers=None):
super().__init__()
see_memory_usage('begin bf16_optimizer', force=True)
self.timers = timers
self.optimizer = init_optimizer
self.param_names = param_names
self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim)
self.clip_grad = clip_grad
self.norm_type = norm_type
self.mpu = mpu
self.allgather_bucket_size = int(allgather_bucket_size)
self.dp_process_group = dp_process_group
self.dp_rank = dist.get_rank(group=self.dp_process_group)
self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))]
# Load pre-built or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
        # align nccl all-gather send buffers to 4-byte boundary
        self.nccl_start_alignment_factor = 2  # 4-byte alignment / sizeof(fp16) = 2
# Build BF16/FP32 groups
self.bf16_groups = []
self.bf16_groups_flat = []
self.bf16_partitioned_groups = []
self.fp32_groups_flat_partition = []
# Maintain different fp32 gradients views for convenience
self.fp32_groups_gradients = []
self.fp32_groups_gradient_dict = {}
self.fp32_groups_gradients_flat = []
self.fp32_groups_actual_gradients_flat = []
self.fp32_groups_gradient_flat_partition = []
self.fp32_groups_has_gradients = []
self.step_count = 0
self.group_paddings = []
if self.using_real_optimizer:
self._setup_for_real_optimizer()
see_memory_usage('end bf16_optimizer', force=True)
def _setup_for_real_optimizer(self):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
self.partition_count = [dp_world_size for i in range(len(self.optimizer.param_groups))]
for i, param_group in enumerate(self.optimizer.param_groups):
see_memory_usage(f'before initializing group {i}', force=True)
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
# grab the original list
trainable_parameters = [param for param in param_group['params'] if param.requires_grad]
self.bf16_groups.append(trainable_parameters)
# create flat bf16 params
self.bf16_groups_flat.append(
self._flatten_dense_tensors_aligned(self.bf16_groups[i],
self.nccl_start_alignment_factor * dp_world_size))
# Make bf16 params point to flat tensor storage
self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i],
flat_tensor=self.bf16_groups_flat[i])
# divide flat weights into equal sized partitions
partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
bf16_dp_partitions = [
self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size)
for dp_index in range(dp_world_size)
]
self.bf16_partitioned_groups.append(bf16_dp_partitions)
# create fp32 params partition
self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().float().detach())
self.fp32_groups_flat_partition[i].requires_grad = True
num_elem_list = [t.numel() for t in self.bf16_groups[i]]
# create fp32 gradients
self.fp32_groups_gradients_flat.append(torch.zeros_like(self.bf16_groups_flat[i], dtype=torch.float32))
# track individual fp32 gradients for entire model
fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i],
num_elem_list=num_elem_list)
self.fp32_groups_gradients.append(fp32_gradients)
self.fp32_groups_gradient_dict[i] = fp32_gradients
# flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding)
length_without_padding = sum(num_elem_list)
self.fp32_groups_actual_gradients_flat.append(
torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding))
# flat tensor corresponding to gradient partition
self.fp32_groups_gradient_flat_partition.append(
torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size))
# track fp32 gradient updates
self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i]))
# Record padding required for alignment
if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
padding = self.bf16_groups_flat[i].numel() - length_without_padding
else:
padding = 0
self.group_paddings.append(padding)
# update optimizer param groups to reference fp32 params partition
param_group['params'] = [self.fp32_groups_flat_partition[i]]
see_memory_usage(f'after initializing group {i}', force=True)
see_memory_usage('before initialize_optimizer', force=True)
self.initialize_optimizer_states()
see_memory_usage('end initialize_optimizer', force=True)
# Need optimizer states initialized before linking lp to optimizer state
self._link_all_hp_params()
self._enable_universal_checkpoint()
self._param_slice_mappings = self._create_param_mapping()
def _enable_universal_checkpoint(self):
for lp_param_group in self.bf16_groups:
enable_universal_checkpoint(param_list=lp_param_group)
def _create_param_mapping(self):
param_mapping = []
for i, _ in enumerate(self.optimizer.param_groups):
param_mapping_per_group = OrderedDict()
for lp in self.bf16_groups[i]:
if lp._hp_mapping is not None:
lp_name = self.param_names[lp]
param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address()
param_mapping.append(param_mapping_per_group)
return param_mapping
def _link_all_hp_params(self):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
for i, _ in enumerate(self.optimizer.param_groups):
# Link bf16 and fp32 params in partition
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
flat_hp_partition = self.fp32_groups_flat_partition[i]
link_hp_params(lp_param_list=self.bf16_groups[i],
flat_hp_partition=flat_hp_partition,
gradient_dict=self.fp32_groups_gradient_dict,
offload_gradient_dict=None,
use_offload=False,
param_group_index=i,
partition_start=partition_id * partition_size,
partition_size=partition_size,
partition_optimizer_state=self.optimizer.state[flat_hp_partition],
dp_group=self.real_dp_process_group[i])
def initialize_optimizer_states(self):
"""Take an optimizer step with zero-valued gradients to allocate internal
optimizer state.
This helps prevent memory fragmentation by allocating optimizer state at the
beginning of training instead of after activations have been allocated.
"""
for param_partition, grad_partition in zip(self.fp32_groups_flat_partition,
self.fp32_groups_gradient_flat_partition):
param_partition.grad = grad_partition
self.optimizer.step()
self.clear_hp_grads()
def _split_flat_tensor(self, flat_tensor, num_elem_list):
assert sum(num_elem_list) <= flat_tensor.numel()
tensor_list = []
offset = 0
for num_elem in num_elem_list:
dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem)
tensor_list.append(dense_tensor)
offset += num_elem
return tensor_list
def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor):
updated_params = self.unflatten(flat_tensor, tensor_list)
for p, q in zip(tensor_list, updated_params):
p.data = q.data
def _flatten_dense_tensors_aligned(self, tensor_list, alignment):
return self.flatten(align_dense_tensors(tensor_list, alignment))
@torch.no_grad()
def step(self, closure=None):
if closure is not None:
raise NotImplementedError(f'{self.__class__} does not support closure.')
all_groups_norm = get_global_norm_of_tensors(input_tensors=self.get_grads_for_norm(),
mpu=self.mpu,
norm_type=self.norm_type)
self._global_grad_norm = all_groups_norm
assert all_groups_norm > 0.
if self.clip_grad > 0.:
clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True),
max_norm=self.clip_grad,
global_norm=all_groups_norm,
mpu=self.mpu)
self.optimizer.step()
self.update_lp_params()
self.clear_hp_grads()
self.step_count += 1
def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):
"""Perform a backward pass and copy the low-precision gradients to the
high-precision copy.
We copy/accumulate to the high-precision grads now to prevent accumulating in the
        bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1).
        The low-precision grads are deallocated during this procedure.
"""
self.clear_lp_grads()
loss.backward(**bwd_kwargs)
if update_hp_grads:
self.update_hp_grads(clear_lp_grads=clear_lp_grads)
@torch.no_grad()
def update_hp_grads(self, clear_lp_grads=False):
for i, group in enumerate(self.bf16_groups):
for j, lp in enumerate(group):
if lp.grad is None:
continue
hp_grad = self.fp32_groups_gradients[i][j]
assert hp_grad is not None, \
f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{i}][{j}]'
hp_grad.data.add_(lp.grad.data.to(hp_grad.dtype).view(hp_grad.shape))
lp._hp_grad = hp_grad
self.fp32_groups_has_gradients[i][j] = True
# clear gradients
if clear_lp_grads:
lp.grad = None
@torch.no_grad()
def get_grads_for_reduction(self):
return self.fp32_groups_gradients_flat
@torch.no_grad()
def get_grads_for_norm(self, for_clipping=False):
grads = []
tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
for i, group in enumerate(self.bf16_groups):
for j, lp in enumerate(group):
if not for_clipping:
if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated:
continue
if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp)):
continue
if not self.fp32_groups_has_gradients[i][j]:
continue
grads.append(self.fp32_groups_gradients[i][j])
return grads
@torch.no_grad()
def update_lp_params(self):
for i, (bf16_partitions,
fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
bf16_partitions[partition_id].data.copy_(fp32_partition.data)
# print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True)
# if i == 0:
# print_rank_0(f'{fp32_partition[:10]=}', force=True)
all_gather_dp_groups(partitioned_param_groups=self.bf16_partitioned_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
def clear_hp_grads(self):
for flat_gradients in self.fp32_groups_gradients_flat:
flat_gradients.zero_()
for i, group in enumerate(self.fp32_groups_gradients):
self.fp32_groups_has_gradients[i] = [False] * len(group)
def clear_lp_grads(self):
for group in self.bf16_groups:
for param in group:
param.grad = None
def state_dict(self):
state_dict = {}
state_dict[CLIP_GRAD] = self.clip_grad
state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()
state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition
state_dict[GROUP_PADDINGS] = self.group_paddings
state_dict[PARTITION_COUNT] = self.partition_count
state_dict[DS_VERSION] = version
state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings
return state_dict
    # Restore base optimizer fp32 weights from bfloat16 weights
def _restore_from_bit16_weights(self):
for i, group in enumerate(self.bf16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
for bf16_partitions, fp32_partition in zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition):
fp32_partition.data.copy_(bf16_partitions[partition_id].data)
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
def load_state_dict(self,
state_dict_list,
checkpoint_folder,
load_optimizer_states=True,
load_from_fp32_weights=False):
if checkpoint_folder:
self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights)
else:
self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights)
def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False):
dp_rank = dist.get_rank(group=self.dp_process_group)
current_rank_sd = state_dict_list[dp_rank]
ckpt_version = current_rank_sd.get(DS_VERSION, False)
assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed"
ckpt_version = pkg_version.parse(ckpt_version)
self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad)
if load_optimizer_states:
self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
if load_from_fp32_weights:
for current, saved in zip(self.fp32_groups_flat_partition,
current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
src_tensor = _get_padded_tensor(saved, current.numel())
current.data.copy_(src_tensor.data)
if load_optimizer_states:
self._link_all_hp_params()
def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights):
self._load_hp_checkpoint_state(checkpoint_folder)
@property
def param_groups(self):
"""Forward the wrapped optimizer's parameters."""
return self.optimizer.param_groups
def _load_hp_checkpoint_state(self, checkpoint_dir):
checkpoint_dir = os.path.join(checkpoint_dir, "zero")
tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
tp_world_size = self.mpu.get_slice_parallel_world_size()
for i, _ in enumerate(self.optimizer.param_groups):
for lp in self.bf16_groups[i]:
if lp._hp_mapping is not None:
#print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}")
lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank,
tp_world_size)
def _get_padded_tensor(src_tensor, size):
if src_tensor.numel() >= size:
return src_tensor
padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
slice_tensor.data.copy_(src_tensor.data)
    return padded_tensor


# End of file: deepspeed/runtime/bf16_optimizer.py (package: Adeepspeed-0.9.2)
# DeepSpeed Team
"""
Implementation of a compressed sparse tensor. Similar in
functionality to TensorFlow's IndexedSlices implementation.
"""
import torch
class SparseTensor(object):
""" Compressed Sparse Tensor """
def __init__(self, dense_tensor=None):
self.orig_dense_tensor = dense_tensor
self.is_sparse = dense_tensor.is_sparse
if dense_tensor is not None:
if dense_tensor.is_sparse:
dense_tensor = dense_tensor.coalesce()
self.indices = dense_tensor.indices().flatten()
self.values = dense_tensor.values()
else:
result = torch.sum(dense_tensor, dim=1)
self.indices = result.nonzero().flatten()
self.values = dense_tensor[self.indices]
self.dense_size = list(dense_tensor.size())
else:
self.indices = None
self.values = None
self.dense_size = None
def to_coo_tensor(self):
return torch.sparse_coo_tensor(self.indices.unsqueeze(0), self.values, self.dense_size)
@staticmethod
def type():
return "deepspeed.SparseTensor"
def to_dense(self):
it = self.indices.unsqueeze(1)
full_indices = torch.cat([it for _ in range(self.dense_size[1])], dim=1)
return self.values.new_zeros(self.dense_size).scatter_add_(0, full_indices, self.values)
def sparse_size(self):
index_size = list(self.indices.size())
index_size = index_size[0]
value_size = list(self.values.size())
value_size = value_size[0] * value_size[1]
dense_size = self.dense_size[0] * self.dense_size[1]
return index_size + value_size, dense_size
def add(self, b):
assert self.dense_size == b.dense_size
self.indices = torch.cat([self.indices, b.indices])
self.values = torch.cat([self.values, b.values])
def __str__(self):
sparse_size, dense_size = self.sparse_size()
return "DeepSpeed.SparseTensor(indices_size={}, values_size={}, " \
"dense_size={}, device={}, reduction_factor={})".format(
self.indices.size(), self.values.size(), self.dense_size,
self.indices.get_device(), dense_size / sparse_size
)
def __repr__(self):
        return self.__str__()


# End of file: deepspeed/runtime/sparse_tensor.py (package: Adeepspeed-0.9.2)
# DeepSpeed Team
from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject
#########################################
# DeepSpeed Activation Checkpointing
#########################################
# Activation checkpointing saves memory by keeping only a select few
# activations for the backward pass.
ACTIVATION_CHKPT_FORMAT = '''
Activation Checkpointing should be configured as:
"session_params": {
"activation_checkpointing": {
"partitioned_activations": [true|false],
"number_checkpoints": 100,
"contiguous_memory_optimization": [true|false],
"cpu_checkpointing": [true|false]
"profile": [true|false],
"synchronize_checkpoint_boundary": [true|false],
}
}
'''
ACT_CHKPT_PARTITION_ACTIVATIONS = 'partition_activations'
ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT = False
ACT_CHKPT_NUMBER_CHECKPOINTS = 'number_checkpoints'
ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT = None
ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION = 'contiguous_memory_optimization'
ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT = False
ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY = 'synchronize_checkpoint_boundary'
ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT = False
ACT_CHKPT_PROFILE = 'profile'
ACT_CHKPT_PROFILE_DEFAULT = False
ACT_CHKPT_CPU_CHECKPOINTING = 'cpu_checkpointing'
ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT = False
ACT_CHKPT = 'activation_checkpointing'
ACT_CHKPT_DEFAULT = {
ACT_CHKPT_PARTITION_ACTIVATIONS: ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT,
ACT_CHKPT_NUMBER_CHECKPOINTS: ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT,
ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION: ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT,
ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY: ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT,
ACT_CHKPT_PROFILE: ACT_CHKPT_PROFILE_DEFAULT,
ACT_CHKPT_CPU_CHECKPOINTING: ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT
}
class DeepSpeedActivationCheckpointingConfig(DeepSpeedConfigObject):
def __init__(self, param_dict):
super(DeepSpeedActivationCheckpointingConfig, self).__init__()
self.partition_activations = None
self.contiguous_memory_optimization = None
self.cpu_checkpointing = None
self.number_checkpoints = None
self.synchronize_checkpoint_boundary = None
self.profile = None
if ACT_CHKPT in param_dict.keys():
act_chkpt_config_dict = param_dict[ACT_CHKPT]
else:
act_chkpt_config_dict = ACT_CHKPT_DEFAULT
self._initialize(act_chkpt_config_dict)
def _initialize(self, act_chkpt_config_dict):
self.partition_activations = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_PARTITION_ACTIVATIONS,
ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT)
self.contiguous_memory_optimization = get_scalar_param(act_chkpt_config_dict,
ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION,
ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT)
self.cpu_checkpointing = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_CPU_CHECKPOINTING,
ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT)
self.number_checkpoints = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_NUMBER_CHECKPOINTS,
ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT)
self.profile = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_PROFILE, ACT_CHKPT_PROFILE_DEFAULT)
self.synchronize_checkpoint_boundary = get_scalar_param(act_chkpt_config_dict,
ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY,
                                                                ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT)


# End of file: deepspeed/runtime/activation_checkpointing/config.py (package: Adeepspeed-0.9.2)
# DeepSpeed Team
"""
Partitions the activations stored for backward propagation, thereby reducing
memory consumption. Also implements CPU checkpointing and contiguous memory
checkpointing, which further reduce memory consumption and memory
fragmentation.
Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py
b886b7bb972afe72bac0f5de4f42a4a7bae8ebef
"""
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import copy
import torch
import contextlib
from deepspeed import comm as dist
import mmap
from torch import _C
from deepspeed.runtime.config import DeepSpeedConfig
from deepspeed.utils import logger
from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank
from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers
from deepspeed.accelerator import get_accelerator
# DeepSpeed Checkpointing Enabled or Disabled
deepspeed_checkpointing_enabled = False
# MP parameters
mpu = None
mp_rank = None
mp_size = None
mp_group = None
# Model Parameters
num_layers = None
# Checkpointing buffers
contiguous_data_buffers = []
data_offsets = []
contiguous_size_buffers = []
size_offsets = []
timers = None
# optimization flags
PARTITION_ACTIVATIONS = False
CPU_CHECKPOINT = False
CONTIGUOUS_CHECKPOINTING = False
SYNCHRONIZE = False
PROFILE_TIME = False
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
transport_stream = None
cuda_device = None
def detach_variable(inputs, device=None):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
if not isinstance(inp, torch.Tensor):
out.append(inp)
continue
requires_grad = inp.requires_grad
if device is not None:
x = inp.to(device=device)
else:
x = inp
x = x.detach()
x.requires_grad = requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError("Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)
def _set_cuda_rng_state(new_state, device=-1):
"""Sets the random number generator state of the current GPU.
Arguments:
new_state (torch.ByteTensor): The desired state
    This function is adapted from the PyTorch repo (torch.cuda.set_rng_state) #ignore-cuda
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for runs with 4 or more GPUs.
"""
if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
# older PyTorch
def cb():
with get_accelerator().device(device):
_C._cuda_setRNGState(new_state)
else:
# newer PyTorch
if device == -1:
device = torch.device(get_accelerator().device_name())
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device(get_accelerator().device_name(), device)
def cb():
idx = device.index
if idx is None:
idx = get_accelerator().current_device()
default_generator = get_accelerator().default_generator(idx)
default_generator.set_state(new_state)
get_accelerator().lazy_call(cb)
class CudaRNGStatesTracker:
"""Tracker for the cuda RNG states.
Using the `add` method, a cuda rng state is initialized based on
the input `seed` and is assigned to `name`. Later, by forking the
rng state, we can perform operations and return to our starting
cuda state.
"""
def __init__(self):
# Map from a string name to the cuda rng state.
self.states_ = {}
        # Seeds are just for bookkeeping and to ensure no seed is set twice.
self.seeds_ = set()
def reset(self):
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
def get_states(self):
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
return copy.copy(self.states_)
def set_states(self, states):
"""Set the rng states. For efficiency purposes, we do not check
the size of seed for compatibility."""
self.states_ = states
def add(self, name, seed):
"""Track the rng state."""
# Check seed is not already used.
if seed in self.seeds_:
raise Exception('seed {} already exists'.format(seed))
self.seeds_.add(seed)
# Check that state is not already defined.
if name in self.states_:
raise Exception('cuda rng state {} already exists'.format(name))
# Get the current rng state.
orig_rng_state = get_accelerator().get_rng_state()
# Set the new state and store it.
get_accelerator().manual_seed(seed)
self.states_[name] = get_accelerator().get_rng_state()
# Reset rng state to what it was.
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
raise Exception('cuda rng state {} is not added'.format(name))
# Store current rng state.
orig_cuda_rng_state = get_accelerator().get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = get_accelerator().get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
# RNG tracker object.
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
"""Get cuda rng tracker."""
return _CUDA_RNG_STATE_TRACKER
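

# A hedged usage sketch for the RNG tracker: it assumes an accelerator with
# RNG-state support (e.g. a CUDA device), so it is not runnable on its own.
# The state name and seed are arbitrary placeholders.
def _sketch_rng_tracker_fork():
    tracker = get_cuda_rng_tracker()
    tracker.add('example-state', 1234)
    with tracker.fork('example-state'):
        # Random ops in this block consume the 'example-state' stream; the
        # original device RNG state is restored on exit.
        noise = torch.randn(2, 2, device=get_accelerator().device_name())
    return noise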
def model_parallel_cuda_manual_seed(seed):
"""Initialize model parallel cuda seed.
This function should be called after the model parallel is
initialized. Also, no get_accelerator().manual_seed should be called
after this function. Basically, this is replacement for that
function.
Two set of RNG states are tracked:
default state: This is for data parallelism and is the same among a
set of model parallel GPUs but different across
different model parallel groups. This is used for
example for dropout in the non-model-parallel regions.
model-parallel state: This state is different among a set of model
parallel GPUs, but the same across data parallel
groups. This is used for example for dropout in
model parallel regions.
"""
global mpu
tp_rank = bwc_tensor_model_parallel_rank(mpu)
# 2718 is just for fun and any POSITIVE value will work.
offset = seed + 2718
model_parallel_seed = offset + tp_rank
# Data parallel gets the original seed.
data_parallel_seed = seed
if dist.get_rank() == 0:
logger.info(
'> initializing model parallel cuda seeds on global rank {}, '
'model parallel rank {}, and data parallel rank {} with '
'model parallel seed: {} and data parallel seed: {}'.format(dist.get_rank(), tp_rank,
mpu.get_data_parallel_rank(),
model_parallel_seed, data_parallel_seed), )
_CUDA_RNG_STATE_TRACKER.reset()
# Set the default state.
get_accelerator().manual_seed(data_parallel_seed)
# and model parallel state.
_CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed)
def get_partition_start(item):
global mp_rank, mp_size, mp_group
size = item.numel()
partition_size = size / mp_size
start = partition_size * mp_rank
return int(start)
def get_partition_size(item):
global mp_rank, mp_size, mp_group
size = item.numel()
    assert size % mp_size == 0, "Cannot partition the activation: its number of elements is not divisible by mp_size"
partition_size = size / mp_size
return int(partition_size)
def gather_partitioned_activations(tensors, device=None):
global mp_rank, mp_size, mp_group
assert len(tensors) % 2 == 0, f'Expected even count of tensors, instead got {len(tensors)}'
inputs = []
num_args = int(len(tensors) / 2)
for i in range(num_args):
item = tensors[2 * i]
size = tensors[2 * i + 1]
if not is_activation_to_checkpoint(item):
inputs.append(item)
continue
# don't need to do all_gather if model parallel is not enabled
if mp_group is None or mp_size == 1:
item = item.view(list(size.numpy()))
inputs.append(item)
continue
partition_size = item.numel()
tensor_size = partition_size * mp_size
if device is not None:
flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device)
else:
flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device)
partitions = []
for i in range(mp_size):
part_i = flat_tensor.narrow(0, partition_size * i, partition_size)
if i == mp_rank:
part_i.copy_(item)
partitions.append(part_i)
dist.all_gather(partitions, partitions[mp_rank], group=mp_group)
input_tensor = flat_tensor.view(list(size.numpy()))
item.data = input_tensor.data
inputs.append(item)
return tuple(inputs)
def extract_tensors(all_objects):
"""
Separate objects in list/tuple into tensors and non-tensors and create a mapping to enable re-aggregation.
The order of tensors and non-tensors is preserved in their respective output groups.
Parameters:
all_objects (list/tuple): Objects containing tensors and non-tensors to be split.
Returns:
tuple: Containing tensors, non-tensors, and bools of whether each position in original list/tuple was a tensor.
"""
tensor_objects = [v for v in all_objects if torch.is_tensor(v)]
non_tensor_objects = [v for v in all_objects if not torch.is_tensor(v)]
tensor_flags = [torch.is_tensor(v) for v in all_objects]
if type(all_objects) is tuple:
return tuple(tensor_objects), tuple(non_tensor_objects), tuple(tensor_flags)
return tensor_objects, non_tensor_objects, tensor_flags
def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags):
"""
Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple).
Parameters:
tensor_objects (list/tuple): Tensors to merge.
non_tensor_objects (list/tuple): Non-tensors to merge.
tensor_flags (list/tuple): Indicates whether each position in output is a tensor.
Returns:
tuple: Merge of tensors and non-tensors
"""
merged_objects = []
tensor_idx = 0
non_tensor_idx = 0
real_tensor_flags = None
# remove the flags that are assigned to the size of the flattened tensors
if PARTITION_ACTIVATIONS:
real_tensor_flags = []
previous_flag = False
for flag in tensor_flags:
if previous_flag:
previous_flag = False
continue
previous_flag = flag
real_tensor_flags.append(flag)
else:
real_tensor_flags = tensor_flags
for is_tensor in real_tensor_flags:
if is_tensor:
merged_objects.append(tensor_objects[tensor_idx])
tensor_idx += 1
else:
merged_objects.append(non_tensor_objects[non_tensor_idx])
non_tensor_idx += 1
return tuple(merged_objects)
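# Round-trip sketch (commented out; illustrative only, assuming activation
# partitioning is disabled so no interleaved size flags are filtered out):
# extract_tensors() splits a mixed argument tuple and merge_tensors() restores
# the original ordering from the recorded flags.
#
#   mixed = (torch.ones(2), 'label', 3)
#   tensors, non_tensors, flags = extract_tensors(all_objects=mixed)
#   restored = merge_tensors(tensor_objects=tensors,
#                            non_tensor_objects=non_tensors,
#                            tensor_flags=flags)
#   assert restored[1] == 'label' and restored[2] == 3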
def is_activation_to_checkpoint(item):
"""
Return True if item is an activation tensor that should be checkpointed.
"""
global mp_size
return torch.is_tensor(item) and item.is_floating_point() and item.numel() >= mp_size
def partition_activations(args, cpu_checkpoint, contiguous_checkpoint):
global contiguous_data_buffers, data_offsets
inputs = []
num_non_fp_tensors = 0
for arg_index, item in enumerate(args):
if not is_activation_to_checkpoint(item):
inputs.append(item)
num_non_fp_tensors += 1
continue
i = arg_index - num_non_fp_tensors
partition_size = get_partition_size(item)
partition = item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), partition_size).clone()
buffer_device = torch.device('cpu') if cpu_checkpoint else partition.device
if contiguous_checkpoint:
if i >= len(contiguous_data_buffers):
tensor_list = [
torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
for _ in range(num_layers)
]
contiguous_data_buffers.append(tensor_list)
data_offsets.append(0)
elif contiguous_data_buffers[i] is None:
tensor_list = [
torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
for _ in range(num_layers)
]
contiguous_data_buffers[i] = tensor_list
data_offsets[i] = 0
# Because the 'new_empty' returns uninitialized pages,
# the pages need to be populated during the cudaMemcpy time
# which increases the data copy time. To avoid this, we
# pre-populate these pages by simply writing 0 ahead of
# the actual cudaMemcpy operation time. Due to the
# previously launched GPU kernels, there is a small
# window of time here for CPUs to populate pages asynchronously.
contiguous_data_buffers[i][data_offsets[i]].data[range(
0, contiguous_data_buffers[i][data_offsets[i]].data.shape[0],
int(mmap.PAGESIZE / contiguous_data_buffers[i][data_offsets[i]].data.element_size()))] = 0
contiguous_partition = contiguous_data_buffers[i][data_offsets[i]].data.copy_(partition.data)
data_offsets[i] = data_offsets[i] + 1
inputs.append(contiguous_partition)
else:
partition = partition.cpu() if cpu_checkpoint else partition
inputs.append(partition)
return inputs
def get_partitioned_activations_for_backward(args, inputs, contiguous_checkpoint):
global contiguous_size_buffers, size_offsets
new_args = []
num_non_fp_tensors = 0
for arg_index, (arg, inp) in enumerate(zip(args, inputs)):
size = torch.tensor(arg.size()) if torch.is_tensor(arg) else None
if not is_activation_to_checkpoint(arg):
new_args.append(arg)
new_args.append(size)
num_non_fp_tensors += 1
continue
arg.data = inp.data
new_args.append(arg)
i = arg_index - num_non_fp_tensors
if contiguous_checkpoint:
numel = size.numel()
if i >= len(contiguous_size_buffers):
tmp = torch.tensor(())
contiguous_size_buffers.append(
tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device))
size_offsets.append(0)
elif contiguous_size_buffers[i] is None:
tmp = torch.tensor(())
contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device)
size_offsets[i] = 0
contiguous_size = contiguous_size_buffers[i].narrow(0, size_offsets[i], numel).data.copy_(size.data)
contiguous_size = contiguous_size.view_as(size)
size_offsets[i] = size_offsets[i] + numel
new_args.append(contiguous_size)
else:
new_args.append(size)
return new_args
def get_cpu_activations_for_backward(args, inputs):
new_args = []
for i, (arg, inp) in enumerate(zip(args, inputs)):
if not is_activation_to_checkpoint(arg):
new_args.append(arg)
continue
arg.data = inp.data
new_args.append(arg)
return new_args
class CheckpointFunction(torch.autograd.Function):
"""This function is adapted from torch.utils.checkpoint with
the following changes:
1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`  #ignore-cuda
2) the states in the model parallel tracker are also properly
tracked/set/reset.
3) Performance-oriented activation partitioning and contiguous memory optimization
4) CPU checkpointing
5) Profiling of the forward and backward functions
"""
@staticmethod
def forward(ctx, run_function, all_outputs, *args):
global mpu, timers, SYNCHRONIZE, PROFILE_TIME
def save_args_for_backward(*all_args):
tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
ctx.deepspeed_saved_tensors = tensor_args
ctx.non_tensor_args = non_tensor_args
ctx.tensor_flags = tensor_flags
if SYNCHRONIZE:
get_accelerator().synchronize()
if timers is None and PROFILE_TIME:
timers = Timers()
if PROFILE_TIME:
timers('forward').start()
ctx.run_function = run_function
global num_layers
global mp_rank, mp_size, mp_group
global contiguous_data_buffers, contiguous_size_buffers
global data_offsets, size_offsets
if mp_rank is None:
if mpu is not None:
if hasattr(mpu, 'get_tensor_model_parallel_rank'):
mp_rank = mpu.get_tensor_model_parallel_rank()
mp_size = mpu.get_tensor_model_parallel_world_size()
mp_group = mpu.get_tensor_model_parallel_group()
else:
mp_rank = mpu.get_model_parallel_rank()
mp_size = mpu.get_model_parallel_world_size()
mp_group = mpu.get_model_parallel_group()
else:
mp_rank = 0
mp_size = 1
mp_group = None
global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset
if cuda_device is None:
see_memory_usage("First Forward Beginning", force=False)
if dist.get_rank() == 0:
logger.info(f"Activation Checkpointing Information")
logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}")
logger.info(
f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers")
logger.info(f"----Synchronization {SYNCHRONIZE}")
logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}")
cuda_device = get_accelerator().current_device_name()
transport_stream = get_accelerator().Stream(device=cuda_device)
if PARTITION_ACTIVATIONS:
inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING)
elif CPU_CHECKPOINT:
inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint)
# just in case something funky is happening such as reuse of inputs
inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint)
# Copy the rng states.
ctx.fwd_cpu_rng_state = torch.get_rng_state()
ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state()
ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
see_memory_usage("Before running forward on the layer", force=False)
# ctx.save_for_backward(*args)
with torch.no_grad():
outputs = run_function(*inputs_cuda)
see_memory_usage("After running forward on the layer", force=False)
del inputs_cuda
if PARTITION_ACTIVATIONS:
new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING)
assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}'
save_args_for_backward(*new_args)
elif CPU_CHECKPOINT:
new_args = get_cpu_activations_for_backward(args, inputs)
save_args_for_backward(*new_args)
else:
save_args_for_backward(*args)
if PROFILE_TIME:
timers('forward').stop()
timers.log(['forward'])
if SYNCHRONIZE:
get_accelerator().synchronize()
# Tensors returned from forward() may not be differentiable.
if torch.is_tensor(outputs):
non_grad_outputs = [outputs] if not outputs.is_floating_point() else []
else:
non_grad_outputs = [o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()]
ctx.mark_non_differentiable(*non_grad_outputs)
if torch.is_tensor(outputs):
all_outputs += [outputs]
return outputs
else:
all_outputs += outputs
outputs, _, _ = extract_tensors(all_objects=outputs)
return tuple(outputs)
@staticmethod
def backward(ctx, *grads):
global timers
see_memory_usage("In backward", force=False)
# removing pointers to the contiguous buffer memory
# so that they can be garbage collected once the checkpoints
# have been used
if SYNCHRONIZE:
get_accelerator().synchronize()
if PROFILE_TIME:
timers('backward').start()
if CONTIGUOUS_CHECKPOINTING:
global data_offsets, size_offsets
global contiguous_data_buffers, contiguous_size_buffers
for buffers in contiguous_data_buffers:
buffers.clear()
# frees up all the pointers to the checkpoints except for the ones
# stored by save for backward
contiguous_data_buffers = []
contiguous_size_buffers = []
data_offsets = []
size_offsets = []
see_memory_usage("In backward checkpointing code", force=False)
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), "
"please use .backward() if possible")
global cuda_device, transport_stream, PARTITION_ACTIVATIONS
if PARTITION_ACTIVATIONS:
# with get_accelerator().stream(transport_stream):
inputs = gather_partitioned_activations(ctx.deepspeed_saved_tensors,
device=cuda_device if CPU_CHECKPOINT else None)
detached_inputs = detach_variable(inputs)
elif CPU_CHECKPOINT:
inputs = move_to_device(ctx.deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint)
detached_inputs = detach_variable(inputs)
else:
inputs = ctx.deepspeed_saved_tensors
detached_inputs = detach_variable(inputs)
# Add non tensor input args
detached_inputs = merge_tensors(tensor_objects=detached_inputs,
non_tensor_objects=ctx.non_tensor_args,
tensor_flags=ctx.tensor_flags)
# Store the current states.
bwd_cpu_rng_state = torch.get_rng_state()
bwd_cuda_rng_state = get_accelerator().get_rng_state()
bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
# Set the states to what they were before the forward pass.
torch.set_rng_state(ctx.fwd_cpu_rng_state)
_set_cuda_rng_state(ctx.fwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
# if PARTITION_ACTIVATIONS:
# current_stream=get_accelerator().current_stream()
# current_stream.wait_stream(transport_stream)
see_memory_usage("In backward checkpointing code before forward", force=False)
with torch.enable_grad():
outputs = ctx.run_function(*detached_inputs)
see_memory_usage("In backward checkpointing code after forward", force=False)
# Set the states back to what they were at the start of this function.
torch.set_rng_state(bwd_cpu_rng_state)
_set_cuda_rng_state(bwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
if isinstance(outputs, torch.Tensor):
outputs = (outputs, )
# Filter out non tensor outputs
outputs, _, _ = extract_tensors(all_objects=outputs)
# Construct arguments to autograd.backward().
# This is usually just outputs and grads, but forward() can return tensors that
# are not differentiable.
output_tensors = []
grad_tensors = []
for out, grad in zip(outputs, grads):
if out.requires_grad:
output_tensors.append(out)
grad_tensors.append(grad)
see_memory_usage("In backward checkpointing code before backward", force=False)
torch.autograd.backward(output_tensors, grad_tensors)
# Force clear our stashed tensors to prevent a memory leak in certain scenarios
ctx.deepspeed_saved_tensors = None
ctx.non_tensor_args = None
ctx.tensor_flags = None
see_memory_usage("After backward checkpointing code after backward", force=False)
if PROFILE_TIME:
timers('backward').stop()
timers.log(['backward'])
if SYNCHRONIZE:
get_accelerator().synchronize()
ret_list = [None, None] # first None for ctx
for inp in detached_inputs:
if torch.is_tensor(inp):
ret_list.append(inp.grad)
else:
ret_list.append(None)
return tuple(ret_list)
def checkpoint(function, *args):
"""Checkpoint a model or part of the model.
This has been directly copied from torch.utils.checkpoint. """
all_outputs = []
CheckpointFunction.apply(function, all_outputs, *args)
if len(all_outputs) == 1:
return all_outputs[0]
else:
return tuple(all_outputs)
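# Usage sketch (commented out): a layer's forward pass is typically wrapped with
# this checkpoint() so its activations are recomputed during backward instead of
# stored. `my_transformer_layer`, `hidden_states` and `attention_mask` are
# hypothetical caller-side names.
#
#   def custom_forward(*inputs):
#       return my_transformer_layer(*inputs)
#
#   hidden_states = checkpoint(custom_forward, hidden_states, attention_mask)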
def partition_activations_in_checkpoint(partition_activation):
global PARTITION_ACTIVATIONS
PARTITION_ACTIVATIONS = partition_activation
if dist.get_rank() == 0:
logger.info(f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
def set_num_layers(nlayers):
global num_layers
num_layers = nlayers
def reset():
"""Resets memory buffers related to contiguous memory optimizations.
Should be called during eval when multiple forward propagations are
computed without any backward propagation that usually clears these
buffers.
Arguments:
None
Return:
None
"""
if CONTIGUOUS_CHECKPOINTING:
global data_offsets, size_offsets
global contiguous_data_buffers, contiguous_size_buffers
for buffers in contiguous_data_buffers:
buffers.clear()
# frees up all the pointers to the checkpoints except for the ones
# stored by save for backward
contiguous_data_buffers = []
contiguous_size_buffers = []
data_offsets = []
size_offsets = []
def _configure_using_config_file(config, mpu=None):
global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME
config = DeepSpeedConfig(config, mpu=mpu).activation_checkpointing_config
if dist.get_rank() == 0:
logger.info(config.repr())
PARTITION_ACTIVATIONS = config.partition_activations
CONTIGUOUS_CHECKPOINTING = config.contiguous_memory_optimization
num_layers = config.number_checkpoints
CPU_CHECKPOINT = config.cpu_checkpointing
SYNCHRONIZE = config.synchronize_checkpoint_boundary
PROFILE_TIME = config.profile
def _configure_defaults():
global mpu, num_layers, deepspeed_checkpointing_enabled
global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME
PARTITION_ACTIVATIONS = False
CONTIGUOUS_CHECKPOINTING = False
num_layers = None
CPU_CHECKPOINT = False
SYNCHRONIZE = False
PROFILE_TIME = False
deepspeed_checkpointing_enabled = True
def configure(
mpu_,
deepspeed_config=None,
partition_activations=None,
contiguous_checkpointing=None,
num_checkpoints=None,
checkpoint_in_cpu=None,
synchronize=None,
profile=None,
):
"""Configure DeepSpeed Activation Checkpointing.
Arguments:
mpu_: Optional: An object that implements the following methods
get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size
deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to
configure DeepSpeed Activation Checkpointing
partition_activations: Optional: Partitions activation checkpoint across model parallel
GPUs when enabled. By default False. Will overwrite deepspeed_config if provided
contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory
buffer. Works only with homogeneous checkpoints when partition_activations is enabled.
Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if
provided
num_checkpoints: Optional: Number of activation checkpoints stored during the forward
propagation of the model. Used to calculate the buffer size for contiguous_checkpointing
Will overwrite deepspeed_config if provided
checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with
partition_activation. Default is false. Will overwrite deepspeed_config if provided
synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of
each call to deepspeed.checkpointing.checkpoint for both forward and backward pass.
By default false. Will overwrite deepspeed_config if provided
profile: Optional: Logs the forward and backward time for each
deepspeed.checkpointing.checkpoint invocation. Will overwrite deepspeed_config
if provided
Returns:
None
"""
global mpu, num_layers, deepspeed_checkpointing_enabled
global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME
_configure_defaults()
if mpu_ is not None:
mpu = mpu_
if deepspeed_config is not None:
_configure_using_config_file(deepspeed_config, mpu=mpu)
if partition_activations is not None:
PARTITION_ACTIVATIONS = partition_activations
if contiguous_checkpointing is not None:
CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing
if num_checkpoints is not None:
num_layers = num_checkpoints
if checkpoint_in_cpu is not None:
CPU_CHECKPOINT = checkpoint_in_cpu
if synchronize is not None:
SYNCHRONIZE = synchronize
if profile is not None:
PROFILE_TIME = profile
if CONTIGUOUS_CHECKPOINTING:
assert PARTITION_ACTIVATIONS, "Contiguous Checkpointing is only available with partitioned activations. Set partitioned activations to true in deepspeed config"
if CONTIGUOUS_CHECKPOINTING:
assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing"
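# Usage sketch (commented out): a typical call from a training script, mirroring
# the activation_checkpointing section of the DeepSpeed config. `my_mpu` is a
# hypothetical model-parallel utility object supplied by the caller.
#
#   configure(my_mpu,
#             partition_activations=True,
#             contiguous_checkpointing=True,
#             num_checkpoints=24,
#             checkpoint_in_cpu=False,
#             synchronize=False,
#             profile=True)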
def is_configured():
"""True if deepspeed activation checkpointing has been configured
by calling deepspeed.checkpointing.configure, else returns False.
Arguments:
None
Return:
True if configured, else False
"""
return deepspeed_checkpointing_enabled
# --- end of deepspeed/runtime/activation_checkpointing/checkpointing.py ---
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
from deepspeed.ops.op_builder import AsyncIOBuilder
from deepspeed import comm as dist
from deepspeed.runtime.swap_tensor.constants import *
from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object
from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper
from deepspeed.runtime.swap_tensor.utils import get_sized_buffer
from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper
class OptimizerSwapOp(object):
def __init__(self, aio_handle, read_op, param_info, allocated_buffers, state_buffers, num_ops):
self.aio_handle = aio_handle
self.read_op = read_op
self.param_info = param_info
self.allocated_buffers = allocated_buffers
self.state_buffers = state_buffers
self.wait_required = True
self.num_ops = num_ops
def is_parameter(self, parameter):
return id(parameter) == self.param_info.param_id
def wait(self):
assert self.wait_required
assert self.aio_handle.wait() == self.num_ops
self.wait_required = False
SYNC_SWAP_IN = 'sync_swap_in'
ASYNC_SWAP_IN = 'async_swap_in'
SYNC_SWAP_OUT = 'sync_swap_out'
ASYNC_SWAP_OUT = 'async_swap_out'
SWAP_IN_STATE_TIMER = 'swap_in_state'
SWAP_OUT_STATE_TIMER = 'swap_out_state'
SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient'
ASYNC_SWAP_IN_STATE_TIMER = "async_swap_in_state"
ASYNC_SWAP_OUT_STATE_TIMER = 'async_swap_out_state'
class PipelinedOptimizerSwapper(OptimizerSwapper):
def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers):
super(PipelinedOptimizerSwapper, self).__init__(swap_config, aio_config, base_folder, optimizer, largest_numel,
device, dtype, timers)
aio_op = AsyncIOBuilder().load()
self.write_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH],
aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS],
aio_config[AIO_THREAD_COUNT])
self.read_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH],
aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS],
aio_config[AIO_THREAD_COUNT])
# Overlap gradient swap out
self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.write_aio_handle,
numel_alignment=self.numel_alignment,
timers=self.timers)
self.async_swap_in = swap_config.pipeline_read
self.async_swap_out = swap_config.pipeline_write
self.swap_ops = {SYNC_SWAP_IN: None, ASYNC_SWAP_IN: None, SYNC_SWAP_OUT: None, ASYNC_SWAP_OUT: None}
self.print_exclude_list += [
'gradient_swapper', 'read_aio_handle', 'write_aio_handle', 'swap_ops', 'print_exclude_list'
]
if dist.get_rank() == 0:
print_object(obj=self, name='PipelinedOptimizerSwapper', exclude_list=self.print_exclude_list)
def initialize_parameters(self, parameters, src_tensors):
self._initialize_parameters(parameters=parameters, src_tensors=src_tensors, aio_handle=self.write_aio_handle)
def initialize_from_swapped_fp16_params(self, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers,
fp32_parameters):
self._initialize_from_swapped_fp16_params(aio_handle=self.write_aio_handle,
fp16_partitions_info=fp16_partitions_info,
fp16_num_elems=fp16_num_elems,
fp16_pinned_buffers=fp16_pinned_buffers,
fp32_parameters=fp32_parameters)
def flush_gradients(self):
self._flush_gradient_swapper(self.gradient_swapper)
def swap_in_optimizer_state(self, parameter, async_parameter):
assert parameter is not None
assert self.swap_ops[SYNC_SWAP_IN] is None
self._flush_gradient_swapper(self.gradient_swapper)
self._start_timer(SWAP_IN_STATE_TIMER)
if self.swap_ops[ASYNC_SWAP_IN]:
assert self.swap_ops[ASYNC_SWAP_IN].is_parameter(parameter)
self.swap_ops[SYNC_SWAP_IN] = self.swap_ops[ASYNC_SWAP_IN]
self.swap_ops[ASYNC_SWAP_IN] = None
else:
self.swap_ops[SYNC_SWAP_IN] = self._swap_in_optimizer_state(aio_handle=self.read_aio_handle,
parameter=parameter)
if self.swap_ops[SYNC_SWAP_IN]:
self.swap_ops[SYNC_SWAP_IN].wait()
if self.async_swap_in and async_parameter is not None:
assert self.swap_ops[ASYNC_SWAP_IN] is None
self.swap_ops[ASYNC_SWAP_IN] = self._swap_in_optimizer_state(aio_handle=self.read_aio_handle,
parameter=async_parameter)
self._stop_timer(SWAP_IN_STATE_TIMER)
self.timer_names.add(SWAP_IN_STATE_TIMER)
def swap_out_optimizer_state(self, parameter, async_swap):
self._start_timer(SWAP_OUT_STATE_TIMER)
if self.swap_ops[ASYNC_SWAP_OUT]:
self._start_timer(ASYNC_SWAP_OUT_STATE_TIMER)
self._complete_swap_out(ASYNC_SWAP_OUT)
self._stop_timer(ASYNC_SWAP_OUT_STATE_TIMER)
self.timer_names.add(ASYNC_SWAP_OUT_STATE_TIMER)
assert self.swap_ops[SYNC_SWAP_IN] is not None
assert not self.swap_ops[SYNC_SWAP_IN].wait_required
swap_op = self._swap_out_optimizer_state(aio_handle=self.write_aio_handle,
parameter=parameter,
swap_in_op=self.swap_ops[SYNC_SWAP_IN])
self.swap_ops[SYNC_SWAP_IN] = None
if self.async_swap_out and async_swap:
self.swap_ops[ASYNC_SWAP_OUT] = swap_op
else:
self.swap_ops[SYNC_SWAP_OUT] = swap_op
self._complete_swap_out(SYNC_SWAP_OUT)
self._stop_timer(SWAP_OUT_STATE_TIMER)
self.timer_names.add(SWAP_OUT_STATE_TIMER)
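# Call-pattern sketch (commented out): how an optimizer step is expected to drive
# this pipelined swapper, overlapping the prefetch of the next parameter's state
# with the update of the current one. `params`, `swapper` and `step_subgroup` are
# hypothetical names for the caller's loop.
#
#   for i, param in enumerate(params):
#       next_param = params[i + 1] if i + 1 < len(params) else None
#       swapper.swap_in_optimizer_state(param, async_parameter=next_param)
#       step_subgroup(param)  # optimizer update on the swapped-in state
#       swapper.swap_out_optimizer_state(param, async_swap=(i + 1 < len(params)))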
def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors):
self._swap_out_gradients(parameter=parameter,
gradient_offsets=gradient_offsets,
gradient_tensors=gradient_tensors,
gradient_swapper=self.gradient_swapper)
def _complete_swap_out(self, swap_out_type):
self.swap_ops[swap_out_type].wait()
self.swap_buffer_manager.free(self.swap_ops[swap_out_type].allocated_buffers)
self.swap_ops[swap_out_type] = None
def _swap_out_optimizer_state(self, aio_handle, parameter, swap_in_op):
assert swap_in_op.is_parameter(parameter)
allocated_buffers = swap_in_op.allocated_buffers.copy()
swap_buffers = swap_in_op.state_buffers.copy()
param_info = swap_in_op.param_info
self._update_param_state_info(param_info, parameter)
unpinned_tensors = param_info.get_unpinned_state_tensors()
if len(unpinned_tensors) > 0:
new_alloc_buffers = self.swap_buffer_manager.allocate(num_elems=self._io_aligned_numel(param_info.numel()),
count=len(unpinned_tensors),
dtype=param_info.dtype())
assert new_alloc_buffers is not None
allocated_buffers += new_alloc_buffers
swap_buffers += new_alloc_buffers
for pinned_dst, unpinned_src in zip(new_alloc_buffers, unpinned_tensors):
dst = get_sized_buffer(pinned_dst, unpinned_src.numel())
dst.data.copy_(unpinned_src.data)
swap_paths = param_info.swap_paths.copy()
assert len(swap_paths) == len(swap_buffers)
swap_out_tensors(aio_handle, swap_buffers, swap_paths)
swap_out_op = OptimizerSwapOp(aio_handle=aio_handle,
param_info=param_info,
read_op=False,
allocated_buffers=allocated_buffers,
state_buffers=swap_buffers,
num_ops=len(swap_buffers))
return swap_out_op
def _swap_in_optimizer_state(self, aio_handle, parameter):
param_info = self._get_param_swap_info(parameter)
if param_info is None:
return None
required_buffer_count = len(param_info.tensors) + (1 if param_info.has_gradients() else 0)
aligned_numel = self._io_aligned_numel(param_info.numel())
allocated_buffers = self.swap_buffer_manager.allocate(num_elems=aligned_numel,
count=required_buffer_count,
dtype=parameter.dtype)
assert allocated_buffers is not None, \
f"PipelinedOptimizerSwapper ran out of swap buffers, try increasing 'buffer_count'"
state_buffers = allocated_buffers[:len(param_info.tensors)]
param_info.set_swap_buffers(state_buffers)
swap_buffers = state_buffers.copy()
swap_paths = param_info.swap_paths.copy()
if param_info.has_gradients():
parameter.grad = allocated_buffers[-1].narrow(0, 0, param_info.numel())
if param_info.swapped_gradients:
swap_buffers += param_info.get_swap_gradient_buffers(parameter.grad)
swap_paths += param_info.get_swap_gradient_paths()
swap_in_tensors(aio_handle, swap_buffers, swap_paths)
if param_info.unswapped_gradients:
self._retrieve_unswapped_grad_partitions(swap_info=param_info, dest_buffer=parameter.grad)
swap_in_op = OptimizerSwapOp(aio_handle=aio_handle,
param_info=param_info,
read_op=True,
allocated_buffers=allocated_buffers,
state_buffers=state_buffers,
num_ops=len(swap_buffers))
return swap_in_op
# --- end of deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py ---
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import torch
from deepspeed import comm as dist
from deepspeed.utils.logging import logger
from deepspeed.runtime.swap_tensor.utils import swap_out_tensors, SwapBuffer
INVALID_BUFFER_INDEX = -1
ASYNC_SWAPPER_WAIT_TIMER = 'async_swap_gradient_wait'
class AsyncTensorSwapper(object):
def __init__(self, aio_handle, numel_alignment, timers):
self.free_buffer_index = []
self.swapping_buffer_index = []
self.ready_buffer_index = []
self.current_buffer_index = INVALID_BUFFER_INDEX
self.all_buffers = []
self.aio_handle = aio_handle
self.numel_alignment = numel_alignment
self.max_numel = 0
self.num_pending_swaps = 0
self.timers = timers
self.timer_names = set()
self.num_elements_swapped = 0
self.dtype = None
def has_buffers(self):
return len(self.all_buffers) > 0
def add_buffers(self, buffer_list):
assert len(self.all_buffers) == 0
assert all([buffer.is_pinned() for buffer in buffer_list])
dtype = buffer_list[0].dtype
assert all([buffer.dtype == dtype for buffer in buffer_list])
self.dtype = dtype
self.all_buffers = [SwapBuffer(buffer) for buffer in buffer_list]
self.free_buffer_index += [i for i in range(len(self.all_buffers))]
self.max_numel = max([buffer.numel() for buffer in buffer_list])
self.timer_names = set()
def get_timer_names(self):
return list(self.timer_names)
def release_buffers(self):
self._report_statistics('Swapped out[Before flush]')
self._flush_buffers_until_complete()
self._report_statistics('Swapped out[After flush]')
pinned_buffers = [buf.buffer for buf in self.all_buffers]
self.all_buffers = []
self.free_buffer_index = []
self.current_buffer_index = INVALID_BUFFER_INDEX
self.num_elements_swapped = 0
self.dtype = None
return pinned_buffers
def swap_out_tensors(self, tensor_list, path_list):
for tensor, swap_path in zip(tensor_list, path_list):
self._swap_out_tensor(tensor, swap_path)
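# Lifecycle sketch (commented out): the owner lends this swapper pinned CPU
# buffers, streams tensors out to NVMe, and reclaims the buffers after flushing.
# `write_handle`, `pinned_buffers`, `grads` and `paths` are hypothetical names.
#
#   swapper = AsyncTensorSwapper(aio_handle=write_handle, numel_alignment=256, timers=None)
#   swapper.add_buffers(pinned_buffers)      # pinned CPU tensors of a single dtype
#   swapper.swap_out_tensors(grads, paths)   # copy into buffers and issue async writes
#   reclaimed = swapper.release_buffers()    # wait for pending writes, get buffers back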
def _report_statistics(self, message):
if dist.get_rank() == 0:
element_size = torch.tensor([], dtype=self.dtype).element_size()
swapped_GB = (self.num_elements_swapped * element_size) / (1024**3)
logger.debug(f'{message} num_elems = {self.num_elements_swapped}, {swapped_GB:5.2f} GB')
def _swap_out_tensor(self, tensor, swap_path):
assert len(self.all_buffers) > 0
aligned_numel = self._io_aligned_numel(tensor.numel())
assert aligned_numel <= self.max_numel
self._make_swap_space(aligned_numel)
assert self.current_buffer_index != INVALID_BUFFER_INDEX
swap_buffer = self._get_current_buffer()
swap_buffer.insert_tensor(tensor, swap_path, aligned_numel)
def _make_swap_space(self, numel):
if self.current_buffer_index == INVALID_BUFFER_INDEX:
self._allocate_buffer()
return
if not self._get_current_buffer().has_space(numel):
if len(self.free_buffer_index) > 0:
self._flush_ready_buffers()
else:
self._flush_buffers_until_complete()
self._allocate_buffer()
def _io_aligned_numel(self, numel):
remainder = numel % self.numel_alignment
return numel if remainder == 0 else (numel + self.numel_alignment - remainder)
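# Worked example (comment only), with a hypothetical numel_alignment of 256:
#   remainder = 1000 % 256 = 232
#   aligned   = 1000 + 256 - 232 = 1024
# so a 1000-element tensor reserves 1024 elements of swap buffer space.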
def _allocate_buffer(self):
assert self.current_buffer_index == INVALID_BUFFER_INDEX
assert len(self.all_buffers) > 0
assert len(self.free_buffer_index) > 0
self.current_buffer_index = self.free_buffer_index[-1]
self.free_buffer_index = self.free_buffer_index[:-1]
def _flush_ready_buffers(self):
if self.current_buffer_index != INVALID_BUFFER_INDEX:
self.ready_buffer_index.append(self.current_buffer_index)
self.current_buffer_index = INVALID_BUFFER_INDEX
self._swap_out_ready_buffers()
def _flush_buffers_until_complete(self):
self._flush_ready_buffers()
assert len(self.ready_buffer_index) == 0
self._wait_for_swap_complete()
assert len(self.swapping_buffer_index) == 0
assert len(self.free_buffer_index) == len(self.all_buffers)
def _swap_out_ready_buffers(self):
for buffer_index in self.ready_buffer_index:
buffer = self._get_buffer(buffer_index)
swap_tensors = buffer.get_swap_tensors()
swap_paths = buffer.get_swap_paths()
self.num_pending_swaps += len(swap_tensors)
swap_out_tensors(self.aio_handle, swap_tensors, swap_paths)
self.swapping_buffer_index += self.ready_buffer_index
self.ready_buffer_index = []
def _wait_for_swap_complete(self):
assert len(self.swapping_buffer_index) > 0
self._start_timer(ASYNC_SWAPPER_WAIT_TIMER)
assert self.aio_handle.wait() == self.num_pending_swaps
self._stop_timer(ASYNC_SWAPPER_WAIT_TIMER)
self.timer_names.add(ASYNC_SWAPPER_WAIT_TIMER)
self.num_pending_swaps = 0
for buffer_index in self.swapping_buffer_index:
buffer = self._get_buffer(buffer_index)
self.num_elements_swapped += buffer.get_num_elem()
buffer.reset()
self.free_buffer_index += self.swapping_buffer_index
assert len(self.free_buffer_index) <= len(self.all_buffers)
self.swapping_buffer_index = []
def _get_buffer(self, index):
assert index != INVALID_BUFFER_INDEX
return self.all_buffers[index]
def _get_current_buffer(self):
return self._get_buffer(self.current_buffer_index)
def _start_timer(self, name):
if self.timers:
self.timers(name).start()
def _stop_timer(self, name):
if self.timers:
self.timers(name).stop()
def _log_timers(self, name_list, force=False):
if self.timers and force:
self.timers.log(name_list)
# --- end of deepspeed/runtime/swap_tensor/async_swapper.py ---
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import torch
from deepspeed.utils.logging import logger
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
MIN_AIO_BYTES = 1024**2
AIO_ALIGNED_BYTES = 1024
def swap_in_tensors(swap_handle, tensor_buffers, swap_paths):
for buffer, path in zip(tensor_buffers, swap_paths):
assert (swap_handle.async_pread(buffer, path) == 0)
def swap_out_tensors(swap_handle, tensor_buffers, swap_paths):
for buffer, path in zip(tensor_buffers, swap_paths):
assert (swap_handle.async_pwrite(buffer, path) == 0)
def print_object(obj, name, exclude_list=[]):
logger.info('{}:'.format(name))
for arg in sorted(vars(obj)):
if not arg in exclude_list:
dots = '.' * (29 - len(arg))
logger.info(' {} {} {}'.format(arg, dots, getattr(obj, arg)))
class SwapBuffer(object):
def __init__(self, buffer):
self.buffer = buffer
self.reset()
def reset(self):
self.offset = 0
self.swap_tensors = {}
self.compute_tensors = {}
self.swap_paths = {}
self.num_elem = 0
def insert_tensor(self, tensor, swap_path, aligned_numel):
swap_tensor, compute_tensor = self.allocate_tensor(swap_path, tensor.numel(), aligned_numel)
compute_tensor.data.copy_(tensor.data)
return swap_tensor, compute_tensor
def allocate_tensor(self, swap_path, numel, aligned_numel):
assert self.has_space(aligned_numel)
assert not self.offset in self.swap_tensors
allocate_offset = self.offset
swap_tensor = self.buffer.narrow(0, allocate_offset, aligned_numel)
dest_tensor = swap_tensor.narrow(0, 0, numel)
self.swap_tensors[allocate_offset] = swap_tensor
self.compute_tensors[allocate_offset] = dest_tensor
self.swap_paths[allocate_offset] = swap_path
self.offset += aligned_numel
self.num_elem += numel
return self.swap_tensors[allocate_offset], self.compute_tensors[allocate_offset]
def has_space(self, numel):
return (self.offset + numel) <= self.buffer.numel()
def get_swap_tensors(self):
return [tensor for tensor in self.swap_tensors.values()]
def get_swap_paths(self):
return [path for path in self.swap_paths.values()]
def get_compute_tensors(self):
return [tensor for tensor in self.compute_tensors.values()]
def get_num_elem(self):
return self.num_elem
def get_swap_tensor(self, offset):
return self.swap_tensors.get(offset, None)
def get_compute_tensor(self, offset):
return self.compute_tensors.get(offset, None)
def get_swap_path(self, offset):
return self.swap_paths.get(offset, None)
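# Usage sketch (commented out): a SwapBuffer wraps one pinned tensor and hands out
# aligned slices; insert_tensor() copies the source into the slice and records the
# swap path for a later aio write. `pinned`, `t` and the path are hypothetical.
#
#   buf = SwapBuffer(pinned)  # pinned: a pinned CPU tensor
#   swap_t, compute_t = buf.insert_tensor(t, '/nvme/t0.tensor.swp', aligned_numel=1024)
#   # swap_t is the aligned slice that gets written; compute_t views only t.numel() elements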
class SwapBufferPool(object):
def __init__(self, buffers):
assert all([buf.is_pinned() for buf in buffers])
self.buffers = [SwapBuffer(buf) for buf in buffers]
self.current_index = 0
def reset(self):
self.current_index = 0
for buffer in self.buffers:
buffer.reset()
def allocate_tensor(self, numel, swap_path, aligned_numel):
if self.has_space(aligned_numel):
swap_tensor, compute_tensor = self._get_current_buffer().allocate_tensor(swap_path, numel, aligned_numel)
return swap_tensor, compute_tensor
return None, None
def insert_tensor(self, tensor, swap_path, aligned_numel):
if self.has_space(aligned_numel):
swap_tensor, compute_tensor = self._get_current_buffer().insert_tensor(tensor, swap_path, aligned_numel)
return swap_tensor, compute_tensor
return None, None
def get_swap_tensors(self):
swap_tensors = []
for buffer in self._get_used_buffers():
swap_tensors += buffer.get_swap_tensors()
return swap_tensors
def get_swap_paths(self):
swap_paths = []
for buffer in self._get_used_buffers():
swap_paths += buffer.get_swap_paths()
return swap_paths
def get_compute_tensors(self):
compute_tensors = []
for buffer in self._get_used_buffers():
compute_tensors += buffer.get_compute_tensors()
return compute_tensors
def has_space(self, numel):
if self._get_current_buffer().has_space(numel):
return True
if self.current_index == len(self.buffers) - 1:
return False
self.current_index += 1
return self._get_current_buffer().has_space(numel)
def swap_out(self, aio_handle, async_op=False):
swap_tensors = self.get_swap_tensors()
swap_paths = self.get_swap_paths()
assert all([p is not None for p in swap_paths])
swap_out_tensors(aio_handle, swap_tensors, swap_paths)
if not async_op:
assert len(swap_tensors) == aio_handle.wait()
def swap_in(self, aio_handle, async_op=False):
swap_tensors = self.get_swap_tensors()
swap_paths = self.get_swap_paths()
assert all([p is not None for p in swap_paths])
swap_in_tensors(aio_handle, swap_tensors, swap_paths)
if not async_op:
assert len(swap_tensors) == aio_handle.wait()
def _get_current_buffer(self):
return self.buffers[self.current_index]
def _get_used_buffers(self):
return self.buffers[:self.current_index + 1]
class SwapBufferManager(object):
def __init__(self, num_elems, count, dtype):
self.num_elems = num_elems
self.count = count
self.dtype = dtype
self.all_buffers = [
get_accelerator().pin_memory(torch.zeros(num_elems, device='cpu', dtype=dtype)) for _ in range(count)
]
self.free_buffer_index = [i for i in range(count)]
self.used_buffer_index = {}
self.gigabytes = (self.all_buffers[0].element_size() * num_elems * count) / (1024**3)
if dist.get_rank() == 0:
exclude_list = ['all_buffers']
print_object(obj=self, name='SwapBufferManager', exclude_list=exclude_list)
def allocate(self, num_elems, count, dtype):
assert dtype == self.dtype
assert num_elems <= self.num_elems
if count > len(self.free_buffer_index):
return None
used_indices = self.free_buffer_index[-count:]
self.free_buffer_index = self.free_buffer_index[:-count]
buffers = []
for i in used_indices:
tmp_buffer = self.all_buffers[i].narrow(0, 0, num_elems)
buffers.append(tmp_buffer)
self.used_buffer_index[id(tmp_buffer)] = i
return buffers
def allocate_all(self, num_elems, dtype):
return self.allocate(num_elems=num_elems, count=len(self.free_buffer_index), dtype=dtype)
def free(self, buffers):
buffer_ids = []
for buf in buffers:
buffer_ids.append(id(buf))
assert all([b_id in self.used_buffer_index for b_id in buffer_ids])
for b_id in buffer_ids:
self.free_buffer_index.append(self.used_buffer_index[b_id])
del (self.used_buffer_index[b_id])
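# Usage sketch (commented out; assumes deepspeed.comm is initialized since the
# constructor logs on rank 0): the manager pre-pins `count` buffers of `num_elems`
# elements and hands out narrowed views that must be returned via free().
#
#   mgr = SwapBufferManager(num_elems=1 << 20, count=4, dtype=torch.float32)
#   bufs = mgr.allocate(num_elems=1 << 18, count=2, dtype=torch.float32)
#   # ... fill bufs and swap them out ...
#   mgr.free(bufs)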
def get_sized_buffer(buffer, num_elems):
assert num_elems <= buffer.numel(), \
f'num_elems {num_elems} > buffer {buffer.numel()}'
return buffer.narrow(0, 0, num_elems) if num_elems < buffer.numel() else buffer
def get_sized_buffers(buffer_list, num_elems_list):
swap_buffers = [
get_sized_buffer(buffer, num_elems) \
for buffer, num_elems in zip(buffer_list, num_elems_list)
]
return swap_buffers
# --- end of deepspeed/runtime/swap_tensor/utils.py ---
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import os
import shutil
from enum import Enum
import torch
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
from .constants import *
from .utils import swap_in_tensors, swap_out_tensors, MIN_AIO_BYTES, AIO_ALIGNED_BYTES, print_object, SwapBufferPool
def print_rank_0(message, debug=False, force=False):
if dist.get_rank() == 0 and (debug or force):
print(message)
class PartitionedParamStatus(Enum):
# Partitioned parameters are present and ready for use
AVAILABLE = 1
# partitioned params are in some non-memory device
NOT_AVAILABLE = 2
# partitioned params are being read from some non-memory device.
INFLIGHT = 3
class AsyncPartitionedParameterSwapper(object):
def __init__(self, ds_config, model_dtype):
aio_op = AsyncIOBuilder().load(verbose=False)
self.aio_handle = aio_op.aio_handle
self.dtype = model_dtype
#set swap buffers, create aio handles
self._configure_aio(ds_config)
#mapping from param id to path
self.id_to_path = {}
#mapping from param_id to buffer id
self.param_id_to_buffer_id = {}
# mapping from param_id to swap buffer
self.param_id_to_swap_buffer = {}
#number of elements in the param
self.param_id_to_numel = {}
self.pending_writes = 0
self.pending_reads = 0
#keep track of async swap in params and buffers
self.inflight_params = []
self.inflight_swap_in_buffers = []
self.inflight_numel = 0
#keep track of available params
self.available_params = set()
self.available_numel = 0
# for swapping out from partitioned fp32 params
self.partitioned_swap_buffer = None
self.partitioned_swap_pool = None
self.invalid_buffer = torch.tensor(1).half()
if dist.get_rank() == 0:
exclude_list = ['aio_read_handle', 'aio_write_handle', 'buffers']
print_object(obj=self, name='AsyncPartitionedParameterSwapper', exclude_list=exclude_list)
def available_swap_in_buffers(self):
return len(self.available_buffer_ids)
def _configure_aio(self, ds_config):
self.swap_config = ds_config.zero_config.offload_param
torch_dtype_string = str(self.dtype).split(".")[1]
self.swap_folder = os.path.join(self.swap_config.nvme_path, 'zero_stage_3', f'{torch_dtype_string}params',
f'rank{dist.get_rank()}')
shutil.rmtree(self.swap_folder, ignore_errors=True)
os.makedirs(self.swap_folder, exist_ok=True)
self.swap_element_size = torch.tensor([], dtype=self.dtype).element_size()
self.aio_config = ds_config.aio_config
# Read/Write alignment for each thread during Intra-request parallelism
self.min_aio_bytes = max(MIN_AIO_BYTES, self.aio_config[AIO_BLOCK_SIZE])
self.aligned_bytes = AIO_ALIGNED_BYTES * self.aio_config[AIO_THREAD_COUNT]
self.numel_alignment = self.aligned_bytes // self.swap_element_size
self.elements_per_buffer = self.swap_config.buffer_size
self.aligned_elements_per_buffer = self._io_aligned_numel(self.elements_per_buffer)
self.param_buffer_count = self.swap_config.buffer_count
self.available_buffer_ids = [i for i in range(self.param_buffer_count)]
self.reserved_buffer_ids = []
self.buffers = get_accelerator().pin_memory(
torch.empty(int(self.aligned_elements_per_buffer * self.param_buffer_count),
dtype=self.dtype,
requires_grad=False))
self.aio_read_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH],
self.aio_config[AIO_SINGLE_SUBMIT], self.aio_config[AIO_OVERLAP_EVENTS],
self.aio_config[AIO_THREAD_COUNT])
self.aio_write_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH],
self.aio_config[AIO_SINGLE_SUBMIT],
self.aio_config[AIO_OVERLAP_EVENTS], self.aio_config[AIO_THREAD_COUNT])
self.swap_out_params = []
#Check if partitioned param or numel in a tensor is swappable or not
def swappable_tensor(self, param=None, numel=None):
if param is not None:
assert numel is None, "Only one of param and numel may be provided"
numel = param.ds_tensor.ds_numel
if numel is not None:
return self.min_aio_bytes <= numel * self.swap_element_size
assert False, "Either param or numel must be provided"
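# Worked example (comment only), assuming the configured aio block_size is the
# 1MB default: min_aio_bytes = max(1MB, 1MB) = 1MB, so a partitioned fp16 tensor
# (2 bytes/element) is swappable only if it has at least 524,288 elements;
# smaller partitions stay in memory.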
def get_path(self, param, must_exist=False):
paths = self._get_swap_paths([param], must_exist=must_exist)
return paths[0]
def _get_swap_paths(self, params, must_exist=False):
paths = []
for param in params:
param_id = param.ds_id
if param_id in self.id_to_path.keys():
param_path = self.id_to_path[param_id]
else:
assert not must_exist, f"Path for param id {param_id} does not exist"
param_path = os.path.join(self.swap_folder, f'{param_id}_param.tensor.swp')
self.id_to_path[param_id] = param_path
paths.append(param_path)
return paths
def _get_swap_buffers(self, params):
buffers = []
for param in params:
param_id = param.ds_id
assert param_id in self.param_id_to_swap_buffer.keys(), \
f'param {param_id} has not been assigned a swap buffer'
buffers.append(self.param_id_to_swap_buffer[param_id])
return buffers
def _track_numel(self, params):
for param in params:
assert param.ds_tensor is not None, "Partitioned tensor is None"
self.param_id_to_numel[param.ds_id] = param.ds_tensor.ds_numel
def _allocate_and_return_buffers_for_swap_in(self, params):
compute_buffers = []
swap_buffers = []
for param in params:
param_id = param.ds_id
assert param_id in self.param_id_to_numel.keys(), f" Number of elements in param {param_id} is unknown"
assert param_id not in self.param_id_to_buffer_id.keys(
), f"param {param_id} already assigned swap buffer id {self.param_id_to_buffer_id[param_id]}"
assert param_id not in self.param_id_to_swap_buffer.keys(
), f"param {param_id} has already been assigned a swap buffer"
buffer_id = self.available_buffer_ids.pop()
print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id} ")
self.param_id_to_buffer_id[param_id] = buffer_id
aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id])
swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel)
self.param_id_to_swap_buffer[param_id] = swap_buffer
compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id])
compute_buffers.append(compute_buffer)
swap_buffers.append(swap_buffer)
return compute_buffers, swap_buffers
#waits for inflight nvme writes to complete
def synchronize_writes(self):
if self.pending_writes == 0:
return
assert self.pending_writes == self.aio_write_handle.wait()
self.pending_writes = 0
self.remove_partition_and_release_buffers(self.swap_out_params)
self.swap_out_params = []
#waits for inflight nvme reads to complete
def synchronize_reads(self):
if self.pending_reads == 0:
return
assert self.pending_reads == self.aio_read_handle.wait()
self.pending_reads = 0
for param, swap_in_buffer in zip(self.inflight_params, self.inflight_swap_in_buffers):
param_id = param.ds_id
compute_buffer = swap_in_buffer.narrow(0, 0, self.param_id_to_numel[param_id])
param.ds_tensor.data = compute_buffer.data
param.ds_tensor.status = PartitionedParamStatus.AVAILABLE
self.available_params.update([param.ds_id for param in self.inflight_params])
self.available_numel += self.inflight_numel
self.inflight_params = []
self.inflight_swap_in_buffers = []
self.inflight_numel = 0
#Removes the memory assignment and releases the buffers
#Should only be executed after swapping out the tensors
def remove_partition_and_release_buffers(self, params):
for param in params:
param_id = param.ds_id
if param_id in self.param_id_to_buffer_id.keys():
buffer_id = self.param_id_to_buffer_id[param_id]
assert buffer_id is not None, "Missing buffer id for releasing"
self.available_buffer_ids.append(buffer_id)
del self.param_id_to_buffer_id[param_id]
del self.param_id_to_swap_buffer[param_id]
print_rank_0(f"param {param.ds_id} releases buffer id {buffer_id} ")
if param_id in self.available_params:
self.available_params.remove(param_id)
self.available_numel -= self.param_id_to_numel[param_id]
param.ds_tensor.data = self.invalid_buffer.data
param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE
#writes from memory to nvme. Does not release the buffers
def _swap_out(self, params, async_op=True):
swap_out_paths = self._get_swap_paths(params)
swap_out_params = self._get_swap_buffers(params)
self._track_numel(params)
swap_out_tensors(self.aio_write_handle, swap_out_params, swap_out_paths)
self.pending_writes += len(swap_out_params)
self.swap_out_params += params
if not async_op:
self.synchronize_writes()
#blocking swap out followed by releasing the memory buffers
def swap_out_and_release(self, params, async_op=False, force_buffer_release=False):
if async_op:
assert force_buffer_release, "Should not release preallocated buffers without completing the swap out. Set force_buffer_release to True to do it anyway."
self._swap_out(params, async_op=async_op)
# book keeping function for inflight swap in
def _update_inflight_swap_in(self, params, swap_in_buffers, inflight_numel):
self.inflight_params.extend(params)
self.inflight_swap_in_buffers.extend(swap_in_buffers)
self.inflight_numel += inflight_numel
for param in params:
param.ds_tensor.status = PartitionedParamStatus.INFLIGHT
self.pending_reads += len(params)
#assigns an in memory buffer and swaps in from nvme
def swap_in(self, params, async_op=True, swap_in_buffers=None):
assert all([param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE
for param in params]), "Some params are already available or in flight"
swap_in_paths = self._get_swap_paths(params)
if swap_in_buffers is None:
if len(self.available_buffer_ids) < len(swap_in_paths):
ids = [p.ds_id for p in params]
print_rank_0(
f'Not enough swap in buffers {len(self.available_buffer_ids)} for {len(swap_in_paths)} params, ids = {ids}',
force=True)
print_rank_0(
f'Num inflight: params {len(self.inflight_params)}, buffers {len(self.inflight_swap_in_buffers)}, numel = {self.inflight_numel}',
force=True)
print_rank_0(
f'Num available params: count = {len(self.available_params)}, ids = {self.available_params}, numel = {self.available_numel}',
force=True)
assert len(swap_in_paths) <= len(
self.available_buffer_ids
), f"Not enough buffers {len(self.available_buffer_ids)} for swapping {len(swap_in_paths)}"
compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in(params)
inflight_numel = sum([t.numel() for t in compute_buffers])
else:
inflight_numel = sum([t.numel() for t in swap_in_buffers])
swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths)
self._update_inflight_swap_in(params, swap_in_buffers, inflight_numel)
if not async_op:
self.synchronize_reads()
# Enables swapping into a buffer that is outside the control of the swapper. This is always synchronous.
def swap_into_buffer(self, param, dest_buffer):
assert param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE, f"param {param.ds_id} is already available or inflight"
require_swap_buffer = not (dest_buffer.is_pinned() and self._is_io_aligned(dest_buffer.numel()))
if require_swap_buffer:
assert len(self.available_buffer_ids) > 0, f"No buffer available to swap param {param.ds_id}."
compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in([param])
inflight_numel = compute_buffers[0].numel()
else:
swap_in_buffers = [dest_buffer]
inflight_numel = dest_buffer.numel()
swap_in_paths = self._get_swap_paths([param])
swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths)
self._update_inflight_swap_in([param], swap_in_buffers, inflight_numel)
self.synchronize_reads()
if require_swap_buffer:
dest_buffer.data.copy_(param.ds_tensor.data)
# Release swap buffer memory assignment. Note, this will mark the parameter not available.
self.remove_partition_and_release_buffers([param])
#assign a buffer to a param and return the buffer
def get_buffer(self, param, numel):
param_id = param.ds_id
assert self.available_swap_in_buffers(
) > 0, f"No swap buffers to allocate for fp16 param {param_id} of numel = {numel}"
assert numel < self.elements_per_buffer, f"More elements {numel} than buffer size {self.elements_per_buffer}"
self.param_id_to_numel[param_id] = numel
buffer_id = self.available_buffer_ids.pop()
self.param_id_to_buffer_id[param_id] = buffer_id
aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id])
swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel)
self.param_id_to_swap_buffer[param_id] = swap_buffer
compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id])
print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id}")
return compute_buffer
def reserve_available_buffers(self):
buffers = []
for id in self.available_buffer_ids:
buffers.append(
self.buffers.narrow(0, int(id * self.aligned_elements_per_buffer),
int(self.aligned_elements_per_buffer)))
self.reserved_buffer_ids.append(id)
self.available_buffer_ids = []
return buffers
def release_reserved_buffers(self):
for id in self.reserved_buffer_ids:
self.available_buffer_ids.append(id)
self.reserved_buffer_ids = []
def _io_aligned_numel(self, numel):
remainder = numel % self.numel_alignment
return numel if remainder == 0 else (numel + self.numel_alignment - remainder)
def _is_io_aligned(self, numel):
return (numel % self.numel_alignment) == 0
def reserve_partitioned_swap_space(self, partition_num_elems):
aligned_numel = sum([self._io_aligned_numel(numel) for numel in partition_num_elems])
self.partitioned_swap_buffer = get_accelerator().pin_memory(
torch.zeros(aligned_numel, device='cpu', dtype=self.dtype))
self.partitioned_swap_pool = SwapBufferPool([self.partitioned_swap_buffer])
def swap_out_partitioned_params(self, dst_fp16_params, src_fp32_params):
assert self.partitioned_swap_buffer is not None, f'partitioned swap buffers for fp16 params not initialized'
assert self.partitioned_swap_pool is not None, f'partitioned swap pool for fp16 params not initialized'
assert len(dst_fp16_params) == len(src_fp32_params), \
f'mismatch in number of fp16 params {len(dst_fp16_params)} and fp32 params {len(src_fp32_params)}'
fp16_swap_paths = self._get_swap_paths(dst_fp16_params, must_exist=True)
self.synchronize_writes()
self.partitioned_swap_pool.reset()
for i, fp32_tensor in enumerate(src_fp32_params):
swap_tensor, _ = self.partitioned_swap_pool.insert_tensor(fp32_tensor, fp16_swap_paths[i],
self._io_aligned_numel(fp32_tensor.numel()))
assert swap_tensor is not None
dst_fp16_params[i].ds_tensor.status = PartitionedParamStatus.AVAILABLE
self.partitioned_swap_pool.swap_out(self.aio_write_handle)
for param in dst_fp16_params:
param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE
# --- end of deepspeed/runtime/swap_tensor/partitioned_param_swapper.py ---
# DeepSpeed Team
"""
Functionality of swapping tensors to/from (NVMe) storage devices.
"""
import os
import torch
from deepspeed import comm as dist
from deepspeed.utils.logging import logger
from deepspeed.runtime.swap_tensor.constants import *
from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, \
MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers
from deepspeed.runtime.swap_tensor.utils import SwapBufferManager, SwapBufferPool
class FlattenedTensorSwapInfo(object):
def __init__(self, path, length, offset):
self.path = path
self.offset = offset
self.length = length
class OptimizerStateSwapInfo(object):
def __init__(self, parameter, numel, base_folder):
self.tensors = []
self.param_id = id(parameter)
self.swap_folder = base_folder
self.swap_paths = []
self.swapped_gradients = {}
self.unswapped_gradients = {}
self.tensor_numel = numel
self.tensor_dtype = parameter.dtype
self.tensor_device = parameter.device
self.has_state_tensors = False
self._add_tensors([parameter])
def numel(self):
return self.tensor_numel
def has_gradients(self):
return self.swapped_gradients or self.unswapped_gradients
def _add_tensors(self, tensor_list):
for t in tensor_list:
self.tensors.append(t)
self.swap_paths.append(os.path.join(self.swap_folder, f'{id(t)}.tensor.swp'))
def add_state_tensors(self, tensor_list):
self.has_state_tensors = True
self._add_tensors(tensor_list)
def device(self):
return self.tensor_device
def dtype(self):
return self.tensor_dtype
def release_memory(self):
for tensor in self.tensors:
tensor.data = torch.Tensor()
def get_or_create_gradient_paths(self, offsets, lengths):
gradient_paths = []
for offset, length in zip(offsets, lengths):
if not offset in self.swapped_gradients.keys():
path = os.path.join(self.swap_folder, f'{self.param_id}_gradient_{offset}_{length}.tensor.swp')
self.swapped_gradients[offset] = FlattenedTensorSwapInfo(path, length, offset)
gradient_paths.append(self.swapped_gradients[offset].path)
return gradient_paths
def set_swap_buffers(self, buffers):
compute_lengths = [self.numel()] * len(self.tensors)
compute_buffers = get_sized_buffers(buffers, compute_lengths)
for t, buffer in zip(self.tensors, compute_buffers):
t.data = buffer.data
def get_swap_gradient_buffers(self, swap_buffer):
assert self.numel() <= swap_buffer.numel()
return [swap_buffer.narrow(0, grad.offset, grad.length) for grad in self.swapped_gradients.values()]
def get_swap_gradient_paths(self):
return [grad.path for grad in self.swapped_gradients.values()]
def get_unpinned_state_tensors(self):
return [t for t in self.tensors if not t.is_pinned()]
def read_unswapped_gradients(self, dest_buffer):
num_elem_count = 0
for offset, grad_partition in self.unswapped_gradients.items():
dst_tensor = dest_buffer.narrow(0, offset, grad_partition.numel())
dst_tensor.data.copy_(grad_partition.data)
num_elem_count += grad_partition.numel()
return num_elem_count
def release_unswapped_gradients(self):
self.unswapped_gradients = {}
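# Illustrative sketch (commented out): gradient partitions large enough to swap get
# their own files keyed by (offset, length); partitions that are too small stay in
# unswapped_gradients and are merged back via read_unswapped_gradients(). `p` and
# the folder are hypothetical.
#
#   info = OptimizerStateSwapInfo(parameter=p, numel=p.numel(), base_folder='/nvme/optimizer')
#   paths = info.get_or_create_gradient_paths(offsets=[0, 1 << 20], lengths=[1 << 20, 1 << 20])
#   # -> ['/nvme/optimizer/<id(p)>_gradient_0_1048576.tensor.swp',
#   #     '/nvme/optimizer/<id(p)>_gradient_1048576_1048576.tensor.swp']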
SWAPPER_DEBUG_MODE = False
SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient'
class OptimizerSwapper(object):
def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers):
self.swap_config = swap_config
self.aio_config = aio_config
# NVMe swap management
self.swap_params_info = {}
self.swap_element_size = torch.tensor([], dtype=dtype).element_size()
self.swap_folder = os.path.join(base_folder, 'optimizer', f'rank{dist.get_rank()}')
os.makedirs(self.swap_folder, exist_ok=True)
self.optimizer = optimizer
# Read/Write alignment for each thread during Intra-request parallelism
self.min_aio_bytes = max(MIN_AIO_BYTES, aio_config[AIO_BLOCK_SIZE])
self.aligned_bytes = AIO_ALIGNED_BYTES * aio_config[AIO_THREAD_COUNT]
self.numel_alignment = self.aligned_bytes // self.swap_element_size
# Swap buffer management
self.largest_numel = self._io_aligned_numel(largest_numel)
self.dtype = dtype
self.swap_buffer_manager = SwapBufferManager(num_elems=self.largest_numel,
count=swap_config.buffer_count,
dtype=dtype)
# Timers
self.timers = timers
self.timer_names = set()
# Print exclusion list
self.print_exclude_list = [
'optimizer',
'swap_buffer_manager',
'swap_params_info',
'timers',
'timer_names',
]
def swappable_tensor(self, param=None, numel=None):
assert param is not None or numel is not None, "Either param or numel must be provided"
if param is not None:
return self.min_aio_bytes <= (param.numel() * self.swap_element_size)
return self.min_aio_bytes <= (numel * self.swap_element_size)
def init_timers(self):
self.timer_names = set()
def log_timers(self):
if self.timer_names:
self._log_timers(list(self.timer_names), force=True)
def pre_backward(self):
self.init_timers()
def post_backward(self):
pass
def _flush_gradient_swapper(self, gradient_swapper):
if gradient_swapper.has_buffers():
self._start_timer(SWAP_OUT_GRADIENT_TIMER)
pinned_buffers = gradient_swapper.release_buffers()
self.swap_buffer_manager.free(pinned_buffers)
self._stop_timer(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.add(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.update(gradient_swapper.get_timer_names())
def _swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors, gradient_swapper):
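        # Gradient pieces too small to meet the minimum AIO request size are kept in memory
        # in swap_info.unswapped_gradients; the remaining (alignment-adjusted) pieces are
        # written asynchronously to per-offset swap files via the gradient swapper.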
        if id(parameter) not in self.swap_params_info:
return
swap_info = self.swap_params_info[id(parameter)]
swappable_tensors = []
swappable_offsets = []
swappable_lengths = []
aligned_gradients, aligned_offsets = self._adjust_for_misaligned_lengths(tensors=gradient_tensors,
offsets=gradient_offsets)
self._start_timer(SWAP_OUT_GRADIENT_TIMER)
for tensor, offset in zip(aligned_gradients, aligned_offsets):
if not self.swappable_tensor(param=tensor):
swap_info.unswapped_gradients[offset] = tensor
continue
swappable_tensors.append(tensor)
swappable_offsets.append(offset)
swappable_lengths.append(tensor.numel())
if len(swappable_tensors) > 0:
if not gradient_swapper.has_buffers():
pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
gradient_swapper.add_buffers(pinned_buffers)
swappable_paths = swap_info.get_or_create_gradient_paths(swappable_offsets, swappable_lengths)
gradient_swapper.swap_out_tensors(tensor_list=swappable_tensors, path_list=swappable_paths)
self._stop_timer(SWAP_OUT_GRADIENT_TIMER)
self.timer_names.add(SWAP_OUT_GRADIENT_TIMER)
def _initialize_from_swapped_fp16_params(self, aio_handle, fp16_partitions_info, fp16_num_elems,
fp16_pinned_buffers, fp32_parameters):
assert len(fp32_parameters) == len(fp16_partitions_info)
assert len(fp32_parameters) == len(fp16_num_elems)
assert all([buffer.is_pinned() for buffer in fp16_pinned_buffers])
fp32_swap_paths = self._get_swap_paths(parameters=fp32_parameters, num_elems=fp16_num_elems)
fp32_pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
fp16_buffer_numel = [buf.numel() for buf in fp16_pinned_buffers]
assert all([numel >= self.largest_numel for numel in fp16_buffer_numel]), \
f"numel of fp16 buffers {fp16_buffer_numel} is too small for initializing fp32 params {self.largest_numel}"
fp32_swap_buffers = SwapBufferPool(fp32_pinned_buffers)
fp16_swap_buffers = SwapBufferPool(fp16_pinned_buffers)
curr_index = 0
while curr_index < len(fp32_parameters):
fp16_pinned_tensors = self._swap_in_fp16_params(aio_handle=aio_handle,
fp16_num_elems=fp16_num_elems[curr_index:],
fp16_partitions_info=fp16_partitions_info[curr_index:],
fp16_swap_buffers=fp16_swap_buffers)
if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE:
for i, tensor in enumerate(fp16_pinned_tensors):
true_index = curr_index + i
logger.info(
f'swap_in_fp16_param: fp32_id = {id(fp32_parameters[true_index])} index = {true_index} orig_num_elem = {fp16_num_elems[true_index]}, swap_num_elem = {fp16_pinned_tensors[i].numel()}'
)
swap_out_count = self._swap_out_fp16_params(aio_handle=aio_handle,
fp32_swap_paths=fp32_swap_paths[curr_index:],
fp32_swap_buffers=fp32_swap_buffers,
fp16_pinned_tensors=fp16_pinned_tensors)
assert swap_out_count == len(fp16_pinned_tensors), \
f"{swap_out_count} does not match {len(fp16_pinned_tensors)}"
fp16_swap_buffers.reset()
fp32_swap_buffers.reset()
curr_index += swap_out_count
self.swap_buffer_manager.free(fp32_pinned_buffers)
def _swap_in_fp16_params(self, aio_handle, fp16_num_elems, fp16_partitions_info, fp16_swap_buffers):
assert len(fp16_num_elems) > 0
swapped_fp16_tensors = []
swap_tensors = []
swap_paths = []
unswapped_srcs = []
unswapped_dsts = []
for i, numel in enumerate(fp16_num_elems):
pinned_tensor, _ = fp16_swap_buffers.allocate_tensor(numel, None, numel)
if pinned_tensor is None:
break
swapped_fp16_tensors.append(pinned_tensor)
offset = 0
for tensor, partition_numel, partition_path in fp16_partitions_info[i]:
dst_tensor = pinned_tensor.narrow(0, offset, partition_numel)
if partition_path is None:
unswapped_srcs.append(tensor)
unswapped_dsts.append(dst_tensor)
else:
swap_paths.append(partition_path)
swap_tensors.append(dst_tensor)
offset += partition_numel
assert len(swapped_fp16_tensors) + len(unswapped_srcs) > 0
ret = swap_in_tensors(aio_handle, swap_tensors, swap_paths)
for src, dst in zip(unswapped_srcs, unswapped_dsts):
dst.data.copy_(src.data)
assert len(swap_tensors) == aio_handle.wait()
return swapped_fp16_tensors
def _swap_out_fp16_params(self, aio_handle, fp32_swap_paths, fp32_swap_buffers, fp16_pinned_tensors):
assert len(fp16_pinned_tensors) <= len(fp32_swap_paths)
swap_out_count = 0
for i, fp16_tensor in enumerate(fp16_pinned_tensors):
if not fp32_swap_buffers.has_space(fp16_tensor.numel()):
fp32_swap_buffers.swap_out(aio_handle)
fp32_swap_buffers.reset()
pinned_tensor, _ = fp32_swap_buffers.insert_tensor(fp16_tensor, fp32_swap_paths[i],
self._io_aligned_numel(fp16_tensor.numel()))
assert pinned_tensor is not None
swap_out_count += 1
if len(fp32_swap_buffers.get_swap_tensors()) > 0:
fp32_swap_buffers.swap_out(aio_handle)
return swap_out_count
def _initialize_parameters(self, parameters, src_tensors, aio_handle):
assert len(parameters) == len(src_tensors)
swap_paths = self._get_swap_paths(parameters=parameters, num_elems=[src.numel() for src in src_tensors])
SWAP_INIT_TIMER = "swap_init_write"
self._start_timer(SWAP_INIT_TIMER)
pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
assert pinned_buffers is not None
self._swap_out_unpinned_tensors(aio_handle=aio_handle,
unpinned_tensors=src_tensors,
dest_paths=swap_paths,
pinned_buffers=pinned_buffers)
if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE:
for i, tensor in enumerate(src_tensors):
logger.info(
f'copy_in_fp16_param: fp32_id = {id(parameters[i])} index = {i}, swap_num_elem = {src_tensors[i].numel()}'
)
self.swap_buffer_manager.free(pinned_buffers)
self._stop_timer(SWAP_INIT_TIMER)
self._log_timers([SWAP_INIT_TIMER])
def _get_swap_paths(self, parameters, num_elems):
swap_info_list = [
self._create_param_swap_info(parameter=p,
numel=numel) \
for p, numel in zip(parameters, num_elems)
]
assert len(swap_info_list) == len(num_elems)
swap_paths = [info.swap_paths[0] for info in swap_info_list]
return swap_paths
def _swap_out_unpinned_tensors(self, aio_handle, unpinned_tensors, dest_paths, pinned_buffers):
swap_buffer_count = len(pinned_buffers)
unpinned_tensor_count = len(unpinned_tensors)
for i in range(0, unpinned_tensor_count, swap_buffer_count):
swap_tensor_count = min((unpinned_tensor_count - i), swap_buffer_count)
src_tensors = unpinned_tensors[i:(i + swap_tensor_count)]
compute_lengths = [t.numel() for t in src_tensors]
compute_buffers = get_sized_buffers(pinned_buffers, compute_lengths)
for dst, src in zip(compute_buffers, src_tensors):
dst.data.copy_(src.data)
swap_lengths = [self._io_aligned_numel(t.numel()) for t in src_tensors]
swap_buffers = get_sized_buffers(pinned_buffers, swap_lengths)
swap_paths = dest_paths[i:(i + swap_tensor_count)]
swap_out_tensors(aio_handle, swap_buffers, swap_paths)
assert aio_handle.wait() == swap_tensor_count
def _adjust_for_misaligned_lengths(self, tensors, offsets):
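        # Split each swappable tensor whose length is not a multiple of the I/O alignment
        # into an aligned head plus a small remainder tensor, so each piece can be swapped
        # or kept in memory independently.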
new_tensors = []
new_offsets = []
for orig_tensor, orig_offset in zip(tensors, offsets):
if not self.swappable_tensor(param=orig_tensor):
new_tensors.append(orig_tensor)
new_offsets.append(orig_offset)
continue
remainder = orig_tensor.numel() % self.numel_alignment
if remainder == 0:
new_tensors.append(orig_tensor)
new_offsets.append(orig_offset)
continue
# Split into two by making remainder a tensor
aligned_length = (orig_tensor.numel() // self.numel_alignment) * self.numel_alignment
new_tensors.append(orig_tensor.narrow(0, 0, aligned_length))
new_offsets.append(orig_offset)
# remainder tensor
new_tensors.append(orig_tensor.narrow(0, aligned_length, remainder))
new_offsets.append(orig_offset + aligned_length)
return new_tensors, new_offsets
def _retrieve_unswapped_grad_partitions(self, swap_info, dest_buffer):
UNSWAPPED_READ_GRADIENTS = 'unswapped_read_gradients'
self._start_timer(UNSWAPPED_READ_GRADIENTS)
tensor_count = len(swap_info.unswapped_gradients)
num_elem_count = swap_info.read_unswapped_gradients(dest_buffer)
self._stop_timer(UNSWAPPED_READ_GRADIENTS)
self._log_timers([UNSWAPPED_READ_GRADIENTS])
# It should be safe to discard unswapped gradient partitions
swap_info.release_unswapped_gradients()
if SWAPPER_DEBUG_MODE:
logger.info(
f'optimizer_retrieve_unswapped_gradients: param={swap_info.param_id} tensor_count={tensor_count} elem_count={num_elem_count}'
)
def _get_state_tensors(self, parameter):
        if parameter not in self.optimizer.state:
return []
tensor_list = []
for value in self.optimizer.state[parameter].values():
if torch.is_tensor(value):
tensor_list.append(value)
return tensor_list
def _update_param_state_info(self, swap_info, parameter):
if not swap_info.has_state_tensors:
state_tensors = self._get_state_tensors(parameter)
if state_tensors:
swap_info.add_state_tensors(state_tensors)
def _create_param_swap_info(self, parameter, numel):
param_id = id(parameter)
        assert param_id not in self.swap_params_info
self.swap_params_info[param_id] = OptimizerStateSwapInfo(parameter=parameter,
numel=numel,
base_folder=self.swap_folder)
swap_info = self.swap_params_info[param_id]
self._update_param_state_info(swap_info, parameter)
return swap_info
def _get_param_swap_info(self, parameter):
param_id = id(parameter)
swap_info = self.swap_params_info.get(param_id, None)
if swap_info is not None:
self._update_param_state_info(swap_info, parameter)
return swap_info
def _start_timer(self, name):
if self.timers:
self.timers(name).start()
def _stop_timer(self, name):
if self.timers:
self.timers(name).stop()
def _log_timers(self, name_list, force=False):
if self.timers and (SWAPPER_DEBUG_MODE or force):
self.timers.log(name_list)
def _io_aligned_numel(self, numel):
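        # Round numel up to the next multiple of the I/O alignment (expressed in elements).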
remainder = numel % self.numel_alignment
return numel if remainder == 0 else (numel + self.numel_alignment - remainder) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/swap_tensor/optimizer_utils.py | optimizer_utils.py |
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import torch
from deepspeed.utils.logging import logger
from deepspeed.ops.op_builder import AsyncIOBuilder
from deepspeed import comm as dist
from deepspeed.runtime.swap_tensor.constants import *
from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object, \
get_sized_buffers
from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper
from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper
DEBUG_MODE = False
SWAP_IN_PARAM_TIMER = 'swap_in_param'
SWAP_OUT_PARAM_TIMER = 'swap_out_param'
SWAP_IN_GRADIENT_TIMER = 'swap_in_gradient'
class PartitionedOptimizerSwapper(OptimizerSwapper):
def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers):
super(PartitionedOptimizerSwapper, self).__init__(swap_config, aio_config, base_folder, optimizer,
largest_numel, device, dtype, timers)
aio_op = AsyncIOBuilder().load()
self.aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH],
aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS],
aio_config[AIO_THREAD_COUNT])
# Overlap swapping out
self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.aio_handle,
numel_alignment=self.numel_alignment,
timers=self.timers)
self.print_exclude_list += ['aio_handle', 'gradient_swapper', 'print_exclude_list']
if dist.get_rank() == 0:
print_object(obj=self, name='PartitionedOptimizerSwapper', exclude_list=self.print_exclude_list)
def initialize_parameters(self, parameters, src_tensors):
self._initialize_parameters(parameters=parameters, src_tensors=src_tensors, aio_handle=self.aio_handle)
def initialize_from_swapped_fp16_params(self, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers,
fp32_parameters):
self._initialize_from_swapped_fp16_params(aio_handle=self.aio_handle,
fp16_partitions_info=fp16_partitions_info,
fp16_num_elems=fp16_num_elems,
fp16_pinned_buffers=fp16_pinned_buffers,
fp32_parameters=fp32_parameters)
def flush_gradients(self):
self._flush_gradient_swapper(self.gradient_swapper)
def swap_in_optimizer_state(self, parameter, async_parameter=None):
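        # Flush any pending gradient swap-outs, allocate one aligned pinned buffer per
        # optimizer state tensor (plus one for gradients if any were swapped out), then
        # read the parameter's optimizer state and gradients back from NVMe.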
swap_info = self._get_param_swap_info(parameter)
if swap_info is None:
return
self._flush_gradient_swapper(self.gradient_swapper)
required_buffer_count = len(swap_info.tensors) + (1 if swap_info.has_gradients() else 0)
aligned_numel = self._io_aligned_numel(swap_info.numel())
pinned_buffers = self.swap_buffer_manager.allocate(num_elems=aligned_numel,
count=required_buffer_count,
dtype=parameter.dtype)
assert pinned_buffers is not None
self.allocated_swap_buffers = pinned_buffers.copy()
self._start_timer(SWAP_IN_PARAM_TIMER)
self._swap_in_parameter(aio_handle=self.aio_handle,
parameter=parameter,
dest_buffers=pinned_buffers[:required_buffer_count])
self._stop_timer(SWAP_IN_PARAM_TIMER)
self.timer_names.add(SWAP_IN_PARAM_TIMER)
self._start_timer(SWAP_IN_GRADIENT_TIMER)
self._swap_in_gradients(aio_handle=self.aio_handle, parameter=parameter, dest_buffer=pinned_buffers[-1])
self._stop_timer(SWAP_IN_GRADIENT_TIMER)
self.timer_names.add(SWAP_IN_GRADIENT_TIMER)
def swap_out_optimizer_state(self, parameter, async_swap=False):
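        # Write pinned state tensors directly to their swap files and stage unpinned
        # tensors through pinned buffers; tensor storage is released after the writes
        # complete and all buffers allocated for this parameter are freed.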
swap_info = self._get_param_swap_info(parameter=parameter)
if swap_info is None:
return
self._start_timer(SWAP_OUT_PARAM_TIMER)
pinned_tensors, pinned_paths, unpinned_tensors, unpinned_paths = self._separate_pinned_tensors(swap_info)
swap_bytes = sum([self._io_aligned_numel(t.numel()) * t.element_size() for t in swap_info.tensors])
WRITE_TIMER = 'swap_submit_write'
self._start_timer(WRITE_TIMER)
swap_out_tensors(self.aio_handle, pinned_tensors, pinned_paths)
assert self.aio_handle.wait() == len(pinned_tensors)
for t in pinned_tensors:
t.data = torch.Tensor()
if len(unpinned_tensors) > 0:
pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype)
self._swap_out_unpinned_tensors(aio_handle=self.aio_handle,
unpinned_tensors=unpinned_tensors,
dest_paths=unpinned_paths,
pinned_buffers=pinned_buffers)
self.allocated_swap_buffers += pinned_buffers
for t in unpinned_tensors:
t.data = torch.Tensor()
self._stop_timer(WRITE_TIMER)
self.swap_buffer_manager.free(self.allocated_swap_buffers)
self.allocated_swap_buffers = []
self._stop_timer(SWAP_OUT_PARAM_TIMER)
self.timer_names.add(SWAP_OUT_PARAM_TIMER)
self._log_timers([WRITE_TIMER])
if DEBUG_MODE and dist.get_rank() == 0:
logger.info(f'optimizer_param_swap_out: {(swap_bytes/(1024**3)):5.2f} GB')
def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors):
self._swap_out_gradients(parameter=parameter,
gradient_offsets=gradient_offsets,
gradient_tensors=gradient_tensors,
gradient_swapper=self.gradient_swapper)
def _swap_in_parameter(self, aio_handle, parameter, dest_buffers):
swap_info = self._get_param_swap_info(parameter)
if swap_info is None:
return
assert len(swap_info.tensors) <= len(dest_buffers)
swap_lengths = [self._io_aligned_numel(swap_info.numel())] * len(swap_info.tensors)
swap_buffers = get_sized_buffers(dest_buffers, swap_lengths)
READ_TIMER = 'swap_submit_read_param'
WAIT_TIMER = 'swap_wait_read_param'
self._start_timer(READ_TIMER)
swap_in_tensors(aio_handle, swap_buffers, swap_info.swap_paths)
self._stop_timer(READ_TIMER)
swap_bytes = sum([buffer.numel() * buffer.element_size() for buffer in swap_buffers])
self._start_timer(WAIT_TIMER)
aio_handle.wait()
self._stop_timer(WAIT_TIMER)
compute_lengths = [swap_info.numel()] * len(swap_info.tensors)
compute_buffers = get_sized_buffers(dest_buffers, compute_lengths)
for t, buffer in zip(swap_info.tensors, compute_buffers):
t.data = buffer.data
self._log_timers([READ_TIMER, WAIT_TIMER])
if DEBUG_MODE and dist.get_rank() == 0:
logger.info(f'optimizer_param_swap_in: {(swap_bytes/(1024**3)):5.2f} GB')
def _separate_pinned_tensors(self, swap_info):
pinned_tensors = []
pinned_paths = []
unpinned_tensors = []
unpinned_paths = []
for tensor, path in zip(swap_info.tensors, swap_info.swap_paths):
if tensor.is_pinned():
pinned_tensors.append(tensor)
pinned_paths.append(path)
else:
unpinned_tensors.append(tensor)
unpinned_paths.append(path)
return pinned_tensors, pinned_paths, unpinned_tensors, unpinned_paths
def _swap_in_pinned_gradients(self, aio_handle, parameter, gradient_tensor):
swap_info = self.swap_params_info[id(parameter)]
param_gradients = swap_info.swapped_gradients.values()
swap_buffers = [gradient_tensor.narrow(0, grad.offset, grad.length) for grad in param_gradients]
swap_paths = [grad.path for grad in param_gradients]
SWAP_READ_GRADIENTS = 'swap_submit_read_gradient'
SWAP_WAIT_GRADIENTS = 'swap_submit_wait_gradient'
self._start_timer(SWAP_READ_GRADIENTS)
swap_in_tensors(aio_handle, swap_buffers, swap_paths)
self._stop_timer(SWAP_READ_GRADIENTS)
self._start_timer(SWAP_WAIT_GRADIENTS)
assert len(swap_buffers) == aio_handle.wait()
self._stop_timer(SWAP_WAIT_GRADIENTS)
self._log_timers([SWAP_READ_GRADIENTS, SWAP_WAIT_GRADIENTS])
def _swap_in_gradients(self, aio_handle, parameter, dest_buffer):
swap_info = self.swap_params_info.get(id(parameter), None)
if not (swap_info and swap_info.has_gradients()):
return
assert dest_buffer.is_pinned()
assert parameter.numel() <= dest_buffer.numel()
parameter.grad = dest_buffer.narrow(0, 0, parameter.numel())
if swap_info.swapped_gradients:
self._swap_in_pinned_gradients(aio_handle, parameter, parameter.grad)
if swap_info.unswapped_gradients:
self._retrieve_unswapped_grad_partitions(swap_info=swap_info, dest_buffer=parameter.grad) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py | partitioned_optimizer_swapper.py |
# DeepSpeed Team
import math
from deepspeed.utils import logger
from .constants import *
class CurriculumScheduler(object):
def __init__(self, config):
super().__init__()
self.state = {}
assert CURRICULUM_LEARNING_MIN_DIFFICULTY in config, \
f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MIN_DIFFICULTY}'"
assert CURRICULUM_LEARNING_MAX_DIFFICULTY in config, \
f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MAX_DIFFICULTY}'"
assert CURRICULUM_LEARNING_SCHEDULE_TYPE in config, \
f"Curriculum learning requires the config '{CURRICULUM_LEARNING_SCHEDULE_TYPE}'"
self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY]
self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] = config[CURRICULUM_LEARNING_MAX_DIFFICULTY]
self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY]
self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] = config[CURRICULUM_LEARNING_SCHEDULE_TYPE]
self.first_step = True
if config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE:
"""
The schedule_config is a list of difficulty and a list of max
step belonging to each difficulty. Example json config:
"schedule_config": {
"difficulty": [1,2,3],
"max_step": [5,10]
}
The "max_step" has one less element than "difficulty", because
the last difficulty will be used for all following steps.
            self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] stores this schedule_config
            as-is; the difficulty for a given step is found by scanning the "max_step" list.
"""
assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY}'"
assert CURRICULUM_LEARNING_SCHEDULE_MAX_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_MAX_STEP}'"
assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) > 0
assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) > 0
assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) == len(
config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) + 1
self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT:
"""
The schedule_config includes:
total_curriculum_step: how many steps the curriculum learning takes to go
from min difficulty to max difficulty.
            difficulty_step: every difficulty produced by the schedule is rounded
            down to a multiple of this value. This sets the granularity of
            difficulty increases and helps preserve NVIDIA Tensor Core
            acceleration (which requires multiples of 8 for FP16 data or
            16 for INT8 data).
root_degree: the degree of the root function. Degree of 2 means
square root and degree of 3 means cube root. Degree of 1 is
equivalent to linear.
"schedule_config": {
"total_curriculum_step": 30000,
"difficulty_step": 8,
"root_degree": 2
}
"""
assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'"
assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'"
assert CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE}'"
if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0:
logger.warning(
                    'When using the seqlen metric, the difficulty_step for curriculum learning has to be a multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if it is unrelated to your metric/hardware.'
)
self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR:
"""
The schedule_config is the same as CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT but without the
root_degree.
"schedule_config": {
"total_curriculum_step": 30000,
"difficulty_step": 8
}
"""
assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'"
assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \
f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'"
if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0:
logger.warning(
                    'When using the seqlen metric, the difficulty_step for curriculum learning has to be a multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if it is unrelated to your metric/hardware.'
)
self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM:
"""
            Fully customized schedule. The user needs to provide a custom schedule
            function via the set_custom_curriculum_learning_schedule API in
            deepspeed/runtime/engine.py.
"""
self.custom_get_difficulty = None
else:
raise RuntimeError('Unsupported curriculum schedule type')
def get_current_difficulty(self):
return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY]
def set_current_difficulty(self, difficulty):
self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = difficulty
def set_custom_get_difficulty(self, schedule_function):
self.custom_get_difficulty = schedule_function
def get_state(self):
return self.state
def set_state(self, state):
self.state = state
def __fixed_discrete_get_difficulty(self, global_steps):
s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
if global_steps > s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][-1]:
return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][-1]
for i in range(len(s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP])):
if global_steps <= s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][i]:
return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][i]
def __fixed_root_get_difficulty(self, global_steps, root_degree=None):
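        # Root schedule: difficulty(t) = min_d + (max_d - min_d) * (t / total_curriculum_step)**(1 / root_degree),
        # floored, rounded down to a multiple of difficulty_step, and capped at max_d.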
s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG]
if root_degree is None:
root_degree = s_state[CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE]
next_difficulty = (float(global_steps) / s_state[CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP])**(1.0 / root_degree)
next_difficulty = math.floor(
next_difficulty *
(self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] - self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) +
self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY])
next_difficulty -= (next_difficulty % s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP])
next_difficulty = min(next_difficulty, self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY])
return next_difficulty
def get_difficulty(self, global_steps):
if self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE:
return self.__fixed_discrete_get_difficulty(global_steps)
elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR:
return self.__fixed_root_get_difficulty(global_steps, 1)
elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT:
return self.__fixed_root_get_difficulty(global_steps)
elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM:
return self.custom_get_difficulty(global_steps)
else:
raise RuntimeError('Unsupported curriculum schedule type')
def update_difficulty(self, global_steps):
if self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] < self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]:
self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = self.get_difficulty(global_steps)
return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/curriculum_scheduler.py | curriculum_scheduler.py |
# DeepSpeed Team
"""
Data efficiency library
See sample config at https://www.deepspeed.ai/docs/config-json/data-efficiency
"""
DATA_EFFICIENCY = "data_efficiency"
DATA_EFFICIENCY_ENABLED = "enabled"
DATA_EFFICIENCY_ENABLED_DEFAULT = False
DATA_EFFICIENCY_SEED = "seed"
DATA_EFFICIENCY_SEED_DEFAULT = 1234
#########################################
# Data efficiency - Data Sampling
#########################################
DATA_SAMPLING = "data_sampling"
DATA_SAMPLING_ENABLED = "enabled"
DATA_SAMPLING_ENABLED_DEFAULT = False
DATA_SAMPLING_NUM_EPOCHS = "num_epochs"
DATA_SAMPLING_NUM_EPOCHS_DEFAULT = 1000
DATA_SAMPLING_NUM_WORKERS = "num_workers"
DATA_SAMPLING_NUM_WORKERS_DEFAULT = 0
#########################################
# Data efficiency - Data Sampling - Curriculum Learning
#########################################
CURRICULUM_LEARNING = "curriculum_learning"
CURRICULUM_LEARNING_ENABLED = "enabled"
CURRICULUM_LEARNING_ENABLED_DEFAULT = False
CURRICULUM_LEARNING_CLUSTER_PATH = "data_cluster_path"
CURRICULUM_LEARNING_METRICS = "curriculum_metrics"
CURRICULUM_LEARNING_SAMPLE_PATH = "index_to_sample_path"
CURRICULUM_LEARNING_METRIC_PATH = "index_to_metric_path"
CURRICULUM_LEARNING_CLUSTERING_TYPE = "clustering_type"
CURRICULUM_LEARNING_SINGLE_CLUSTER = "single_cluster"
CURRICULUM_LEARNING_CLUSTER_PREFIX = "cluster"
CURRICULUM_LEARNING_DIFFICULTY_TYPE = "difficulty_type"
CURRICULUM_LEARNING_VALUE_BASED = "value"
CURRICULUM_LEARNING_PERCENTILE_BASED = "percentile"
CURRICULUM_LEARNING_MIN_DIFFICULTY = "min_difficulty"
CURRICULUM_LEARNING_MAX_DIFFICULTY = "max_difficulty"
CURRICULUM_LEARNING_SCHEDULE_TYPE = "schedule_type"
CURRICULUM_LEARNING_SCHEDULE_CONFIG = "schedule_config"
CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY = "difficulty"
CURRICULUM_LEARNING_SCHEDULE_MAX_STEP = "max_step"
CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP = "total_curriculum_step"
CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP = "difficulty_step"
CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE = "root_degree"
CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE = "fixed_discrete"
CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT = "fixed_root"
CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR = "fixed_linear"
CURRICULUM_LEARNING_SCHEDULE_CUSTOM = "custom"
CURRICULUM_LEARNING_CURRENT_DIFFICULTY = "current_difficulty"
CURRICULUM_LEARNING_BATCH = "batch"
CURRICULUM_LEARNING_CONSUMED_SAMPLES = "consumed_samples"
CURRICULUM_LEARNING_STEP = "curriculum_step"
CURRICULUM_LEARNING_CURRENT_DIFFICULTIES = "current_difficulties"
CURRICULUM_LEARNING_DATA_CLUSTER_PATHS = "data_cluster_paths"
CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION = "data_cluster_current_position"
CURRICULUM_LEARNING_NP_RNG_STATE = "np_rng_state"
#########################################
# Curriculum Learning legacy implementation
#########################################
CURRICULUM_LEARNING_LEGACY = "curriculum_learning"
CURRICULUM_ENABLED_LEGACY = "enabled"
CURRICULUM_ENABLED_DEFAULT_LEGACY = False
#########################################
# Data efficiency - Data Routing
#########################################
DATA_ROUTING = "data_routing"
DATA_ROUTING_ENABLED = "enabled"
DATA_ROUTING_ENABLED_DEFAULT = False
#########################################
# Data efficiency - Data Routing - Random LTD
#########################################
RANDOM_LTD = "random_ltd"
RANDOM_LTD_ENABLED = "enabled"
RANDOM_LTD_ENABLED_DEFAULT = False
RANDOM_LTD_MODEL_MASK_NAME = "model_mask_name"
RANDOM_LTD_MODEL_TYPE = "model_type"
RANDOM_LTD_MICRO_BATCH_SIZE = "micro_batch_size"
RANDOM_LTD_GLOBAL_BATCH_SIZE = "global_batch_size"
RANDOM_LTD_SAMPLE_INDEX = "sample_idx"
RANDOM_LTD_ATTENTION_MASK = "attention_mask"
RANDOM_LTD_HIDDEN_STATE_ORDER = "hidden_state_order"
RANDOM_LTD_LAYER_NUM = "random_ltd_layer_num"
RANDOM_LTD_LAYER_ID = "random_ltd_layer_id"
RANDOM_LTD_TOTAL_LAYER_NUM = "total_layer_num"
RANDOM_LTD_CONSUMED_LAYER_TOKENS = "consumed_layer_tokens"
# scheduler
RANDOM_LTD_SCHEDULER = "random_ltd_schedule"
RANDOM_LTD_MAX_VALUE = "max_value"
RANDOM_LTD_MIN_VALUE = "min_value"
RANDOM_LTD_CURRENT_VALUE = "current_value"
RANDOM_LTD_SCHEDULE_CONFIG = "schedule_config"
RANDOM_LTD_INCREASE_STEP = "seq_per_step"
RANDOM_LTD_REQUIRE_STEP = "require_steps"
RANDOM_LTD_SCHEDULER_TYPE = "schedule_type"
RANDOM_LTD_CURR_STEP = "current_steps"
# learning rate schedulers
RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE = "layer_token_lr_schedule"
RANDOM_LTD_LAYER_TOKEN_LR_ENABLED = "enabled"
RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT = False
RANDOM_LTD_TOTAL_LAYER_TOKENS = "total_layer_tokens"
RANDOM_LTD_WARMUP_TYPE = "warmup_type"
RANDOM_LTD_WARMUP_LAYER_TOKENS = "warmup_layer_tokens" | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/constants.py | constants.py |
# DeepSpeed Team
from .constants import *
import copy
from ..config_utils import get_scalar_param
# TODO: Reducing config verbosity by returning None or {} when disabled.
# One challenge is that we still need to somehow include the default values,
# for example the *_ENABLED has default of false.
def get_data_efficiency_config(param_dict):
output = {}
output[DATA_EFFICIENCY_ENABLED] = get_data_efficiency_enabled(param_dict)
output[DATA_EFFICIENCY_SEED] = get_data_efficiency_seed(param_dict)
if DATA_EFFICIENCY not in param_dict.keys():
param_dict[DATA_EFFICIENCY] = {}
sub_param_dict = param_dict[DATA_EFFICIENCY]
output[DATA_SAMPLING] = get_data_sampling(sub_param_dict)
output[DATA_ROUTING] = get_data_routing(sub_param_dict)
return output
def get_data_efficiency_enabled(param_dict):
if DATA_EFFICIENCY in param_dict.keys():
return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_ENABLED, DATA_EFFICIENCY_ENABLED_DEFAULT)
else:
return False
def get_data_efficiency_seed(param_dict):
if DATA_EFFICIENCY in param_dict.keys():
return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_SEED, DATA_EFFICIENCY_SEED_DEFAULT)
else:
return DATA_EFFICIENCY_SEED_DEFAULT
def get_data_sampling(param_dict):
output = {}
output[DATA_SAMPLING_ENABLED] = get_data_sampling_enabled(param_dict)
output[DATA_SAMPLING_NUM_EPOCHS] = get_data_sampling_num_epochs(param_dict)
output[DATA_SAMPLING_NUM_WORKERS] = get_data_sampling_num_workers(param_dict)
if DATA_SAMPLING not in param_dict.keys():
param_dict[DATA_SAMPLING] = {}
sub_param_dict = param_dict[DATA_SAMPLING]
output[CURRICULUM_LEARNING] = get_curriculum_learning(sub_param_dict)
return output
def get_data_sampling_enabled(param_dict):
if DATA_SAMPLING in param_dict.keys():
return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_ENABLED, DATA_SAMPLING_ENABLED_DEFAULT)
else:
return False
def get_data_sampling_num_epochs(param_dict):
if DATA_SAMPLING in param_dict.keys():
return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_EPOCHS, DATA_SAMPLING_NUM_EPOCHS_DEFAULT)
else:
return DATA_SAMPLING_NUM_EPOCHS_DEFAULT
def get_data_sampling_num_workers(param_dict):
if DATA_SAMPLING in param_dict.keys():
return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_WORKERS,
DATA_SAMPLING_NUM_WORKERS_DEFAULT)
else:
return DATA_SAMPLING_NUM_WORKERS_DEFAULT
def get_curriculum_learning(param_dict):
output = {}
output[CURRICULUM_LEARNING_ENABLED] = get_curriculum_learning_enabled(param_dict)
if CURRICULUM_LEARNING not in param_dict.keys():
param_dict[CURRICULUM_LEARNING] = {}
sub_param_dict = param_dict[CURRICULUM_LEARNING]
if output[CURRICULUM_LEARNING_ENABLED]:
assert CURRICULUM_LEARNING_METRICS in sub_param_dict.keys(
), f"Curriculum learning is enabled, {CURRICULUM_LEARNING_METRICS} must be specified"
for key, val in get_curriculum_learning_params(param_dict).items():
output[key] = val
return output
def get_curriculum_learning_enabled(param_dict):
if CURRICULUM_LEARNING in param_dict.keys():
return get_scalar_param(param_dict[CURRICULUM_LEARNING], CURRICULUM_LEARNING_ENABLED,
CURRICULUM_LEARNING_ENABLED_DEFAULT)
else:
return False
def get_curriculum_learning_params(param_dict):
if CURRICULUM_LEARNING in param_dict.keys():
curriculum_learning_params = copy.copy(param_dict[CURRICULUM_LEARNING])
curriculum_learning_params.pop(CURRICULUM_LEARNING_ENABLED)
return curriculum_learning_params
else:
return {}
def get_curriculum_enabled_legacy(param_dict):
if CURRICULUM_LEARNING_LEGACY in param_dict.keys():
return get_scalar_param(param_dict[CURRICULUM_LEARNING_LEGACY], CURRICULUM_ENABLED_LEGACY,
CURRICULUM_ENABLED_DEFAULT_LEGACY)
else:
return False
def get_curriculum_params_legacy(param_dict):
if CURRICULUM_LEARNING_LEGACY in param_dict.keys():
curriculum_params = copy.copy(param_dict[CURRICULUM_LEARNING_LEGACY])
curriculum_params.pop(CURRICULUM_ENABLED_LEGACY)
return curriculum_params
else:
return False
def get_data_routing(param_dict):
output = {}
output[DATA_ROUTING_ENABLED] = get_data_routing_enabled(param_dict)
if DATA_ROUTING not in param_dict.keys():
param_dict[DATA_ROUTING] = {}
sub_param_dict = param_dict[DATA_ROUTING]
output[RANDOM_LTD] = get_random_ltd(sub_param_dict)
return output
def get_data_routing_enabled(param_dict):
if DATA_ROUTING in param_dict.keys():
return get_scalar_param(param_dict[DATA_ROUTING], DATA_ROUTING_ENABLED, DATA_ROUTING_ENABLED_DEFAULT)
else:
return False
def get_random_ltd(param_dict):
output = {}
output[RANDOM_LTD_ENABLED] = RANDOM_LTD_ENABLED_DEFAULT
output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE] = {}
output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][
RANDOM_LTD_LAYER_TOKEN_LR_ENABLED] = RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT
if get_random_ltd_enabled(param_dict):
output[RANDOM_LTD_ENABLED] = get_random_ltd_enabled(param_dict)
for key, val in get_random_ltd_params(param_dict).items():
output[key] = val
return output
def get_random_ltd_enabled(param_dict):
if RANDOM_LTD in param_dict.keys():
return get_scalar_param(param_dict[RANDOM_LTD], RANDOM_LTD_ENABLED, RANDOM_LTD_ENABLED_DEFAULT)
else:
return False
def get_random_ltd_params(param_dict):
if RANDOM_LTD in param_dict.keys():
random_ltd_params = copy.copy(param_dict[RANDOM_LTD])
random_ltd_params.pop(RANDOM_LTD_ENABLED)
return random_ltd_params
else:
return {} | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/config.py | config.py |
# DeepSpeed Team
import math
from deepspeed.utils import logger
# from deepspeed.runtime.lr_schedules import WarmupLR
from ..constants import *
# Based on the Random-LTD paper: https://arxiv.org/abs/2211.11586
class BaseScheduler(object):
def __init__(self):
self.state = {}
def __fixed_root_get_value(self, global_steps, root_degree=None):
s_state = self.state[RANDOM_LTD_SCHEDULE_CONFIG]
if root_degree is None:
root_degree = s_state['root_degree']
next_seq = (float(global_steps) / s_state[RANDOM_LTD_REQUIRE_STEP])**(1.0 / root_degree)
next_seq = math.floor(next_seq * (self.state[RANDOM_LTD_MAX_VALUE] - self.state[RANDOM_LTD_MIN_VALUE]) +
self.state[RANDOM_LTD_MIN_VALUE])
next_seq -= (next_seq % s_state[RANDOM_LTD_INCREASE_STEP])
next_seq = min(next_seq, self.state[RANDOM_LTD_MAX_VALUE])
return next_seq
def get_value(self, global_steps):
if self.state[RANDOM_LTD_SCHEDULER_TYPE] == 'fixed_linear':
return self.__fixed_root_get_value(global_steps, 1)
else:
raise RuntimeError('Unsupported random LTD schedule type')
class RandomLTDScheduler(BaseScheduler):
def __init__(self, config):
super().__init__()
self.model_layer_num = config[RANDOM_LTD_TOTAL_LAYER_NUM]
self.random_ltd_layer_num = config[RANDOM_LTD_LAYER_NUM]
self.config_schedule = config[RANDOM_LTD_SCHEDULER]
self.global_batch_size = config[RANDOM_LTD_GLOBAL_BATCH_SIZE]
self.reset_to_init()
if config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]:
logger.warning("**********Work In Progress************")
raise NotImplementedError
self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0
# self.first_step = True
def get_total_layer_tokens(self, train_iters):
for step in range(train_iters):
self.update_seq(step)
return self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS]
def reset_to_init(self):
if self.config_schedule is not None:
self.state[RANDOM_LTD_MIN_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE]
self.state[RANDOM_LTD_MAX_VALUE] = self.config_schedule[RANDOM_LTD_MAX_VALUE]
self.state[RANDOM_LTD_CURRENT_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE]
self.state[RANDOM_LTD_SCHEDULE_CONFIG] = self.config_schedule[RANDOM_LTD_SCHEDULE_CONFIG]
self.state[RANDOM_LTD_SCHEDULER_TYPE] = self.config_schedule[RANDOM_LTD_SCHEDULER_TYPE]
self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0
self.state[RANDOM_LTD_CURR_STEP] = -1
def get_current_seq(self):
return self.state[RANDOM_LTD_CURRENT_VALUE]
def set_current_seq(self, seq_length):
self.state[RANDOM_LTD_CURRENT_VALUE] = seq_length
def get_random_ltd_layer_num(self):
return self.random_ltd_layer_num
def get_state(self):
return self.state
def set_state(self, state):
self.state = state
def update_seq(self, global_steps):
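        # Advance the current reserved sequence length per the schedule, then accumulate the
        # layer-tokens consumed this step:
        #   global_batch_size * (current_seq * random_ltd_layer_num + max_seq * (total_layer_num - random_ltd_layer_num))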
if self.state[RANDOM_LTD_CURRENT_VALUE] < self.state[RANDOM_LTD_MAX_VALUE]:
self.state[RANDOM_LTD_CURRENT_VALUE] = self.get_value(global_steps)
if global_steps != self.state[RANDOM_LTD_CURR_STEP]:
self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] += self.global_batch_size*(self.state[RANDOM_LTD_CURRENT_VALUE] * self.random_ltd_layer_num \
+ self.state[RANDOM_LTD_MAX_VALUE] * (self.model_layer_num - self.random_ltd_layer_num))
self.state[RANDOM_LTD_CURR_STEP] = global_steps
def state_dict(self):
return {
RANDOM_LTD_CONSUMED_LAYER_TOKENS: self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS],
RANDOM_LTD_CURR_STEP: self.state[RANDOM_LTD_CURR_STEP],
RANDOM_LTD_CURRENT_VALUE: self.state[RANDOM_LTD_CURRENT_VALUE],
RANDOM_LTD_MIN_VALUE: self.state[RANDOM_LTD_MIN_VALUE],
RANDOM_LTD_MAX_VALUE: self.state[RANDOM_LTD_MAX_VALUE],
}
def load_state_dict(self, state_dict):
self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = state_dict[RANDOM_LTD_CONSUMED_LAYER_TOKENS]
self.state[RANDOM_LTD_CURR_STEP] = state_dict[RANDOM_LTD_CURR_STEP]
self.state[RANDOM_LTD_CURRENT_VALUE] = state_dict[RANDOM_LTD_CURRENT_VALUE]
self.state[RANDOM_LTD_MIN_VALUE] = state_dict[RANDOM_LTD_MIN_VALUE]
self.state[RANDOM_LTD_MAX_VALUE] = state_dict[RANDOM_LTD_MAX_VALUE] | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/data_routing/scheduler.py | scheduler.py |
# DeepSpeed Team
from deepspeed.utils import logger
from torch import Tensor
from torch.nn import Module
from ..constants import *
from deepspeed.ops.random_ltd.dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens
# Based on the Random-LTD paper: https://arxiv.org/abs/2211.11586
class RandomLayerTokenDrop(Module):
"""
A layer wrapper for random LTD
"""
def __init__(self, layer: Module):
super(RandomLayerTokenDrop, self).__init__()
self.random_ltd_layer = layer
self.reserved_length = None #config['max_value']
self.random_ltd_scheduler = None
self.max_length = None
self.reserved_length = -1
self.curr_seq = -1
self.batch_first = False
def init_config(self, config, scheduler, random_ltd_layer_id):
self.random_ltd_scheduler = scheduler
self.random_ltd_layer_id = random_ltd_layer_id
self.max_length = self.random_ltd_scheduler.state[RANDOM_LTD_MAX_VALUE]
self.mask_name = config[RANDOM_LTD_MODEL_MASK_NAME]
self.micro_bs = config[RANDOM_LTD_MICRO_BATCH_SIZE]
self.random_ltd_num_layer = self.random_ltd_scheduler.random_ltd_layer_num
hs_order = config[RANDOM_LTD_HIDDEN_STATE_ORDER]
self.model_type = config[RANDOM_LTD_MODEL_TYPE]
if hs_order == 'batch_seq_dim':
self.get_hidden_tensor_shape = self.get_bsh
self.batch_first = True
elif hs_order == 'seq_batch_dim':
self.get_hidden_tensor_shape = self.get_sbh
self.batch_first = False
else:
            logger.warning("************For now, we only support batch_seq_dim or seq_batch_dim inputs. "
                           "You can easily add support for your own input dimension orders************")
raise NotImplementedError
if self.model_type == 'encoder':
self.index_generator = bert_sample_tokens
elif self.model_type == 'decoder':
self.index_generator = gpt_sample_tokens
else:
logger.warning("************For now, we only support encoder-only or decoder-only models************")
raise NotImplementedError
    def get_bsh(self, hidden_states):
        self.curr_seq, self.curr_micro_batch = hidden_states.size()[1], hidden_states.size()[0]
    def get_sbh(self, hidden_states):
        self.curr_seq, self.curr_micro_batch = hidden_states.size()[0], hidden_states.size()[1]
def forward(self, hidden_states, **kwargs) -> Tensor:
if self.random_ltd_scheduler is not None:
self.reserved_length = self.random_ltd_scheduler.get_current_seq()
self.get_hidden_tensor_shape(hidden_states)
if self.training and self.random_ltd_scheduler is not None and self.reserved_length < self.curr_seq:
if self.mask_name is not None:
mask = kwargs[self.mask_name]
else:
mask = None
if self.random_ltd_layer_id == 0:
sampled_indices, part_attention_mask = self.index_generator(self.reserved_length,\
self.curr_seq, \
self.curr_micro_batch, \
self.random_ltd_num_layer, \
hidden_states.device, mask)
self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX] = sampled_indices
self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK] = part_attention_mask
else:
sampled_indices = self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX]
part_attention_mask = self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK]
hidden_states, part_hidden_states = GatherTokens.apply(hidden_states,
sampled_indices[self.random_ltd_layer_id, :, :],
self.batch_first)
if self.mask_name is not None:
if self.model_type == 'encoder':
kwargs[self.mask_name] = part_attention_mask[self.random_ltd_layer_id]
else:
kwargs[self.mask_name] = part_attention_mask
outputs = self.random_ltd_layer(part_hidden_states, **kwargs)
if isinstance(outputs, tuple):
hidden_states = ScatterTokens.apply(hidden_states, outputs[0],
sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first)
my_list = list(outputs)
my_list[0] = hidden_states
return tuple(my_list)
elif isinstance(outputs, Tensor):
hidden_states = ScatterTokens.apply(hidden_states, outputs,
sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first)
return hidden_states
else:
logger.warning("************For now, we only support tuple and tensor output. \
You need to adjust the output according to the layer in your model************")
raise NotImplementedError
else:
return self.random_ltd_layer(hidden_states, **kwargs) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py | basic_layer.py |
# DeepSpeed Team
import os
from collections import defaultdict
import csv
import time
from multiprocessing import Process, Manager
import numpy as np
import torch
from torch.utils.data import BatchSampler, SequentialSampler, DataLoader, Subset
from deepspeed.utils import logger
from .indexed_dataset import MMapIndexedDataset
from .utils import split_dataset, split_index, create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
class DataAnalyzer(object):
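    """
    Analyzes per-sample metrics over a dataset. run_map() computes the configured metrics
    in parallel workers/threads and writes per-thread indexed files; merge_map_results()
    reduces those partial results into global sample_to_metric, index_to_sample and
    index_to_metric files.
    """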
def __init__(self,
dataset,
num_workers=1,
worker_id=0,
num_threads=1,
num_threads_reduce=1,
specific_threads=[],
batch_size=1,
metric_names=[],
metric_functions=[],
metric_types=[],
metric_dtypes=[],
save_path="./",
collate_fn=None,
custom_map_init=None,
custom_map_update=None,
custom_map_finalize=None,
custom_reduce=None):
super().__init__()
self.dataset = dataset
self.num_workers = num_workers
self.worker_id = worker_id
self.num_threads = num_threads
self.num_threads_reduce = num_threads_reduce
self.specific_threads = specific_threads
self.batch_size = batch_size
self.metric_names = metric_names
self.metric_functions = metric_functions
self.metric_types = metric_types
self.metric_dtypes = metric_dtypes
self.save_path = save_path
self.collate_fn = collate_fn
self.custom_map_init = custom_map_init
self.custom_map_update = custom_map_update
self.custom_map_finalize = custom_map_finalize
self.custom_reduce = custom_reduce
def init_metric_results(self, thread_id, metric_names, metric_types, metric_dtypes, save_path, worker_id):
metric_results = []
for m_idx in range(len(metric_names)):
metric_name, metric_type, metric_dtype = metric_names[m_idx], \
metric_types[m_idx], metric_dtypes[m_idx]
assert metric_dtype not in [
np.float64, np.double
], "Currently floating point metric values are not supported. Please change your metric into integer values (and potentially multiply a larger coefficient to keep the precision)."
metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/"
os.makedirs(metric_save_path, exist_ok=True)
if metric_type == 'single_value_per_sample':
sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_dtype)
metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample"
os.system(f"rm -rf {metric_to_sample_fname}*")
metric_to_sample_dict = defaultdict(list)
metric_results.append({
"sample_to_metric_fname": sample_to_metric_fname,
"sample_to_metric_builder": sample_to_metric_builder,
"metric_to_sample_fname": metric_to_sample_fname,
"metric_to_sample_dict": metric_to_sample_dict
})
elif metric_type == 'accumulate_value_over_samples':
metric_value = None
metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
metric_results.append({"metric_value": metric_value, "metric_value_fname": metric_value_fname})
return metric_results
def update_metric_results(self, data, metric_types, metric_functions, metric_results):
for m_idx in range(len(metric_types)):
metric_type, metric_function, metric_result = metric_types[m_idx], \
metric_functions[m_idx], metric_results[m_idx]
if metric_type == 'single_value_per_sample':
metric_values = metric_function(data)
for row in range(metric_values.size()[0]):
metric_result["sample_to_metric_builder"].add_item(metric_values[row].reshape(-1))
metric_result["metric_to_sample_dict"][metric_values[row].item()].append(
data['index'][row][0].item())
for m_value in metric_result["metric_to_sample_dict"]:
if len(metric_result["metric_to_sample_dict"][m_value]) > 100:
metric_fname = metric_result["metric_to_sample_fname"]
with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
writer = csv.writer(f)
writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
metric_result["metric_to_sample_dict"][m_value] = []
elif metric_type == 'accumulate_value_over_samples':
metric_values = metric_function(data)
if metric_result["metric_value"] is None:
metric_result["metric_value"] = metric_values
else:
metric_result["metric_value"].add_(metric_values)
def finalize_metric_results(self, metric_types, metric_dtypes, metric_results):
for m_idx in range(len(metric_types)):
metric_type, metric_dtype, metric_result = metric_types[m_idx], \
metric_dtypes[m_idx], metric_results[m_idx]
if metric_type == 'single_value_per_sample':
metric_fname = metric_result["sample_to_metric_fname"]
close_mmap_dataset_builder(metric_result["sample_to_metric_builder"], metric_fname)
for m_value in metric_result["metric_to_sample_dict"]:
if len(metric_result["metric_to_sample_dict"][m_value]) > 0:
metric_fname = metric_result["metric_to_sample_fname"]
with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
writer = csv.writer(f)
writer.writerows([metric_result["metric_to_sample_dict"][m_value]])
metric_result["metric_to_sample_dict"][m_value] = []
elif metric_type == 'accumulate_value_over_samples':
if metric_result["metric_value"] is not None:
metric_value_builder = create_mmap_dataset_builder(metric_result["metric_value_fname"],
metric_dtype)
metric_value_builder.add_item(metric_result["metric_value"].reshape(-1))
close_mmap_dataset_builder(metric_value_builder, metric_result["metric_value_fname"])
def run_map_helper(self, thread_id):
start_idx, end_idx = self.thread_splits[thread_id][0], \
self.thread_splits[thread_id][1]
logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \
f"on data subset {start_idx} to {end_idx}")
thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx)))
sampler = BatchSampler(SequentialSampler(thread_dataset), batch_size=self.batch_size, drop_last=False)
if self.collate_fn is None:
iterator = iter(DataLoader(thread_dataset, batch_sampler=sampler, num_workers=0, pin_memory=False))
else:
iterator = iter(
DataLoader(thread_dataset,
batch_sampler=sampler,
num_workers=0,
collate_fn=self.collate_fn,
pin_memory=False))
if self.custom_map_init is None:
metric_results = self.init_metric_results(thread_id, self.metric_names, self.metric_types,
self.metric_dtypes, self.save_path, self.worker_id)
else:
metric_results = self.custom_map_init(thread_id, self.metric_names, self.metric_types, self.metric_dtypes,
self.save_path, self.worker_id)
total_sample = len(thread_dataset)
processed_sample = 0
start = time.time()
while True:
try:
data = next(iterator)
if self.custom_map_update is None:
self.update_metric_results(data, self.metric_types, self.metric_functions, metric_results)
else:
self.custom_map_update(data, self.metric_types, self.metric_functions, metric_results)
processed_sample += self.batch_size
duration = (time.time() - start) / 3600.0
remain_duration = duration * total_sample / processed_sample - duration
logger.info(
f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \
f"out of {total_sample} processed in {duration:.2f} hr, " \
f"estimated to finish in {remain_duration:.2f} hr")
except StopIteration:
logger.info(f"worker {self.worker_id} thread {thread_id}: reach end of file")
break
if self.custom_map_finalize is None:
self.finalize_metric_results(self.metric_types, self.metric_dtypes, metric_results)
else:
self.custom_map_finalize(self.metric_types, self.metric_dtypes, metric_results)
logger.info(f"worker {self.worker_id} thread {thread_id}: finished")
def run_map(self):
self.worker_splits, self.thread_splits = split_dataset(self.dataset, self.num_workers, self.worker_id,
self.num_threads)
if len(self.specific_threads) > 0:
threads_to_run = self.specific_threads
else:
threads_to_run = list(range(self.num_threads))
if self.num_threads > 1:
p = []
for thread in threads_to_run:
                p.append(Process(target=self.run_map_helper, args=(thread, )))
                p[-1].start()
            for proc in p:
                proc.join()
else:
assert self.num_threads == 1
self.run_map_helper(0)
def get_metric_value_percentiles(self, metric_name, num_sample_per_value, total_num_samples):
logger.info(f"Checking the value percentiles of metric {metric_name}...")
processed_samples = 0
current_percentile = 5
for key in sorted(num_sample_per_value.keys()):
processed_samples += num_sample_per_value[key]
if processed_samples >= total_num_samples * current_percentile / 100.0:
logger.info(f"Metric {metric_name} {current_percentile}th percentile: {key}")
current_percentile += 5
def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path,
metric_name, return_dict):
results = []
for w_idx in range(num_workers):
for t_idx in range(num_threads):
if (w_idx * num_threads + t_idx) % num_threads_reduce == t_idx_reduce:
w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
unique_v = list(np.unique(w_sample_to_metric))
sample_to_metric_count = len(w_sample_to_metric)
logger.info(f"Finished gathering map stats from worker {w_idx} thread {t_idx}.")
results.append([unique_v, sample_to_metric_count])
return_dict[t_idx_reduce] = results
def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name, metric_value_dtype,
map_worker_thread):
sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
for w_t in map_worker_thread:
w_metric_save_path = f"{metric_save_path}/worker{w_t[0]}_thread{w_t[1]}/"
w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
w_data = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True)
for row in range(len(w_data)):
sample_to_metric_builder.add_item(torch.tensor(w_data[row].astype(np.int64), dtype=torch.long))
logger.info(f"Finished merge_sample_to_metric from worker {w_t[0]} thread {w_t[1]}.")
close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype,
unique_metric_values, num_workers, num_threads):
index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
for unique_v in unique_metric_values:
samples = []
for w_idx in range(num_workers):
for t_idx in range(num_threads):
w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
w_metric_to_sample_fname = f"{w_metric_save_path}/{metric_name}_metric_to_sample_{unique_v}.csv"
if os.path.isfile(w_metric_to_sample_fname):
with open(w_metric_to_sample_fname, 'r') as f:
datareader = csv.reader(f)
for row in datareader:
samples += [int(x) for x in row]
index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long))
index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long))
logger.info(f"Finished reducing metric {metric_name} value {unique_v}.")
close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
def merge_map_results(self, dataset, metric_names, metric_types, save_path, num_workers, num_threads,
num_threads_reduce):
total_num_samples = len(dataset)
sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1)
logger.info(
f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes."
)
for m_idx in range(len(metric_names)):
metric_name, metric_type = metric_names[m_idx], metric_types[m_idx]
if metric_type == 'single_value_per_sample':
metric_save_path = f"{save_path}/{metric_name}/"
sample_to_metric_count = 0
unique_metric_values = set([])
manager = Manager()
return_dict = manager.dict()
p = []
for t_idx_reduce in range(num_threads_reduce):
p.append(
Process(target=self.merge_gather_map_stats,
args=(
num_workers,
num_threads,
num_threads_reduce,
t_idx_reduce,
metric_save_path,
metric_name,
return_dict,
)))
p[t_idx_reduce].start()
for t_idx_reduce in range(num_threads_reduce):
p[t_idx_reduce].join()
for t_idx_reduce in range(num_threads_reduce):
results = return_dict[t_idx_reduce]
for res in results:
unique_metric_values = unique_metric_values.union(set(res[0]))
sample_to_metric_count += res[1]
value_max = max(unique_metric_values)
value_min = min(unique_metric_values)
                assert sample_to_metric_count == total_num_samples, "The number of samples in the map result files is not correct. It's possible that some map worker did not finish successfully."
metric_value_dtype = find_fit_int_dtype(value_min, value_max)
logger.info(
f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values."
)
# sample_to_metric
map_worker_thread = []
for w_idx in range(num_workers):
for t_idx in range(num_threads):
map_worker_thread.append([w_idx, t_idx])
thread_splits = split_index(0, len(map_worker_thread), num_threads_reduce)
p = []
for t_idx_reduce in range(num_threads_reduce):
start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
p.append(
Process(target=self.merge_sample_to_metric,
args=(
t_idx_reduce,
metric_save_path,
metric_name,
metric_value_dtype,
map_worker_thread[start_idx:end_idx],
)))
p[t_idx_reduce].start()
for t_idx_reduce in range(num_threads_reduce):
p[t_idx_reduce].join()
sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype)
for t_idx_reduce in range(num_threads_reduce):
chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
logger.info(f"Merging file {chunk_fname}")
sample_to_metric_builder.merge_file_(chunk_fname)
close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname)
sample_to_metric = MMapIndexedDataset(sample_to_metric_fname, skip_warmup=True)
assert len(sample_to_metric) == total_num_samples
# metric_to_sample
unique_metric_values = list(sorted(unique_metric_values))
thread_splits = split_index(0, len(unique_metric_values), num_threads_reduce)
p = []
for t_idx_reduce in range(num_threads_reduce):
start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1]
p.append(
Process(target=self.merge_metric_to_sample,
args=(
t_idx_reduce,
metric_save_path,
metric_name,
sample_idx_dtype,
metric_value_dtype,
unique_metric_values[start_idx:end_idx],
num_workers,
num_threads,
)))
p[t_idx_reduce].start()
for t_idx_reduce in range(num_threads_reduce):
p[t_idx_reduce].join()
index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample"
index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype)
index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric"
index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype)
for t_idx_reduce in range(num_threads_reduce):
chunk_is_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
logger.info(f"Merging file {chunk_is_fname}")
index_to_sample_builder.merge_file_(chunk_is_fname)
chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
logger.info(f"Merging file {chunk_im_fname}")
index_to_metric_builder.merge_file_(chunk_im_fname)
close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
num_sample_per_value = {}
index_to_sample = MMapIndexedDataset(index_to_sample_fname, skip_warmup=True)
index_to_metric = MMapIndexedDataset(index_to_metric_fname, skip_warmup=True)
index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged"
index_to_sample_merged_builder = create_mmap_dataset_builder(index_to_sample_merged_fname,
sample_idx_dtype)
for v_idx in range(len(index_to_sample)):
if v_idx > 0:
assert index_to_metric[v_idx] > index_to_metric[v_idx - 1]
num_sample_per_value[index_to_metric[v_idx][0]] = len(index_to_sample[v_idx])
assert sum(num_sample_per_value.values()) == total_num_samples
                merge_step = max(len(index_to_sample) // 100, 1)  # avoid a zero step when there are fewer than 100 unique values
for v_idx in range(0, len(index_to_sample), merge_step):
merged_samples = np.copy(
np.concatenate(index_to_sample[v_idx:min(len(index_to_sample), (v_idx + merge_step))],
axis=None))
index_to_sample_merged_builder.add_item(
torch.tensor(merged_samples.astype(np.int64), dtype=torch.long))
logger.info(f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}.")
close_mmap_dataset_builder(index_to_sample_merged_builder, index_to_sample_merged_fname)
self.get_metric_value_percentiles(metric_name, num_sample_per_value, total_num_samples)
elif metric_type == 'accumulate_value_over_samples':
metric_save_path = f"{save_path}/{metric_name}/"
metric_value = None
for w_idx in range(num_workers):
for t_idx in range(num_threads):
w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value"
w_metric_value = MMapIndexedDataset(w_metric_value_fname, skip_warmup=True)
if metric_value is None:
metric_value = np.copy(w_metric_value[0])
else:
metric_value += np.copy(w_metric_value[0])
value_max = int(max(metric_value))
value_min = int(min(metric_value))
metric_value_dtype = find_fit_int_dtype(value_min, value_max)
metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
metric_value_builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype)
metric_value_builder.add_item(torch.tensor(metric_value.astype(np.int64), dtype=torch.long))
close_mmap_dataset_builder(metric_value_builder, metric_value_fname)
def run_reduce(self):
if self.custom_reduce is None:
self.merge_map_results(self.dataset, self.metric_names, self.metric_types, self.save_path,
self.num_workers, self.num_threads, self.num_threads_reduce)
else:
self.custom_reduce(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers,
self.num_threads, self.num_threads_reduce) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py | data_analyzer.py |
# DeepSpeed Team
"""
Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# copied from fairseq/fairseq/data/indexed_dataset.py
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
# Some of the fixes/improvements are adopted from
# https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/main/megatron/data/indexed_dataset.py
from functools import lru_cache
import os
import shutil
import struct
from itertools import accumulate
import numpy as np
import torch
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
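# Example (illustrative): a GPT-2-sized vocabulary of 50257 tokens fits in uint16,
# while a ~250k-token multilingual vocabulary falls back to int32:
#   >>> __best_fitting_dtype(50257)
#   <class 'numpy.uint16'>
#   >>> __best_fitting_dtype(250000)
#   <class 'numpy.int32'>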
def get_available_dataset_impl():
return ['lazy', 'cached', 'mmap']
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
else:
return None
else:
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
return None
def make_builder(out_file, impl, vocab_size=None):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
else:
return IndexedDatasetBuilder(out_file)
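# Typical usage sketch (the paths, vocab size and token ids below are illustrative,
# not part of this module): write a .bin/.idx pair with a builder, then load it back
# with make_dataset(). `out_file` is the .bin path; finalize() takes the matching .idx path.
#   builder = make_builder('corpus.bin', impl='mmap', vocab_size=50257)
#   builder.add_item(torch.tensor([10, 11, 12]))  # one sentence of token ids
#   builder.end_document()                        # mark a document boundary
#   builder.finalize('corpus.idx')
#   dataset = make_dataset('corpus', impl='mmap', skip_warmup=True)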
def make_dataset(path, impl, skip_warmup=False):
if not IndexedDataset.exists(path):
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
return None
if impl == 'infer':
impl = infer_dataset_impl(path)
if impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup)
print(f"Unknown dataset implementation: {impl}")
return None
def dataset_exists(path, impl):
if impl == 'mmap':
return MMapIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {
1: np.uint8,
2: np.int8,
3: np.int16,
4: np.int32,
5: np.int64,
6: np.float64,
7: np.double,
8: np.uint16,
9: np.uint32,
10: np.uint64
}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
def create_doc_idx(sizes):
doc_idx = [0]
for i, s in enumerate(sizes):
if s == 0:
doc_idx.append(i + 1)
return doc_idx
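# Example (illustrative): a sentence of size 0 marks a document boundary, so the
# returned document index records where each document's sentences start:
#   >>> create_doc_idx([3, 2, 0, 4])
#   [0, 3]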
class IndexedDataset(torch.utils.data.Dataset):
"""Loader for IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path):
super().__init__()
self.path = path
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, ('Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.')
version = f.read(8)
assert struct.unpack('<Q', version) == (1, )
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.doc_count = struct.unpack('<Q', f.read(8))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
self.doc_idx = read_longs(f, self.doc_count)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if not self.data_file:
self.read_data(self.path)
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
return a
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]]
size = sum(sizes)
a = np.empty(size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[start] * self.element_size)
self.data_file.readinto(a)
offsets = list(accumulate(sizes))
sents = np.split(a, offsets[:-1])
return sents
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx:ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx:ptx + a.size])
return a
elif isinstance(idx, slice):
            # Hack just to make this work, can optimize later if necessary
sents = []
for i in range(*idx.indices(len(self))):
sents.append(self[i])
return sents
class IndexedDatasetBuilder(object):
    # np.double is an alias of np.float64 (8 bytes), so both keys resolve to the same 8-byte entry.
    element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float64: 8, np.double: 8}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
self.doc_idx = [0]
def add_item(self, tensor):
bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def end_document(self):
self.doc_idx.append(len(self.sizes))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
doc_offset = len(self.sizes)
begin = self.data_offsets[-1]
for data_offset in index.data_offsets[1:]:
self.data_offsets.append(begin + data_offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
self.doc_idx.extend((doc_offset + index.doc_idx)[1:])
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
index.write(struct.pack('<Q', len(self.doc_idx)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
write_longs(index, self.doc_idx)
index.close()
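# For reference, the legacy (non-mmap) .idx layout written by finalize() above and
# parsed by IndexedDataset.read_index(); all integers are little-endian:
#   8 bytes   magic          b'TNTIDX\x00\x00'
#   <Q        version        (always 1)
#   <QQ       dtype code, element size in bytes
#   <QQ       number of items (len(data_offsets) - 1), number of sizes
#   <Q        number of doc_idx entries
#   int64[]   dim_offsets, data_offsets, sizes, doc_idx (written via write_longs)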
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
def exscan_from_cumsum_(arr):
# given an array holding the result of an inclusive scan (cumsum),
# convert to an exclusive scan (shift to the right)
# [10, 30, 35, 50] --> [0, 10, 30, 35]
if arr.size > 1:
arr[1:] = arr[:-1]
if arr.size > 0:
arr[0] = 0
def get_pointers_with_total(sizes, elemsize, dtype):
"""Return a numpy array of type np.dtype giving the byte offsets.
Multiplies values in the sizes array by elemsize (bytes),
and then computes an exclusive scan to get byte offsets.
Returns the total number of bytes as second item in a tuple.
"""
# scale values in sizes array by elemsize to get sizes in bytes
pointers = np.array(sizes, dtype=dtype)
pointers *= elemsize
np.cumsum(pointers, axis=0, out=pointers)
# get total number of bytes from all sizes (last element)
bytes_last = pointers[-1] if len(sizes) > 0 else 0
# convert to byte offsets
exscan_from_cumsum_(pointers)
return pointers, bytes_last
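# Example (illustrative): three items of 3, 2 and 4 elements stored with a 2-byte
# dtype start at byte offsets 0, 6 and 10, for 18 bytes in total:
#   >>> get_pointers_with_total([3, 2, 4], 2, np.int64)
#   (array([ 0,  6, 10]), 18)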
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes, npdtype):
"""Return a numpy array of byte offsets given a list of sizes.
Multiplies values in the sizes array by dtype size (bytes),
and then computes an exclusive scan to get byte offsets.
"""
# compute element sizes in bytes
pointers, _ = get_pointers_with_total(sizes, dtype().itemsize, npdtype)
return pointers
def write(self, sizes, doc_idx):
self._file.write(struct.pack('<Q', len(sizes)))
self._file.write(struct.pack('<Q', len(doc_idx)))
sizes32 = np.array(sizes, dtype=np.int32)
self._file.write(sizes32.tobytes(order='C'))
del sizes32
pointers = self._get_pointers(sizes, np.int64)
del sizes
self._file.write(pointers.tobytes(order='C'))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order='C'))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, ('Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.')
version = struct.unpack('<Q', stream.read(8))
assert (1, ) == version
dtype_code, = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
self._doc_count = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
print(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
print(" reading sizes...")
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
print(" reading pointers...")
self._pointers = np.frombuffer(self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes)
print(" reading document index...")
self._doc_idx = np.frombuffer(self._bin_buffer,
dtype=np.int64,
count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
    def __setstate__(self, state):
        # Restore from the pickled path; skip the mmap warmup when unpickling.
        self._do_init(state, skip_warmup=True)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
print(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
print(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
print(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
""" Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr)
return np_array
@property
def sizes(self):
return self._index.sizes
def size(self, index):
return self._index.sizes[index]
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
@property
def dtype(self):
return self._index.dtype
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
self._doc_idx = [0]
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def add_item_numpy(self, np_array):
if np_array.dtype != self._dtype:
np_array = np_array.astype(self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def end_document(self):
self._doc_idx.append(len(self._sizes))
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
total_len = len(index.sizes) + len(self._sizes)
print(f" concat {another_file} size={len(index.sizes)} for a total size of {total_len}")
offset = len(self._sizes)
self._sizes.extend(index.sizes)
self._doc_idx.extend((offset + index.doc_idx)[1:])
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes, self._doc_idx) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py | indexed_dataset.py |
# DeepSpeed Team
"""
coding=utf-8
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/data_samplers.py
"""
import torch
import os
import numpy as np
import deepspeed.comm as dist
from deepspeed.utils import logger
from deepspeed.accelerator import get_accelerator
from ..constants import *
from ..curriculum_scheduler import CurriculumScheduler
from .indexed_dataset import MMapIndexedDataset
from .utils import create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype
class DeepSpeedDataSampler(object):
def __init__(self,
data_efficiency_config,
one_epoch_total_samples,
micro_batch_size,
data_parallel_rank,
data_parallel_size,
data_parallel_group,
gradient_accumulation_steps,
global_rank,
drop_last=True):
# Keep a copy of input params for later use.
self.data_efficiency_config = data_efficiency_config
self.one_epoch_total_samples = one_epoch_total_samples
self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples)
self.total_samples = one_epoch_total_samples * self.data_efficiency_config[DATA_SAMPLING][
DATA_SAMPLING_NUM_EPOCHS]
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.data_parallel_group = data_parallel_group
self.micro_batch_times_data_parallel_size = \
self.micro_batch_size * data_parallel_size
self.gradient_accumulation_steps = gradient_accumulation_steps
self.global_batch_size = self.micro_batch_times_data_parallel_size * \
self.gradient_accumulation_steps
self.global_rank = global_rank
self.drop_last = drop_last
self.np_rng = np.random.default_rng(self.data_efficiency_config[DATA_EFFICIENCY_SEED])
self.state = {}
self.batch = []
self.consumed_samples = 0
if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
self.curriculum_step = 0
self.current_difficulties = {}
self.data_cluster_paths = []
self.data_cluster_current_position = []
self.curriculum_schedulers = {}
self.curriculum_index_to_sample = {}
self.curriculum_index_to_metric = {}
self.difficulty_type = {}
self.clustering_type = {}
self.data_1epoch_size = None
if self.global_rank == 0:
self.data_clusters = []
self.data_cluster_sizes = []
cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
if not os.path.exists(cluster_path):
os.makedirs(cluster_path)
for metric in self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]:
self.curriculum_schedulers[metric] = CurriculumScheduler(
data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric])
self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_DIFFICULTY_TYPE]
self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_CLUSTERING_TYPE]
if self.global_rank == 0:
if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
self.curriculum_index_to_sample[metric] = MMapIndexedDataset(
data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
[metric][CURRICULUM_LEARNING_SAMPLE_PATH],
skip_warmup=True)
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
self.curriculum_index_to_metric[metric] = MMapIndexedDataset(
data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
[metric][CURRICULUM_LEARNING_METRIC_PATH],
skip_warmup=True)
# Sanity checks.
assert self.total_samples > 0, \
'no sample to consume: {}'.format(self.total_samples)
assert self.micro_batch_size > 0
assert data_parallel_size > 0
assert self.data_parallel_rank < data_parallel_size, \
'data_parallel_rank should be smaller than data size: {}, ' \
'{}'.format(self.data_parallel_rank, data_parallel_size)
def __len__(self):
return self.total_samples
def set_custom_curriculum_learning_schedule(self, schedule_func_dict):
for metric in self.curriculum_schedulers:
if metric in schedule_func_dict:
self.curriculum_schedulers[metric].set_custom_get_difficulty(schedule_func_dict[metric])
def get_start_end_idx(self):
start_idx = self.data_parallel_rank * self.micro_batch_size
end_idx = start_idx + self.micro_batch_size
return start_idx, end_idx
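    # Example (illustrative): with micro_batch_size=4, each data-parallel rank reads a
    # disjoint slice of the flat global batch produced by get_next_global_batch():
    #   rank 0 -> start_idx, end_idx = 0, 4
    #   rank 1 -> start_idx, end_idx = 4, 8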
def get_sample_based_on_metric_value(self, metric, value_start, value_end):
new_samples = None
for row in range(len(self.curriculum_index_to_sample[metric])):
if self.curriculum_index_to_metric[metric][row] <= value_end and self.curriculum_index_to_metric[metric][
row] > value_start:
row_samples = np.copy(self.curriculum_index_to_sample[metric][row])
new_samples = row_samples if new_samples is None else np.concatenate(
(new_samples, row_samples), axis=None)
return new_samples
def get_sample_based_on_metric_percentile(self, metric, percentile_start, percentile_end):
new_samples = None
if self.data_1epoch_size is None:
self.data_1epoch_size = sum(len(x) for x in self.curriculum_index_to_sample[metric])
max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][
metric][CURRICULUM_LEARNING_MAX_DIFFICULTY]
sample_per_percentile = self.data_1epoch_size // max_percentile
start_count = sample_per_percentile * percentile_start
end_count = sample_per_percentile * percentile_end
if percentile_end == max_percentile:
end_count = self.data_1epoch_size
current_count = 0
for row in range(len(self.curriculum_index_to_sample[metric])):
row_size = len(self.curriculum_index_to_sample[metric][row])
if current_count + row_size > start_count:
row_start = max(0, start_count - current_count)
if current_count + row_size <= end_count:
row_end = row_size
else:
row_end = end_count - current_count
row_samples = np.copy(self.curriculum_index_to_sample[metric][row][row_start:row_end])
new_samples = row_samples if new_samples is None else np.concatenate(
(new_samples, row_samples), axis=None)
current_count += row_size
if current_count >= end_count:
break
return new_samples
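    # Example (illustrative): with data_1epoch_size=1000 and a configured max difficulty
    # of 100 percentiles, each percentile spans 10 samples, so percentile_start=0 and
    # percentile_end=30 selects roughly the first 300 samples in ascending metric order.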
def get_new_cluster(self, previous_difficulties):
cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX
for metric in self.curriculum_schedulers:
cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}"
cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
cluster_path = f"{cluster_path}/{cluster_fname}"
if self.global_rank == 0:
new_cluster = None
need_clustering = 0
for metric in self.clustering_type:
if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
need_clustering += 1
if need_clustering > 1:
for metric in self.curriculum_schedulers:
if self.clustering_type[metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER:
metric_cluster = np.arange(start=0,
stop=self.one_epoch_total_samples,
step=1,
dtype=self.index_dtype)
else:
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
metric_cluster = self.get_sample_based_on_metric_value(metric, float('-inf'),
self.current_difficulties[metric])
elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
metric_cluster = self.get_sample_based_on_metric_percentile(
metric, 0, self.current_difficulties[metric])
new_cluster = metric_cluster if new_cluster is None else \
np.intersect1d(new_cluster, metric_cluster, assume_unique=True)
for cluster in self.data_clusters:
new_cluster = np.setdiff1d(new_cluster, cluster[0], assume_unique=True)
else:
if len(self.data_clusters) == 0:
new_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype)
for metric in self.curriculum_schedulers:
if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
new_cluster = self.get_sample_based_on_metric_value(metric, previous_difficulties[metric],
self.current_difficulties[metric])
elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
new_cluster = self.get_sample_based_on_metric_percentile(
metric, previous_difficulties[metric], self.current_difficulties[metric])
if new_cluster is not None and len(new_cluster) > 0:
logger.info(
f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated."
)
self.np_rng.shuffle(new_cluster)
cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
cluster_builder.add_item_numpy(new_cluster)
close_mmap_dataset_builder(cluster_builder, cluster_path)
self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
else:
logger.info(
f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped."
)
dist.barrier(group=self.data_parallel_group)
if os.path.isfile(f"{cluster_path}.bin"):
self.data_cluster_paths.append(cluster_fname)
self.data_cluster_current_position.append(0)
def sample_from_clusters(self):
num_clusters = len(self.data_clusters)
weight_sum = sum(self.data_cluster_sizes)
weights = [x / weight_sum for x in self.data_cluster_sizes]
samples = self.np_rng.choice(num_clusters, self.global_batch_size, replace=True, p=weights)
samples = np.bincount(samples, minlength=num_clusters)
return samples
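    # Example (illustrative): with two clusters of sizes [100, 300] the weights are
    # [0.25, 0.75]; for a global batch of 8 the returned bincount might look like
    # array([2, 6]), i.e. draw 2 samples from cluster 0 and 6 from cluster 1.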
def reshuffle_clusters(self, cidx):
cluster_fname = self.data_cluster_paths[cidx]
cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
cluster_path = f"{cluster_path}/{cluster_fname}"
cluster = np.copy(self.data_clusters[cidx][0])
self.np_rng.shuffle(cluster)
cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
cluster_builder.add_item_numpy(cluster)
close_mmap_dataset_builder(cluster_builder, cluster_path)
self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True)
def get_sample_from_cluster(self, cidx, num_samples):
start_idx = self.data_cluster_current_position[cidx]
samples = list(np.copy(self.data_clusters[cidx][0][start_idx:(start_idx + num_samples)]))
self.data_cluster_current_position[cidx] += num_samples
if len(samples) < num_samples:
num_samples_remained = num_samples - len(samples)
logger.info(f"reshuffling cluster {cidx}.")
self.reshuffle_clusters(cidx)
samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained]))
self.data_cluster_current_position[cidx] = num_samples_remained
return samples
def get_next_global_batch(self):
if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]:
self.curriculum_step += 1
new_cluster = False
previous_difficulties = {}
for metric in self.curriculum_schedulers:
next_difficulty = self.curriculum_schedulers[metric].update_difficulty(self.curriculum_step)
if metric not in self.current_difficulties or \
next_difficulty != self.current_difficulties[metric]:
new_cluster = True
if metric in self.current_difficulties:
previous_difficulties[metric] = self.current_difficulties[metric]
else:
if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
previous_difficulties[metric] = float('-inf')
elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
previous_difficulties[metric] = 0
self.current_difficulties[metric] = next_difficulty
if new_cluster:
self.get_new_cluster(previous_difficulties)
if self.global_rank == 0:
samples_per_cluster = self.sample_from_clusters()
batch = []
for cidx in range(len(samples_per_cluster)):
batch += self.get_sample_from_cluster(cidx, samples_per_cluster[cidx])
self.np_rng.shuffle(batch)
batch = torch.tensor(batch, device=get_accelerator().current_device_name(), dtype=torch.long).view(-1)
else:
batch = torch.empty(self.global_batch_size,
device=get_accelerator().current_device_name(),
dtype=torch.long)
dist.broadcast(batch, 0, group=self.data_parallel_group)
self.batch = batch.tolist()
def __iter__(self):
while self.consumed_samples <= self.total_samples:
if len(self.batch) == 0:
self.get_next_global_batch()
current_batch = self.batch[:self.micro_batch_times_data_parallel_size]
self.batch = self.batch[self.micro_batch_times_data_parallel_size:]
if len(current_batch) == self.micro_batch_times_data_parallel_size or \
(len(current_batch) > 0 and not self.drop_last):
start_idx, end_idx = self.get_start_end_idx()
yield current_batch[start_idx:end_idx]
self.consumed_samples += len(current_batch)
current_batch = []
def state_dict(self):
return {
CURRICULUM_LEARNING_BATCH: self.batch,
CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples,
CURRICULUM_LEARNING_STEP: self.curriculum_step,
CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties,
CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths,
CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: self.data_cluster_current_position,
CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state()
}
def load_state_dict(self, state_dict):
self.batch = state_dict[CURRICULUM_LEARNING_BATCH]
self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES]
self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP]
self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES]
self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS]
self.data_cluster_current_position = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION]
np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE])
cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
CURRICULUM_LEARNING_CLUSTER_PATH]
        # Backward compatibility: previously data_cluster_paths were stored as
        # absolute paths. Now we store just the file name, so that even if the user
        # moved the cluster files, checkpoint loading still works as long as the
        # correct new CURRICULUM_LEARNING_CLUSTER_PATH is set in the DeepSpeed JSON
        # config.
for idx in range(len(self.data_cluster_paths)):
if '/' in self.data_cluster_paths[idx]:
self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split('/')[-1]
if self.global_rank == 0:
for cluster_fname in self.data_cluster_paths:
cluster_path = f"{cluster_root_path}/{cluster_fname}"
self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True))
self.data_cluster_sizes.append(len(self.data_clusters[-1][0])) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py | data_sampler.py |
# DeepSpeed Team
from ..utils import call_to_str
from abc import ABC, abstractmethod
class PipeSchedule(ABC):
"""Directs the execution of a pipeline engine by generating sequences of
:class:`PipeInstruction`.
Schedules are generators that yield sequences of
:class:`PipeInstruction` to process the micro-batches in one batch.
Each yielded step is atomic in the sense that a barrier
synchronization can be placed between successive steps without
deadlock.
Below is an example schedule that implements data parallelism with gradient accumulation:
.. code-block:: python
class DataParallelSchedule(PipeSchedule):
def steps(self):
for step_id in range(self.micro_batches):
cmds = [
LoadMicroBatch(buffer_id=0),
ForwardPass(buffer_id=0),
BackwardPass(buffer_id=0),
]
if step_id == self.micro_batches - 1:
cmds.extend([
ReduceGrads(),
OptimizerStep(),
])
yield cmds
def num_pipe_buffers(self):
return 1
Args:
micro_batches (int): The number of micro-batches that comprise a batch.
stages (int): The number of pipeline stages.
stage_id (int): The pipe stage that will execute the generated schedule.
"""
def __init__(self, micro_batches, stages, stage_id):
super().__init__()
self.micro_batches = micro_batches
self.stages = stages
self.stage_id = stage_id
self.prev_stage = self.stage_id - 1
self.next_stage = self.stage_id + 1
@abstractmethod
def steps(self):
"""Yield a list of :class:`PipeInstruction` for each step in the schedule.
.. note::
Schedules must implement ``steps()`` to define the schedule.
Returns:
Instructions to be executed as one step of the pipeline
"""
pass
def num_pipe_buffers(self):
"""The number of pipeline buffers that will be used by this stage.
.. note::
Schedules should specialize ``num_pipe_buffers()`` for memory savings at scale.
Returns:
The number of buffers for the engine to allocate.
"""
return self.micro_batches
def _valid_micro_batch(self, micro_batch_id):
return 0 <= micro_batch_id < self.micro_batches
def _valid_stage(self, stage_id):
return 0 <= stage_id < self.stages
@property
def stage(self):
"""Stage index used to configure this schedule."""
return self.stage_id
@property
def num_stages(self):
"""The number of total pipeline stages used to configure this schedule."""
return self.stages
@property
def num_micro_batches(self):
"""The number of total micro_batches used to configure this schedule."""
return self.micro_batches
@property
def is_first_stage(self):
"""True if the configured ``stage_id`` is the first stage in the pipeline."""
return self.stage_id == 0
@property
def is_last_stage(self):
"""True if the configured ``stage_id`` is the last stage in the pipeline."""
return self.stage_id == self.stages - 1
def _buffer_idx(self, micro_batch_id):
"""Map a micro-batch index to a pipeline buffer index.
This method uses a cyclic allocation strategy.
Args:
micro_batch_id (int): The micro-batch index relative to the beginning of the schedule.
Returns:
int: The index of the buffer that should store data.
"""
assert self._valid_micro_batch(micro_batch_id)
return micro_batch_id % self.num_pipe_buffers()
def __iter__(self):
self.it = None
return self
def __next__(self):
if self.it is None:
self.it = self.steps()
return next(self.it)
class InferenceSchedule(PipeSchedule):
"""A schedule for inferencing batches using pipeline parallelism.
"""
def steps(self):
""""""
prev_micro_batch_id = -1
total_steps = self.micro_batches + self.stages - 1
for step_id in range(total_steps):
cmds = []
micro_batch_id = step_id - self.stage_id
# Alternate send/recv buffers
if _is_even(self.stage_id):
recv_buf = step_id % 2
send_buf = (step_id + 1) % 2
else:
recv_buf = (step_id + 1) % 2
send_buf = step_id % 2
if self.is_first_stage or self.is_last_stage:
if self._valid_micro_batch(micro_batch_id):
cmds.append(LoadMicroBatch(recv_buf))
if _is_even(self.stage_id):
if self._valid_stage(self.next_stage):
if self._valid_micro_batch(micro_batch_id - 1):
cmds.append(SendActivation(send_buf))
if self._valid_stage(self.prev_stage):
if self._valid_micro_batch(micro_batch_id):
cmds.append(RecvActivation(recv_buf))
else:
if self._valid_stage(self.prev_stage):
if self._valid_micro_batch(micro_batch_id):
cmds.append(RecvActivation(recv_buf))
if self._valid_stage(self.next_stage):
if self._valid_micro_batch(micro_batch_id - 1):
cmds.append(SendActivation(send_buf))
if self._valid_micro_batch(micro_batch_id):
cmds.append(ForwardPass(recv_buf))
yield cmds
def num_pipe_buffers(self):
"""Only two pipeline buffers are required for inferencing.
Returns:
``2``
"""
return 2
class TrainSchedule(PipeSchedule):
"""A schedule for training a batch using hybrid parallelism.
Pipeline parallelism is extracted through gradient accumulation and thus
convergence follows that of a data parallel approach with the same batch
size.
"""
def steps(self):
""""""
prev_micro_batch_id = -1
total_steps = 2 * (self.micro_batches + self.stages - 1)
for step_id in range(total_steps):
# Map the step of the pipeline to the micro-batch id and also whether it is a
# forward or backward pass step.
micro_batch_id, is_forward = self._step_to_micro_batch(step_id)
if self._valid_micro_batch(prev_micro_batch_id):
prev_buffer = self._buffer_idx(prev_micro_batch_id)
if self._valid_micro_batch(micro_batch_id):
curr_buffer = self._buffer_idx(micro_batch_id)
cmds = []
# Exchange activations
if is_forward:
if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.prev_stage):
cmds.append(SendGrad(prev_buffer))
if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.prev_stage):
cmds.append(RecvActivation(curr_buffer))
else:
if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.next_stage):
cmds.append(RecvGrad(curr_buffer))
if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.next_stage):
cmds.append(SendActivation(prev_buffer))
# First/last stage loads
if self.stage_id == 0 or self.stage_id == self.stages - 1:
if is_forward and self._valid_micro_batch(micro_batch_id):
cmds.append(LoadMicroBatch(curr_buffer))
# Computation
if self._valid_micro_batch(micro_batch_id):
if is_forward:
cmds.append(ForwardPass(curr_buffer))
else:
cmds.append(BackwardPass(curr_buffer))
# Model step at the end of the batch
if step_id == total_steps - 1:
cmds.append(ReduceTiedGrads())
cmds.append(ReduceGrads())
cmds.append(OptimizerStep())
# Prepare state for next time
prev_micro_batch_id = micro_batch_id
yield cmds
def num_pipe_buffers(self):
"""Return the number of pipeline buffers required for this stage.
This is equivalent to the maximum number of in-flight forward passes,
since we need to remember the activations of forward passes in order
to run backpropagation. For synchronous 1F1B, this is equivalent to
the index difference between this stage and the last stage.
"""
buffers = min(self.stages - self.stage_id, self.micro_batches)
return max(2, buffers)
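    # Example (illustrative): with 4 stages and 8 micro-batches, stage 0 keeps
    # min(4 - 0, 8) = 4 buffers in flight, while the last stage only needs
    # max(2, min(4 - 3, 8)) = 2.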
def _step_to_micro_batch(self, step_id):
if _is_even(step_id) and _is_even(self.stage_id):
micro_batch_id = self._even_step_forward_id(step_id)
is_forward = True
elif _is_odd(step_id) and _is_odd(self.stage_id):
micro_batch_id = self._odd_step_forward_id(step_id)
is_forward = True
elif _is_even(step_id) and _is_odd(self.stage_id):
micro_batch_id = self._even_step_backward_id(step_id)
is_forward = False
elif _is_odd(step_id) and _is_even(self.stage_id):
micro_batch_id = self._odd_step_backward_id(step_id)
is_forward = False
else:
assert False
return micro_batch_id, is_forward
def _even_step_forward_id(self, step_id):
base = step_id // 2
micro_batch_id = int(base - self.stage_id // 2)
return micro_batch_id
def _odd_step_forward_id(self, step_id):
base = (step_id - 1) // 2
micro_batch_id = int(base - self.stage_id // 2)
return micro_batch_id
def _even_step_backward_id(self, step_id):
base = step_id // 2
micro_batch_id = int(base - self.stages + (self.stage_id + 1) // 2)
return micro_batch_id
def _odd_step_backward_id(self, step_id):
base = ((step_id - 1) // 2) - self.stages + 1
micro_batch_id = int(base + self.stage_id // 2)
return micro_batch_id
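    # Example (illustrative): for stages=2 and micro_batches=2, _step_to_micro_batch()
    # maps the 2 * (2 + 2 - 1) = 6 steps as follows (F = forward, B = backward,
    # '-' = idle because the micro-batch id falls out of range):
    #   stage 0: F0  -   F1  B0  -   B1
    #   stage 1: -   F0  B0  F1  B1  -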
class DataParallelSchedule(PipeSchedule):
"""An example schedule that trains using traditional data parallelism with gradient
accumulation.
"""
def steps(self):
""""""
for step_id in range(self.micro_batches):
cmds = [
LoadMicroBatch(buffer_id=0),
ForwardPass(buffer_id=0),
BackwardPass(buffer_id=0),
]
if step_id == self.micro_batches - 1:
cmds.extend([
ReduceGrads(),
OptimizerStep(),
])
yield cmds
def num_pipe_buffers(self):
"""Only one pipeline buffer needed.
"""
return 1
class PipeInstruction:
"""Base class for all instructions to be executed by the pipeline engine.
All keyword arguments are stored as members similar to a ``namedtuple``. These are
then accessible to the :class:`PipeEngine` during execution.
Args:
kwargs (optional): keyword arguments to store as members
"""
def __init__(self, **kwargs):
self.name = self.__class__.__name__
self.kwargs = kwargs
for key, val in kwargs.items():
setattr(self, key, val)
def __repr__(self):
return call_to_str(self.name, **self.kwargs)
class OptimizerStep(PipeInstruction):
"""Performs one step with the optimizer and zeros gradients.
.. note:: Should be issued after :class:`ReduceGrads` and :class:`ReduceTiedGrads`.
.. note:: Can be a synchronization point among data-parallel ranks.
"""
pass
class ReduceGrads(PipeInstruction):
"""Reduce the computed gradients among data-parallel processes within the stage.
"""
pass
class ReduceTiedGrads(PipeInstruction):
"""Reduce the computed gradients of tied modules within a pipeline-parallel group.
.. warning::
The stages included in this synchronization point are not known until
the model is partitioned among pipeline stages. In the worst case, it
includes all pipeline stages. This instruction should be scheduled
carefully to avoid deadlocks.
"""
pass
class BufferOpInstruction(PipeInstruction):
"""A pipeline instruction that operates on pipeline buffer(s).
Args:
        buffer_id (int): the index of the pipeline buffer to modify.
"""
def __init__(self, buffer_id, **kwargs):
super().__init__(buffer_id=buffer_id, **kwargs)
# IO
class LoadMicroBatch(BufferOpInstruction):
"""Load a micro-batch into a buffer.
Roughly:
.. code-block:: python
buffers['inputs'][buffer_id] = next(data_iter)
"""
pass
# Compute
class ForwardPass(BufferOpInstruction):
"""Compute a forward pass.
Roughly:
.. code-block:: python
buffers['outputs'][buffer_id] = forward(buffers['inputs'][buffer_id])
"""
pass
class BackwardPass(BufferOpInstruction):
"""Compute a backward pass and accumulate gradients.
Roughly:
.. code-block:: python
outputs = buffers['outputs'][buffer_id]
gradients = buffers['gradients'][buffer_id]
torch.autograd.backward(tensors=outputs,
grad_tensors=gradients)
"""
pass
# Communication
class SendActivation(BufferOpInstruction):
"""Send activations to the next stage in the pipeline.
Roughly:
.. code-block:: python
send(buffers['outputs'][buffer_id])
.. note::
The communication is blocking and must be paired with a :class:`RecvActivation`
on the next pipeline stage to avoid deadlock.
"""
pass
class RecvActivation(BufferOpInstruction):
"""Receive activations from the previous stage in the pipeline.
Roughly:
.. code-block:: python
buffers['inputs'][buffer_id] = recv()
.. note::
The communication is blocking and must be paired with a :class:`SendActivation`
on the previous pipeline stage to avoid deadlock.
"""
pass
class SendGrad(BufferOpInstruction):
"""Send computed gradients to the previous pipeline stage.
with respect to the received activations
.. note::
Only received tensors with ``requires_grad==True`` will produce gradients.
Missing gradients will be replaced with ``None`` on the receiving stage.
.. note::
The communication is blocking and must be paired with a :class:`RecvGrad`
on the previous pipeline stage to avoid deadlock.
"""
pass
class RecvGrad(BufferOpInstruction):
"""Receive computed gradients the next pipeline stage.
.. note::
Only activations with ``requires_grad==True`` will produce gradients.
Missing gradients will be replaced with ``None``.
.. note::
The communication is blocking and must be paired with a :class:`SendGrad`
on the next pipeline stage to avoid deadlock.
"""
pass
def _is_even(x):
return x % 2 == 0
def _is_odd(x):
return x % 2 != 0 | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/pipe/schedule.py | schedule.py |
# DeepSpeed Team
from deepspeed import comm as dist
from collections import namedtuple
from itertools import product as cartesian_product
class ProcessTopology:
""" Manages the mapping of n-dimensional Cartesian coordinates to linear
indices. This mapping is used to map the rank of processes to the grid
for various forms of parallelism.
Each axis of the tensor is accessed by its name. The provided ordering
of the axes defines the layout of the topology. ProcessTopology uses a "row-major"
layout of the tensor axes, and so axes=['x', 'y'] would map coordinates (x,y) and
(x,y+1) to adjacent linear indices. If instead axes=['y', 'x'] was used, coordinates
(x,y) and (x+1,y) would be adjacent.
Some methods return ProcessCoord namedtuples.
"""
def __init__(self, axes, dims):
"""Create a mapping of n-dimensional tensor coordinates to linear indices.
Arguments:
axes (list): the names of the tensor axes
dims (list): the dimension (length) of each axis of the topology tensor
"""
self.axes = axes # names of each topology axis
self.dims = dims # length of each topology axis
# This is actually a class that lets us hash {'row':3, 'col':2} mappings
self.ProcessCoord = namedtuple('ProcessCoord', axes)
self.mapping = {}
ranges = [range(d) for d in dims]
# example: 1, (0,0,1)
for global_rank, coord in enumerate(cartesian_product(*ranges)):
key = {axis: coord[self.axes.index(axis)] for axis in self.axes}
key = self.ProcessCoord(**key)
# for example, {ProcessCoord(row=0, col=1) : 1}
self.mapping[key] = global_rank
def get_rank(self, **coord_kwargs):
"""Return the global rank of a process via its coordinates.
Coordinates are specified as kwargs. For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_rank(x=0, y=1)
1
"""
if len(coord_kwargs) != len(self.axes):
            raise ValueError('get_rank() does not support slices. Use filter_match().')
key = self.ProcessCoord(**coord_kwargs)
assert key in self.mapping, f'key {coord_kwargs} invalid'
return self.mapping[key]
def get_axis_names(self):
"""Return a list of the axis names in the ordering of the topology. """
return self.axes
def get_rank_repr(self, rank, omit_axes=['data', 'pipe'], inner_sep='_', outer_sep='-'):
"""Return a string representation of a rank.
This method is primarily used for checkpointing model data.
For example:
>>> topo = Topo(axes=['a', 'b'], dims=[2, 2])
>>> topo.get_rank_repr(rank=3)
'a_01-b_01'
>>> topo.get_rank_repr(rank=3, omit_axes=['a'])
'b_01'
Args:
rank (int): A rank in the topology.
omit_axes (list, optional): Axes that should not be in the representation. Defaults to ['data', 'pipe'].
            inner_sep (str, optional): separator between an axis name and its index within a coordinate. Defaults to '_'.
            outer_sep (str, optional): separator between the per-axis entries. Defaults to '-'.
Returns:
str: A string representation of the coordinate owned by ``rank``.
"""
omit_axes = frozenset(omit_axes)
axes = [a for a in self.get_axis_names() if a not in omit_axes]
names = []
for ax in axes:
ax_rank = getattr(self.get_coord(rank=rank), ax)
names.append(f'{ax}{inner_sep}{ax_rank:02d}')
return outer_sep.join(names)
def get_dim(self, axis):
"""Return the number of processes along the given axis.
For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_dim('y')
3
"""
if axis not in self.axes:
return 0
return self.dims[self.axes.index(axis)]
def get_coord(self, rank):
"""Return the coordinate owned by a process rank.
The axes of the returned namedtuple can be directly accessed as members. For
example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> coord = X.get_coord(rank=1)
>>> coord.x
0
>>> coord.y
1
"""
for coord, idx in self.mapping.items():
if idx == rank:
return coord
raise ValueError(f'rank {rank} not found in topology.')
def get_axis_comm_lists(self, axis):
""" Construct lists suitable for a communicator group along axis ``axis``.
Example:
>>> topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
>>> topo.get_axis_comm_lists('pipe')
[
[0, 4], # data=0, model=0
[1, 5], # data=0, model=1
[2, 6], # data=1, model=0
[3, 7], # data=1, model=1
]
Returns:
A list of lists whose coordinates match in all axes *except* ``axis``.
"""
        # Return an empty list instead of raising an error; this allows us to write
        # more generalized code for hybrid parallelisms.
if axis not in self.axes:
return []
# Grab all axes but `axis`
other_axes = [a for a in self.axes if a != axis]
lists = []
# Construct all combinations of coords with other_axes
ranges = [range(self.get_dim(a)) for a in other_axes]
for coord in cartesian_product(*ranges):
other_keys = {a: coord[other_axes.index(a)] for a in other_axes}
# now go over all ranks in `axis`.
sub_list = []
for axis_key in range(self.get_dim(axis)):
key = self.ProcessCoord(**other_keys, **{axis: axis_key})
sub_list.append(self.mapping[key])
lists.append(sub_list)
return lists
def filter_match(self, **filter_kwargs):
"""Return the list of ranks whose coordinates match the provided criteria.
Example:
>>> X = ProcessTopology(axes=['pipe', 'data', 'model'], dims=[2, 2, 2])
>>> X.filter_match(pipe=0, data=1)
[2, 3]
>>> [X.get_coord(rank) for rank in X.filter_match(pipe=0, data=1)]
[ProcessCoord(pipe=0, data=1, model=0), ProcessCoord(pipe=0, data=1, model=1)]
Arguments:
**filter_kwargs (dict): criteria used to select coordinates.
Returns:
The list of ranks whose coordinates match filter_kwargs.
"""
def _filter_helper(x):
for key, val in filter_kwargs.items():
if getattr(x, key) != val:
return False
return True
coords = filter(_filter_helper, self.mapping.keys())
return [self.mapping[coord] for coord in coords]
def get_axis_list(self, axis, idx):
"""Returns the list of global ranks whose coordinate in an axis is idx.
For example:
>>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3])
>>> X.get_axis_list(axis='x', idx=0)
[0, 1, 2]
>>> X.get_axis_list(axis='y', idx=0)
[0, 3]
"""
# This could be faster by generating the desired keys directly instead of
# filtering.
axis_num = self.axes.index(axis)
ranks = [self.mapping[k] for k in self.mapping.keys() if k[axis_num] == idx]
return ranks
def world_size(self):
return len(self.mapping)
def __str__(self):
return str(self.mapping)
def _prime_factors(N):
""" Returns the prime factorization of positive integer N. """
if N <= 0:
raise ValueError("Values must be strictly positive.")
primes = []
while N != 1:
for candidate in range(2, N + 1):
if N % candidate == 0:
primes.append(candidate)
N //= candidate
break
return primes
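# Example (illustrative): _prime_factors(12) == [2, 2, 3]. When no topology is given,
# PipelineParallelGrid below alternates these factors between the pipe and data axes,
# e.g. world_size=8 -> primes [2, 2, 2] -> num_pp=4, num_dp=2.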
class PipeDataParallelTopology(ProcessTopology):
""" A topology specialization for hybrid data and pipeline parallelism.
Uses data parallelism on the last dimension to encourage gradient
reductions to use high-bandwidth intra-node links and lower-volume
pipeline communications to use low-bandwidth inter-node links.
"""
def __init__(self, num_pp, num_dp):
super().__init__(axes=['pipe', 'data'], dims=[num_pp, num_dp])
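    # Illustrative mapping for PipeDataParallelTopology(num_pp=2, num_dp=2):
    #   rank 0 -> (pipe=0, data=0)   rank 1 -> (pipe=0, data=1)
    #   rank 2 -> (pipe=1, data=0)   rank 3 -> (pipe=1, data=1)
    # so the data-parallel (gradient all-reduce) groups are the adjacent ranks {0, 1}
    # and {2, 3}, while the pipeline groups are {0, 2} and {1, 3}.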
class PipeModelDataParallelTopology(ProcessTopology):
""" A topology for hybrid pipeline, model, and data parallelism. """
def __init__(self, num_pp, num_mp, num_dp):
super().__init__(axes=['pipe', 'data', 'model'], dims=[num_pp, num_dp, num_mp])
class PipelineParallelGrid:
"""Implements a grid object that stores the data parallel ranks
corresponding to each of the model parallel stages
The grid object organizes the processes in a distributed pytorch job
into a 2D grid, of stage_id and data_parallel_id.
self.stage_id and self.data_parallel_id stores the stage id
and the data parallel id of current process.
self.dp_group groups the processes by stage_id.
self.dp_group[i], is a list containing all process ranks whose
stage_id is i.
self.p2p_groups stores a list of tuple, where each tuple
stores process ranks of adjacent stages for a given data_parallel_id.
For example if num_stage is 5 then a tuple [7,8] represents stages [3, 4],
with data_parallel id = 1. A stage wrap around will appear as non-adjacent ranks,
for example tuple [4,0] with representing wrap-around stage 4 and 0, for
data_parallel_id = 0, or similarly [9,5] represents wrapped around stages [4,0]
for data_parallel_id = 1.
"""
def __init__(self, topology=None, process_group=None):
# TODO use process_group if provided
self.global_rank = dist.get_rank()
self.world_size = dist.get_world_size()
if topology is not None:
if self.global_rank == 0:
print('Using topology:', topology)
self._topo = topology
else:
num_pp = 1
num_dp = 1
for idx, prime in enumerate(_prime_factors(self.world_size)):
if idx % 2 == 0:
num_pp *= prime
else:
num_dp *= prime
self._topo = PipeDataParallelTopology(num_dp=num_dp, num_pp=num_pp)
self.data_parallel_size = max(self._topo.get_dim('data'), 1)
self.pipe_parallel_size = max(self._topo.get_dim('pipe'), 1)
self.model_parallel_size = max(self._topo.get_dim('model'), 1)
self.slice_parallel_size = self.model_parallel_size
assert self._is_grid_valid(), "Invalid Grid"
self.stage_id = self.get_stage_id()
self.data_parallel_id = self.get_data_parallel_id()
# Create new ProcessGroups for all model parallelism. DeepSpeedLight uses these
# to detect overflow, etc.
self.ds_model_proc_group = None
self.ds_model_rank = -1
for dp in range(self.data_parallel_size):
ranks = sorted(self._topo.get_axis_list(axis='data', idx=dp))
if self.global_rank == 0:
#print(f'RANK={self.global_rank} building DeepSpeed model group: {ranks}')
pass
proc_group = dist.new_group(ranks=ranks)
if self.global_rank in ranks:
self.ds_model_proc_group = proc_group
self.ds_model_world_size = len(ranks)
self.ds_model_rank = ranks.index(self.global_rank)
assert self.ds_model_rank > -1
assert self.ds_model_proc_group is not None
# Create new ProcessGroup for gradient all-reduces - these are the data parallel groups
self.dp_group = []
self.dp_groups = self._topo.get_axis_comm_lists('data')
for g in self.dp_groups:
proc_group = dist.new_group(ranks=g)
if self.global_rank in g:
self.dp_group = g
self.dp_proc_group = proc_group
self.is_first_stage = (self.stage_id == 0)
self.is_last_stage = (self.stage_id == (self.pipe_parallel_size - 1))
self.p2p_groups = self._build_p2p_groups()
# Create new ProcessGroup for pipeline collectives - these are pipe parallel groups
self.pp_group = []
self.pp_proc_group = None
self.pipe_groups = self._topo.get_axis_comm_lists('pipe')
for ranks in self.pipe_groups:
if self.global_rank == 0:
#print(f'RANK={self.global_rank} building pipeline group: {ranks}')
pass
proc_group = dist.new_group(ranks=ranks)
if self.global_rank in ranks:
self.pp_group = ranks
self.pp_proc_group = proc_group
assert self.pp_proc_group is not None
# Create new ProcessGroup for model (tensor-slicing) collectives
# Short circuit case without model parallelism.
# TODO: it would be nice if topology had bcast semantics to avoid this branching
# case?
if self.model_parallel_size == 1:
for group_rank in range(self.world_size):
group_rank = [group_rank]
group = dist.new_group(ranks=group_rank)
if group_rank[0] == self.global_rank:
self.slice_group = group_rank
self.slice_proc_group = group
return
else:
self.mp_group = []
self.model_groups = self._topo.get_axis_comm_lists('model')
for g in self.model_groups:
proc_group = dist.new_group(ranks=g)
if self.global_rank in g:
self.slice_group = g
self.slice_proc_group = proc_group
def get_stage_id(self):
return self._topo.get_coord(rank=self.global_rank).pipe
def get_data_parallel_id(self):
return self._topo.get_coord(rank=self.global_rank).data
def _build_p2p_groups(self):
"""Groups for sending and receiving activations and gradients across model
parallel stages.
"""
comm_lists = self._topo.get_axis_comm_lists('pipe')
p2p_lists = []
for rank in range(self.world_size):
for l in comm_lists:
assert len(l) == self.pipe_parallel_size
if rank in l:
idx = l.index(rank)
buddy_rank = l[(idx + 1) % self.pipe_parallel_size]
p2p_lists.append([rank, buddy_rank])
break # next global rank
assert len(p2p_lists) == self.world_size
return p2p_lists
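    # Illustrative: for a ProcessTopology(axes=['pipe', 'data'], dims=[2, 2]) grid,
    # the pipe comm lists are [0, 2] and [1, 3], so p2p_lists becomes
    # [[0, 2], [1, 3], [2, 0], [3, 1]] -- each rank paired with its next stage,
    # wrapping around at the last stage.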
def _is_grid_valid(self):
ranks = 1
for ax in self._topo.get_axis_names():
ranks *= self._topo.get_dim(ax)
return ranks == dist.get_world_size()
#returns the global rank of the process with the provided stage id
#which has the same data_parallel_id as caller process
def stage_to_global(self, stage_id, **kwargs):
me = self._topo.get_coord(self.global_rank)
transform = me._replace(pipe=stage_id, **kwargs)._asdict()
return self._topo.get_rank(**transform)
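    # Illustrative: in the 2x2 pipe/data grid above, global rank 3 has coordinate
    # (pipe=1, data=1), so stage_to_global(stage_id=0) called from rank 3 returns
    # rank 1, the first stage of the same data-parallel slice.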
def topology(self):
return self._topo
# MPU functions for DeepSpeed integration
def get_global_rank(self):
return self.global_rank
def get_pipe_parallel_rank(self):
""" The stage of the pipeline this rank resides in. """
return self.get_stage_id()
def get_pipe_parallel_world_size(self):
""" The number of stages in the pipeline. """
return self.pipe_parallel_size
def get_pipe_parallel_group(self):
""" The group of ranks within the same pipeline. """
return self.pp_proc_group
def get_data_parallel_rank(self):
""" Which pipeline this rank resides in. """
return self.data_parallel_id
def get_data_parallel_world_size(self):
""" The number of pipelines. """
return self.data_parallel_size
def get_data_parallel_group(self):
""" The group of ranks within the same stage of all pipelines. """
return self.dp_proc_group
# These are model parallel groups across all types of model parallelism.
# Deepspeed uses them to detect overflow, etc.
def get_model_parallel_rank(self):
return self.ds_model_rank
def get_model_parallel_world_size(self):
return self.ds_model_world_size
def get_model_parallel_group(self):
return self.ds_model_proc_group
# For Megatron-style tensor slicing
def get_slice_parallel_rank(self):
if 'model' in self._topo.get_axis_names():
return self._topo.get_coord(rank=self.global_rank).model
else:
return 0
def get_slice_parallel_world_size(self):
return self.slice_parallel_size
def get_slice_parallel_group(self):
        return self.slice_proc_group
# DeepSpeed Team
import pickle
import typing
import torch
from deepspeed import comm as dist
# To query whether we have send/recv support
from packaging.version import Version
from deepspeed.git_version_info import torch_info
from deepspeed.accelerator import get_accelerator
_groups = None
_grid = None
_async = []
def can_send_recv() -> bool:
torch_version = Version(torch_info['version'])
sendrecv_min = Version('1.8')
return torch_version >= sendrecv_min
#initializes adjacent process groups
#run this only after deepspeed.init_distributed() has been called
def init_process_groups(grid):
global _groups, _grid
_grid = grid
assert _grid.pipe_parallel_size > 1, "There is no pipeline parallelism"
if not can_send_recv():
_groups = [dist.new_group(ranks=group) for group in _grid.p2p_groups]
def _is_valid_send_recv(src_stage, dest_stage):
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
assert abs(src_stage-dest_stage) == 1 or \
(src_stage == first_stage and dest_stage == last_stage) or \
(src_stage == last_stage and dest_stage == first_stage), \
"Functionality currently limited to send and receive between adjacent ranks only"
def send(tensor, dest_stage, async_op=False):
global _groups
    assert async_op == False, "async_op=True is not supported"
src_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
dest_rank = _grid.stage_to_global(stage_id=dest_stage)
if async_op:
global _async
op = dist.isend(tensor, dest_rank)
_async.append(op)
else:
if can_send_recv():
return dist.send(tensor, dest_rank)
else:
group = _get_send_recv_group(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
return dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
def recv(tensor, src_stage, async_op=False):
global _groups
    assert async_op == False, "async_op=True is not supported"
dest_stage = _grid.get_stage_id()
_is_valid_send_recv(src_stage, dest_stage)
src_rank = _grid.stage_to_global(stage_id=src_stage)
if async_op:
global _async
op = dist.irecv(tensor, src_rank)
_async.append(op)
else:
if can_send_recv():
return dist.recv(tensor, src_rank)
else:
group = _get_send_recv_group(src_stage, dest_stage)
return dist.broadcast(tensor, src_rank, group=group, async_op=async_op)
def wait():
global _async
for op in _async:
op.wait()
_async = []
get_accelerator().synchronize()
def send_obj(msg: typing.Any, dest: int):
"""Send an arbitrary python object to ``dest``.
Note: ``msg`` must be pickleable.
WARN: This incurs a CPU -> GPU transfer and should be used sparingly
for performance reasons.
Args:
msg (typing.Any): The object to send.
dest (int): Destination rank.
"""
# serialize the message
msg = pickle.dumps(msg)
# construct a tensor to send
msg = torch.ByteTensor(torch.ByteStorage.from_buffer(msg)).to(get_accelerator().device_name())
# Send meta and message
length_tensor = torch.tensor([len(msg)], dtype=torch.long).to(get_accelerator().device_name())
dist.send(length_tensor, dst=dest)
dist.send(msg, dst=dest)
def recv_obj(sender: int) -> typing.Any:
"""Receive an arbitrary python object from ``sender``.
    WARN: This incurs CPU <-> GPU transfers and should be used sparingly
for performance reasons.
Args:
sender (int): The rank sending the message.
"""
# Get message meta
length = torch.tensor([0], dtype=torch.long).to(get_accelerator().device_name())
dist.recv(length, src=sender)
# Receive and deserialize
msg = torch.empty(length.item(), dtype=torch.uint8).to(get_accelerator().device_name())
dist.recv(msg, src=sender)
msg = pickle.loads(msg.cpu().numpy().tobytes())
def _to(x):
"""Recursively move to the current device."""
if torch.is_tensor(x):
return x.to(get_accelerator().device_name())
if isinstance(x, (tuple, list)):
ret = [_to(x_) for x_ in x]
if isinstance(x, tuple):
ret = tuple(ret)
return ret
# handle kwargs
if isinstance(x, dict):
ret = dict()
for key, val in x.items():
ret[_to(key)] = _to(val)
return ret
# Anything else is a no-op
return x
msg = _to(msg)
return msg
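# Illustrative usage (assumes deepspeed.init_distributed() has been called and at
# least two ranks exist):
#   if dist.get_rank() == 0:
#       send_obj({'step': 10}, dest=1)
#   elif dist.get_rank() == 1:
#       meta = recv_obj(sender=0)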
def _get_send_recv_group(src_stage, dest_stage):
    '''The group id is always the smaller of the two stage ids unless it is a wrap-around.'''
stage_id = None
first_stage = 0
last_stage = _grid.pipe_parallel_size - 1
if (src_stage == first_stage and dest_stage == last_stage
or dest_stage == first_stage and src_stage == last_stage):
stage_id = last_stage
elif src_stage > dest_stage:
stage_id = dest_stage
else:
stage_id = src_stage
'''group_id corresponds to group of [group_id, group_id+1]
unless group_id is the rank of the last stage
in which case group_id corresponds to group[group_id-num_stages+1, group_id]
'''
group_id = _grid.stage_to_global(stage_id=stage_id)
    return _groups[group_id]
# DeepSpeed Team
from types import MethodType
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.utils.timer import ThroughputTimer
from deepspeed.accelerator import get_accelerator
from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE
from ..utils import PartitionedTensor
from ..dataloader import RepeatingLoader
from ..zero.config import ZeroStageEnum
from ..activation_checkpointing import checkpointing as ds_checkpointing
from .module import PipelineModule, PipelineError
from . import p2p
from . import schedule
TARGET_ID = -2
LOG_STAGE = -2
DATA_PARALLEL_ID = -2
def is_even(number):
return number % 2 == 0
mem_alloced = 0
mem_cached = 0
def _tensor_bytes(tensor):
return tensor.numel() * tensor.element_size()
class PipelineEngine(DeepSpeedEngine):
""" A training engine hybrid pipeline, data, and model parallel training.
This engine is created by ``deepspeed.initialize()`` when a :class:`PipelineModule`
is provided.
"""
ID_TO_DTYPE = [
torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16, torch.bfloat16, torch.uint8,
torch.int8, torch.int16, torch.int32, torch.int64, torch.bool
]
DTYPE_TO_ID = {dtype: id_ for id_, dtype in enumerate(ID_TO_DTYPE)}
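    # Example: ID_TO_DTYPE[4] is torch.float16, so DTYPE_TO_ID[torch.float16] == 4.
    # These integer ids are what _send_tensor_meta()/_recv_tensor_meta() exchange
    # between stages to describe the elements of a tuple of activations.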
def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs):
super().__init__(*super_args, **super_kwargs)
        assert isinstance(self.module, PipelineModule), "model must be an instance of PipelineModule"
assert self.zero_optimization_stage() < 2, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism"
# We schedule the all-reduces, so disable it in super().backward()
self.enable_backward_allreduce = False
self.has_bool_tensors = has_bool_tensors
self.eval_return_logits = False
self.outputs = None
# used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB
self.pipeline_enable_backward_allreduce = True
if self.elasticity_enabled():
if not self.is_elastic_model_parallel_supported():
assert not self.elasticity_enabled(), "Elasticity is not currently supported" \
" with pipeline parallelism."
# pipeline step for logging
self.log_batch_step_id = -1
self.micro_batch_size = self.train_micro_batch_size_per_gpu()
self.micro_batches = self.gradient_accumulation_steps()
# Set Grid and Communication Groups
self.grid = self.module._grid
if self.grid.get_global_rank() == 0:
logger.info(f'CONFIG: micro_batches={self.micro_batches} '
f'micro_batch_size={self.micro_batch_size}')
self.global_rank = self.grid.get_global_rank()
assert self.dp_world_size == self.grid.data_parallel_size
assert self.train_batch_size() == \
self.micro_batch_size * self.micro_batches * self.grid.data_parallel_size
# Set Stage Inf
self.num_stages = self.grid.pipe_parallel_size
self.stage_id = self.grid.get_stage_id()
self.prev_stage = self.stage_id - 1
self.next_stage = self.stage_id + 1
self.data_iterator = None
self.batch_fn = None
self._force_grad_boundary = False
self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(),
logging_fn=self.tput_log,
monitor_memory=False,
steps_per_output=self.steps_per_print())
# PipelineEngine needs to handle data loading specially due to only the first
        # and last stages loading inputs/labels. We construct a DistributedSampler over
        # the data-parallel ranks so each pipeline reads a distinct shard of the data.
if self.training_data:
self._build_data_iter(self.training_data)
self.is_pipe_parallel = self.grid.pipe_parallel_size > 1
self.is_data_parallel = self.grid.data_parallel_size > 1
self.is_model_parallel = self.grid.model_parallel_size > 1
# Partition input/output buffers
# XXX temporarily disable while I revert some partition hacks.
self.is_pipe_partitioned = self.is_model_parallel
self.is_grad_partitioned = self.is_model_parallel
model_parameters = filter(lambda p: p.requires_grad, self.module.parameters())
num_params = sum([p.numel() for p in model_parameters])
unique_params = num_params
# Subtract tied parameters if we don't own them
if self.module.tied_comms:
tied_params = 0
for key, d in self.module.tied_comms.items():
if self.global_rank != min(d['ranks']):
tied_params += sum(p.numel() for p in d['module'].parameters())
unique_params -= tied_params
params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device)
dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group())
params_tensor = params_tensor.tolist()
total_params = params_tensor[0]
unique_params = params_tensor[1]
if self.grid.data_parallel_id == 0:
logger.info(f'RANK={self.global_rank} '
f'STAGE={self.stage_id} '
f'LAYERS={self.module._local_stop - self.module._local_start} '
f'[{self.module._local_start}, {self.module._local_stop}) '
f'STAGE_PARAMS={num_params} ({num_params/1e6:0.3f}M) '
f'TOTAL_PARAMS={total_params} ({total_params/1e6:0.3f}M) '
f'UNIQUE_PARAMS={unique_params} ({unique_params/1e6:0.3f}M)')
#initialize peer-2-peer communication and allreduce groups
if self.is_pipe_parallel:
p2p.init_process_groups(self.grid)
# Pipeline buffers
self.num_pipe_buffers = 0
self.pipe_buffers = {
'inputs': [], # batch input and received activations
'labels': [], # labels from batch input
'outputs': [], # activations
'output_tensors': [], # tensor object to preserve backward graph
}
self.pipe_recv_buf = None
self.grad_layer = None
self.meta_buffer = None
self.first_output_send = True
self.first_gradient_send = True
#stores the loss for the current micro batch being processed
self.loss = torch.tensor(0.0).to(self.device)
#stores the loss for the entire batch
self.total_loss = None
self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device)
if self._config.pipeline['activation_checkpoint_interval'] > 0:
self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval']
self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline
if self.is_last_stage():
self.loss_model = self.module.loss_fn
self.has_attention_mask = self.module.__class__.__name__ == 'GPT2ModelPipe'
# Initialize pipeline communicators. Just send a 0.
if is_even(self.stage_id):
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
else:
if not self.is_first_stage():
p2p.recv(self.loss, self.prev_stage)
if not self.is_last_stage():
p2p.send(self.loss, self.next_stage)
# XXX look into timer reporting timing
# Initialize some timers because of early weirdness.
if self.wall_clock_breakdown():
self.timers('forward_microstep').start()
self.timers('forward_microstep').stop()
self.timers('backward_microstep').start()
self.timers('backward_microstep').stop()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner_microstep').stop()
self.timers('backward_allreduce_microstep').start()
self.timers('backward_allreduce_microstep').stop()
self.timers('backward_allreduce').start()
self.timers('backward_allreduce').stop()
self.timers('step_microstep').start()
self.timers('step_microstep').stop()
def set_has_attention_mask(self, value):
assert isinstance(value, bool)
self.has_attention_mask = value
def _build_data_iter(self, dataset):
sampler = torch.utils.data.distributed.DistributedSampler(dataset,
num_replicas=self.dp_world_size,
rank=self.mpu.get_data_parallel_rank(),
shuffle=False)
# Build a loader and make it repeating.
pipe_dataloader = self.deepspeed_io(dataset, data_sampler=sampler)
pipe_dataloader = RepeatingLoader(pipe_dataloader)
self.set_dataloader(pipe_dataloader)
def _exec_reduce_tied_grads(self):
# We need to run this first to write to self.averaged_gradients;
# since this class turns `enable_backward_allreduce` off,
# `self.overlapping_partition_gradients_reduce_epilogue()` defined in the DeepSpeedEngine
# never actually runs. I suspect this is because of efficiency problems; get_flat_partition in
# stage2.py might do something expensive; someone will have to look into that later. But
# in the meantime, this fixes ZeRO2 + Pipelining enough to run a demo. Further profiling
# needed to decide if it actually breaks everything.
# (see https://github.com/EleutherAI/gpt-neox/issues/62#issuecomment-761471944)
if self.zero_optimization_partition_gradients():
self.optimizer.overlapping_partition_gradients_reduce_epilogue()
weight_group_list = self.module.get_tied_weights_and_groups()
for weight, group in weight_group_list:
grad = weight._hp_grad if self.bfloat16_enabled() else weight.grad
dist.all_reduce(grad, group=group)
def _exec_reduce_grads(self):
self._force_grad_boundary = True
if self.pipeline_enable_backward_allreduce:
if self.bfloat16_enabled():
if self.zero_optimization_stage() < ZeroStageEnum().gradients:
self._bf16_reduce_grads()
else:
raise NotImplementedError("PP+BF16 only work for ZeRO Stage 1")
else:
self.allreduce_gradients(bucket_size=MEMORY_OPT_ALLREDUCE_SIZE)
self._force_grad_boundary = False
def _bf16_reduce_grads(self):
# Make our own list of gradients from the optimizer's FP32 grads
grads = []
self.buffered_allreduce_fallback(grads=self.optimizer.get_grads_for_reduction(),
elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE)
def _reserve_pipe_buffers(self, num_buffers):
"""Ensure that each pipeline buffer has at least ``num_buffers`` slots.
This method only reserves slots and does not allocate tensors.
Args:
num_buffers (int): The number of buffers to reserve.
"""
if self.num_pipe_buffers >= num_buffers:
return
num_added = num_buffers - self.num_pipe_buffers
for key in self.pipe_buffers:
self.pipe_buffers[key].extend([None] * num_added)
self.num_pipe_buffers = num_buffers
def reset_activation_shape(self):
"""Reset the buffers when the shape of activation and gradient change.
For example, for curriculum learning that changes the seqlen of each
sample, we need to call this whenever the seqlen is going to change.
"""
self.first_output_send = True
self.pipe_recv_buf = None
self.grad_layer = None
self.meta_buffer = None
def train_batch(self, data_iter=None):
"""Progress the pipeline to train the next batch of data. The engine will ingest
``self.train_batch_size()`` total samples collectively across all workers.
        An iterator over training data should be provided as an argument
unless ``deepspeed.initialize()`` was provided a training set. In that event,
the training data will automatically be read.
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator, optional): Iterator of training data.
Returns:
The arithmetic mean of the losses computed this batch.
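        A minimal usage sketch (illustrative; assumes ``engine`` is the
        :class:`PipelineEngine` returned by ``deepspeed.initialize()`` and
        ``train_loader`` wraps the training data, e.g. with ``RepeatingLoader``):
        .. code-block:: python
            data_iter = iter(train_loader)
            for _ in range(num_steps):
                loss = engine.train_batch(data_iter=data_iter)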
"""
if not torch._C.is_grad_enabled():
raise RuntimeError(f'train_batch() requires gradients enabled. Use eval_batch() instead.')
# Curriculum learning could change activation shape
if self.curriculum_enabled_legacy():
new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
self.global_steps + 1)
if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
self.reset_activation_shape()
self.curriculum_scheduler_legacy.first_step = False
elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
self.global_steps):
self.reset_activation_shape()
if data_iter:
self.set_dataiterator(data_iter)
self.module.train()
self.total_loss = None
self._compute_loss = True
# Do the work
self.timers('train_batch').start()
sched = schedule.TrainSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
self._exec_schedule(sched)
self.agg_train_loss = self._aggregate_total_loss()
self.timers('train_batch').stop()
if self.global_steps % self.steps_per_print() == 0:
if self.global_rank == 0:
elapsed = self.timers('train_batch').elapsed(reset=True) / 1000.0
iter_time = elapsed / self.steps_per_print()
tput = self.train_batch_size() / iter_time
print(f'steps: {self.global_steps} '
f'loss: {self.agg_train_loss:0.4f} '
f'iter time (s): {iter_time:0.3f} '
f'samples/sec: {tput:0.3f}')
# Monitoring
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/train_loss', self.agg_train_loss.mean().item(),
self.global_samples)]
self.monitor.write_events(self.summary_events)
if self.wall_clock_breakdown() and self.global_steps % self.steps_per_print() == 0:
self.timers.log(['pipe_send_output', 'pipe_send_grad', 'pipe_recv_input', 'pipe_recv_grad'])
# TODO: should return precisely what loss returned and allow others to be queried?
return self.agg_train_loss
def eval_batch(self, data_iter, return_logits=False, compute_loss=True, reduce_output='avg'):
"""Evaluate the pipeline on a batch of data from ``data_iter``. The
engine will evaluate ``self.train_batch_size()`` total samples
collectively across all workers.
This method is equivalent to:
.. code-block:: python
module.eval()
with torch.no_grad():
output = module(batch)
.. warning::
A total of ``self.gradient_accumulation_steps()`` entries will be pulled
from ``data_iter`` by each pipeline. There must be sufficient
data left in ``data_iter`` or else a ``StopIteration`` will halt training.
DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader`
that wraps data loaders to automatically restart upon a ``StopIteration``.
Args:
data_iter (Iterator): Iterator of data to evaluate.
Returns:
The arithmetic mean of the losses computed this batch.
"""
self.eval_return_logits = return_logits
self.module.eval()
# Curriculum learning could change activation shape
if self.curriculum_enabled_legacy():
new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \
self.global_steps + 1)
if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step:
self.reset_activation_shape()
self.curriculum_scheduler_legacy.first_step = False
elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \
self.global_steps):
self.reset_activation_shape()
eval_output = None
self._compute_loss = compute_loss
# Use the provided data iterator
train_iterator = self.data_iterator
self.set_dataiterator(data_iter)
# Do the work
sched = schedule.InferenceSchedule(micro_batches=self.micro_batches,
stages=self.num_stages,
stage_id=self.stage_id)
        # prevent deadlock when running multiple evaluations in sequence
dist.barrier()
with torch.no_grad():
self._exec_schedule(sched)
if self.is_last_stage():
eval_output = self._reduce_outputs(self.fwd_outputs, reduce=reduce_output)
if compute_loss:
eval_output = self._bcast_pipe_scalar(eval_output)
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/eval_loss', eval_output.mean().item(), self.global_samples)]
self.monitor.write_events(self.summary_events)
# Restore the training iterator
self.set_dataiterator(train_iterator)
# Reset any buffers that may have been populated during the forward passes.
#ds_checkpointing.reset()
self.eval_return_logits = False
if return_logits:
outputs = self.outputs
self.outputs = None
return eval_output, outputs
return eval_output
def set_train_batch_size(self, train_batch_size):
"""Adjust the global batch size by increasing or decreasing the number of
micro-batches (i.e., gradient accumulation steps). The size of each micro-batch
(i.e., ``train_micro_batch_size_per_gpu``) is not changed.
Args:
train_batch_size (int): The new global batch size for training.
Raises:
ValueError: if ``train_batch_size`` is not divisible by the
configured micro-batch size and data parallelism.
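        For example (illustrative), with ``train_micro_batch_size_per_gpu=2`` and a
        data-parallel size of 4, ``set_train_batch_size(16)`` results in 2 gradient
        accumulation steps, while a value not divisible by 8 raises ``ValueError``.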
"""
super().set_train_batch_size(train_batch_size)
self.micro_batches = self.gradient_accumulation_steps()
def is_first_stage(self):
"""True if this process is in the first stage in the pipeline."""
return self.stage_id == 0
def is_last_stage(self):
"""True if this process is in the last stage in the pipeline."""
return self.stage_id == self.num_stages - 1
def _reduce_outputs(self, outputs, reduce='avg', reduce_dp=True):
if reduce is None:
return outputs
if reduce.lower() == 'avg':
# first sum over all microbatches
if torch.is_tensor(outputs[0]):
reduced = sum(outputs)
else:
assert isinstance(outputs, (list, tuple))
reduced = [torch.zeros_like(o) for o in outputs[0]]
                for out in outputs:
                    for idx, o in enumerate(out):
                        reduced[idx] += o
# Average over the microbatches
reduced = self._scale_loss_by_gas(reduced)
# Average over DP groups
if reduce_dp and self.is_data_parallel:
if torch.is_tensor(reduced):
dist.all_reduce(reduced, group=self.mpu.get_data_parallel_group())
reduced /= self.dp_world_size
else:
for idx in range(len(reduced)):
dist.all_reduce(reduced[idx], group=self.mpu.get_data_parallel_group())
reduced[idx] /= self.dp_world_size
return reduced
else:
raise NotImplementedError(f'reduction type {reduce} not supported.')
def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32):
# Default to last stage (e.g., for broadcasting loss)
if src_rank is None:
src_rank = self.grid.stage_to_global(self.num_stages - 1)
assert src_rank in self.grid.pp_group
if self.global_rank == src_rank:
result = data.clone().detach().type(dtype).to(self.device)
else:
result = torch.Tensor([0.]).type(dtype).to(self.device)
dist.broadcast(tensor=result, src=src_rank, group=self.mpu.get_pipe_parallel_group())
return result
def _aggregate_total_loss(self):
# Scale loss, average among DP ranks, and bcast loss to the rest of my DP group
if self.is_last_stage():
loss = self._scale_loss_by_gas(self.total_loss)
self.dp_group_loss = loss.clone().detach()
## Average loss across all data-parallel groups
agg_loss = self.dp_group_loss.clone().detach()
#print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True)
if self.is_data_parallel:
dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group())
agg_loss /= self.dp_world_size
assert self.global_rank in self.grid.pp_group
losses = torch.Tensor([self.dp_group_loss, agg_loss]).to(self.device)
dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group())
else:
# Get loss from last stage
src_rank = self.grid.stage_to_global(self.num_stages - 1)
assert src_rank in self.grid.pp_group
losses = torch.Tensor([0., 0.]).to(self.device)
dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group())
self.dp_group_loss = losses[0].clone().detach()
agg_loss = losses[1].clone().detach()
return agg_loss
def set_dataloader(self, loader):
""""""
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = loader
self.data_iterator = iter(self.training_dataloader)
def set_dataiterator(self, iterator):
""" Store an iterator to sample for training data. """
if self.is_first_stage() or self.is_last_stage():
self.training_dataloader = None
self.data_iterator = iterator
def set_batch_fn(self, fn):
"""Execute a post-processing function on input data.
Args:
fn (function): The function to run.
"""
self.batch_fn = fn
def is_gradient_accumulation_boundary(self):
"""True if the engine is executing a gradient reduction or optimizer step instruction.
This is overridden from :class:`DeepSpeedEngine` to force reductions
and steps when the pipeline engine is instructed to do so.
Returns:
bool: whether reductions and optimizer steps should occur.
"""
return self._force_grad_boundary
def log_for_device(self, *msg):
if LOG_STAGE == self.stage_id or LOG_STAGE == -1:
if DATA_PARALLEL_ID == self.grid.data_parallel_id or DATA_PARALLEL_ID == -1:
print(
f'RANK={dist.get_rank()} '
f'PIPE-ID={self.stage_id} '
f'DATA-ID={self.grid.data_parallel_id} '
f'MBATCH-ID={self.microbatch_id} '
f'STEP-ID={self.log_batch_step_id} '
'::',
*msg,
flush=True)
def tput_log(self, *msg):
if self.global_rank == 0 and self.global_steps % self.steps_per_print() == 0:
print(*msg)
def _next_batch(self):
# If using 3D parallelism, only some first-stage ranks may do IO
batch = None
if self.data_iterator is not None:
batch = next(self.data_iterator)
# Any post-processing, like broadcasting across a slice-parallel group.
if self.batch_fn:
batch = self.batch_fn(batch)
return batch
def _exec_forward_pass(self, buffer_id):
self.tput_timer.start()
self.mem_status('BEFORE FWD', reset_max=True)
if isinstance(self.pipe_buffers['inputs'][buffer_id], tuple):
inputs = tuple(t.clone() for t in self.pipe_buffers['inputs'][buffer_id])
else:
inputs = self.pipe_buffers['inputs'][buffer_id].clone()
# collect the partitioned input from the previous stage
if self.is_pipe_partitioned and not self.is_first_stage():
part_input = PartitionedTensor.from_meta(meta=inputs[0],
local_part=inputs[1],
group=self.grid.get_slice_parallel_group())
inputs = (part_input.full(), *inputs[2:])
inputs[0].requires_grad = True
# skip mask
#inputs[1].requires_grad = True
part_input = None
inputs = inputs[0] if len(inputs) == 1 else inputs
self.pipe_buffers['inputs'][buffer_id] = inputs
# Zero out the gradients each time we use the tensor because only the data in
# tensor changes across batches
self._zero_grads(inputs)
outputs = super().forward(inputs)
# Reset activation checkpointing buffers.
# Need to call this between evaluation iterations
if not self.module.training:
ds_checkpointing.reset()
# Partition the outputs if we are not the last stage
if self.is_pipe_partitioned and not self.is_last_stage():
if isinstance(outputs, tuple):
first_output = outputs[0]
# TODO: Improve pipe partitioning to pass multiple tensors that require grads
assert all([torch.is_tensor(elt) and elt.requires_grad is False for elt in outputs[1:]])
outputs_tail = outputs[1:]
elif torch.is_tensor(outputs):
first_output = outputs
outputs_tail = []
else:
raise ValueError("expecting a tensor or a tuple of tensors")
part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group())
# Clear the large output data, but save the computation graph
first_output.data = torch.zeros(1)
self.pipe_buffers['output_tensors'][buffer_id] = first_output
# Inject the partitioned tensor into the output before sending
outputs = (part.to_meta(), part.data(), *outputs_tail)
part = None
self.pipe_buffers['outputs'][buffer_id] = outputs
# Optionally compute loss on the last device
if self.is_last_stage():
if self._compute_loss and self.module.loss_fn is not None:
labels = self.pipe_buffers['labels'][buffer_id]
self.loss = self.module.loss_fn(outputs, labels)
else:
# Some models just return loss from forward()
self.loss = outputs
if self.eval_return_logits:
self.outputs = outputs
if isinstance(self.loss, torch.Tensor):
self.fwd_outputs.append(self.loss.detach())
if self.total_loss is None:
self.total_loss = torch.zeros_like(self.loss)
self.total_loss += self.loss.detach()
else:
self.fwd_outputs.append([l.detach() for l in self.loss])
if self.total_loss is None:
self.total_loss = [torch.zeros_like(l) for l in self.loss]
for idx, l in enumerate(self.loss):
self.total_loss[idx] += l.detach()
def _exec_backward_pass(self, buffer_id):
assert self.optimizer is not None, "must provide optimizer during " \
"init in order to use backward"
self.mem_status('BEFORE BWD', reset_max=True)
# The last stage just runs backward on the loss using DeepSpeed's typical
# mechanisms.
if self.is_last_stage():
super().backward(self.loss)
self.mem_status('AFTER BWD')
return
outputs = self.pipe_buffers['outputs'][buffer_id]
if self.wall_clock_breakdown():
self.timers('backward_microstep').start()
self.timers('backward').start()
self.timers('backward_inner_microstep').start()
self.timers('backward_inner').start()
# Reconstruct if we previously partitioned the output. We must be
# careful to also restore the computational graph of the tensors we partitioned.
if self.is_pipe_partitioned:
if self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full()
outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[2:])
else:
# Already restored from partition
self.pipe_buffers['output_tensors'][buffer_id].data = outputs[0]
outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[1:])
grad_tensors = self.grad_layer
if self.is_grad_partitioned:
#print(f'RANK={self.global_rank} BEFORE-BWD restoring grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
part_grad = PartitionedTensor.from_meta(meta=self.grad_layer[0],
local_part=self.grad_layer[1],
group=self.grid.get_slice_parallel_group())
grad_tensors = (part_grad.full(), *grad_tensors[2:])
part_grad = None
#print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}')
if self.bfloat16_enabled() and not self.is_last_stage():
# manually call because we don't call optimizer.backward()
self.optimizer.clear_lp_grads()
# This handles either a single tensor or tuple of tensors.
if isinstance(outputs, tuple):
out_tensors = [t for t in outputs if t.is_floating_point()]
assert len(out_tensors) == len(grad_tensors)
torch.autograd.backward(tensors=out_tensors, grad_tensors=grad_tensors)
else:
torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, ))
if self.bfloat16_enabled() and not self.is_last_stage():
# manually call because we don't call optimizer.backward()
self.optimizer.update_hp_grads(clear_lp_grads=False)
# Free up the memory from the output of forward()
self.pipe_buffers['output_tensors'][buffer_id] = None
self.pipe_buffers['outputs'][buffer_id] = None
grad_tensors = None
if self.wall_clock_breakdown():
self.timers('backward_inner').stop()
self.timers('backward_inner_microstep').stop()
self.timers('backward').stop()
self.timers('backward_microstep').stop()
self.mem_status('AFTER BWD')
def _exec_load_micro_batch(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('batch_input').start()
batch = self._next_batch()
if self.is_first_stage():
loaded = None
if torch.is_tensor(batch[0]):
loaded = batch[0].clone().to(self.device).detach()
loaded.requires_grad = loaded.is_floating_point()
else:
assert isinstance(batch[0], (tuple, list))
# Assume list or tuple
loaded = []
for x in batch[0]:
assert torch.is_tensor(x)
mine = x.clone().detach().to(self.device)
mine.requires_grad = mine.is_floating_point()
loaded.append(mine)
loaded = tuple(loaded)
self.pipe_buffers['inputs'][buffer_id] = loaded
if self.is_last_stage():
loaded = batch[1]
if torch.is_tensor(batch[1]):
loaded = batch[1].to(self.device)
elif isinstance(batch[1], tuple):
loaded = []
for x in batch[1]:
assert torch.is_tensor(x)
x = x.to(self.device).detach()
loaded.append(x)
loaded = tuple(loaded)
self.pipe_buffers['labels'][buffer_id] = loaded
if self.wall_clock_breakdown():
self.timers('batch_input').stop()
def _send_tensor_meta(self, buffer, recv_stage):
""" Communicate metadata about upcoming p2p transfers.
Metadata is communicated in this order:
            * type (0: tensor, 1: list, 2: tuple)
            * num_tensors if type is list or tuple
            foreach tensor in buffer:
                * dtype (tuple only)
                * ndims
                * shape
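        For example (illustrative), sending a tuple of two fp16 tensors with shapes
        (4, 1024) and (4,) transmits: type=2, count=2, and then for each tensor its
        dtype id (4 for ``torch.float16``, per ``ID_TO_DTYPE``), ndims, and shape.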
"""
send_bytes = 0
if isinstance(buffer, torch.Tensor):
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.send(type_tensor, recv_stage)
send_shape = torch.LongTensor(data=buffer.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(buffer.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(buffer)
elif isinstance(buffer, list):
assert (False)
type_tensor = torch.LongTensor(data=[1]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for tensor in buffer:
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
send_bytes += _tensor_bytes(tensor)
elif isinstance(buffer, tuple):
type_tensor = torch.LongTensor(data=[2]).to(self.device)
p2p.send(type_tensor, recv_stage)
count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device)
p2p.send(count_tensor, recv_stage)
for idx, tensor in enumerate(buffer):
assert isinstance(tensor, torch.Tensor)
send_shape = torch.LongTensor(data=tensor.size()).to(self.device)
send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device)
send_dtype = torch.LongTensor(data=[self.DTYPE_TO_ID[tensor.dtype]]).to(self.device)
p2p.send(send_dtype, recv_stage)
p2p.send(send_ndims, recv_stage)
p2p.send(send_shape, recv_stage)
# Useful for performance debugging.
'''
new_bytes = _tensor_bytes(tensor)
send_bytes += _tensor_bytes(tensor)
# Useful for performance debugging.
if self.grid.data_parallel_id == 0:
print(
f'STAGE={self.stage_id} pipe-send-volume[{idx}]: shape={send_shape} {new_bytes/1024**2:0.2f}MB'
)
'''
else:
raise NotImplementedError(f'Could not send meta type {type(buffer)}')
# Useful for performance debugging.
'''
if self.grid.data_parallel_id == 0:
print(f'STAGE={self.stage_id} pipe-send-volume: {send_bytes/1024**2:0.2f}MB')
'''
def _recv_tensor_meta(self, send_stage):
"""Receive metadata about upcoming p2p transfers and return allocated buffers.
Metadata is communicated in this order:
            * type (0: tensor, 1: list, 2: tuple)
            * num_tensors if type is list or tuple
            foreach tensor in buffer:
                * dtype (tuple only)
                * ndims
                * shape
Returns:
Allocated buffer for receiving from send_stage.
"""
type_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(type_tensor, send_stage)
recv_type = type_tensor.item()
# A single tensor will be sent.
if recv_type == 0:
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shape = recv_shape.tolist()
return self._allocate_buffer(recv_shape, num_buffers=1)[0]
# List or tuple of tensors
elif recv_type == 1 or recv_type == 2:
count_tensor = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(count_tensor, send_stage)
num_tensors = count_tensor.item()
recv_shapes_and_dtypes = []
for idx in range(num_tensors):
recv_dtype = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_dtype, send_stage)
recv_dtype = self.ID_TO_DTYPE[recv_dtype.item()]
recv_ndims = torch.LongTensor(data=[0]).to(self.device)
p2p.recv(recv_ndims, send_stage)
recv_ndims = recv_ndims.item()
recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device)
p2p.recv(recv_shape, send_stage)
recv_shapes_and_dtypes.append((recv_shape.tolist(), recv_dtype))
buffers = self._allocate_buffers(recv_shapes_and_dtypes, num_buffers=1)[0]
# Convert to tuples if requested.
if recv_type == 2:
buffers = tuple(buffers)
return buffers
else:
            raise NotImplementedError(f'Could not receive meta type {recv_type}')
def _exec_send_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_output').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# NCCL does not like to send torch.BoolTensor types, so cast the mask to half().
# We could do char, but with half() we can eventually flatten with other fp16
# messages (TODO)
if self.has_attention_mask or self.has_bool_tensors:
outputs = list(outputs)
outputs[-1] = outputs[-1].half()
outputs = tuple(outputs)
if self.first_output_send:
self.first_output_send = False
self._send_tensor_meta(outputs, self.next_stage)
if isinstance(outputs, torch.Tensor):
p2p.send(outputs, self.next_stage)
elif isinstance(outputs, tuple):
for idx, buffer in enumerate(outputs):
p2p.send(buffer, self.next_stage)
else:
raise NotImplementedError('Could not send output of type '
f'{type(outputs)}')
# Restore the boolean tensor
if self.has_attention_mask or self.has_bool_tensors:
outputs = list(outputs)
outputs[-1] = outputs[-1].bool()
outputs = tuple(outputs)
if self.wall_clock_breakdown():
self.timers('pipe_send_output').stop()
def _exec_send_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').start()
inputs = self.pipe_buffers['inputs'][buffer_id]
# Partition the gradient
if self.is_grad_partitioned:
if isinstance(inputs, tuple):
first_input = inputs[0]
assert all([torch.is_tensor(elt) for elt in inputs[1:]])
inputs_grad_tail = [elt.grad for elt in inputs[1:] if elt.grad is not None]
elif torch.is_tensor(inputs):
first_input = inputs
inputs_grad_tail = []
else:
raise ValueError("expecting a tensor or a tuple of tensors")
assert torch.is_tensor(first_input)
part = PartitionedTensor(tensor=first_input.grad, group=self.grid.get_slice_parallel_group())
inputs = (part.to_meta(), part.data(), *inputs_grad_tail)
# XXX Terrible hack
# Drop the attention mask from the input buffer here. It does not have
# a grad that needs to be communicated. We free the buffer immediately
# after, so no need to restore it. The receiver also has a hack that skips
# the recv. This is because NCCL does not let us send torch.BoolTensor :-(.
if self.has_attention_mask or self.has_bool_tensors:
inputs = list(inputs)
inputs.pop()
inputs = tuple(inputs)
if isinstance(inputs, torch.Tensor):
assert inputs.grad is not None
p2p.send(inputs.grad, self.prev_stage)
else:
# XXX terrible hacky branch
if self.is_grad_partitioned:
# First two sends are partitioned gradient
p2p.send(inputs[0], self.prev_stage)
p2p.send(inputs[1], self.prev_stage)
else:
for idx, buffer in enumerate(inputs):
# Skip tensors that will not produce a grad
if not buffer.is_floating_point():
assert buffer.grad is None
continue
assert buffer.grad is not None
p2p.send(buffer.grad, self.prev_stage)
# We can free up the input buffer now
self.pipe_buffers['inputs'][buffer_id] = None
if self.wall_clock_breakdown():
self.timers('pipe_send_grad').stop()
def _exec_recv_activations(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').start()
recvd = None
# Allocate the buffer if necessary
if self.pipe_recv_buf is None:
self.pipe_recv_buf = self._recv_tensor_meta(self.prev_stage)
if isinstance(self.pipe_recv_buf, torch.Tensor):
p2p.recv(self.pipe_recv_buf, self.prev_stage)
recvd = self.pipe_recv_buf.clone().detach()
recvd.requires_grad = recvd.is_floating_point()
else:
assert isinstance(self.pipe_recv_buf, tuple)
recvd = [None] * len(self.pipe_recv_buf)
for idx, buffer in enumerate(self.pipe_recv_buf):
assert torch.is_tensor(buffer)
# XXX hardcode meta type
if self.is_pipe_partitioned and idx == 0 and buffer.dtype != torch.long:
if self.meta_buffer is None:
self.meta_buffer = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
buffer = self.meta_buffer
p2p.recv(buffer, self.prev_stage)
recvd[idx] = buffer.clone().detach()
# NCCL does not like to send torch.BoolTensor types, so un-cast the
# attention mask
if self.has_attention_mask or self.has_bool_tensors:
recvd[-1] = recvd[-1].bool()
recvd = tuple(recvd)
for buffer in recvd:
buffer.requires_grad = buffer.is_floating_point()
self.pipe_buffers['inputs'][buffer_id] = recvd
if self.wall_clock_breakdown():
self.timers('pipe_recv_input').stop()
def _exec_recv_grads(self, buffer_id):
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').start()
outputs = self.pipe_buffers['outputs'][buffer_id]
# XXX these shapes are hardcoded for Megatron
# Restore partitioned output if it was partitioned and we are sending full gradients
if self.is_pipe_partitioned and not self.is_grad_partitioned:
part_output = PartitionedTensor.from_meta(meta=outputs[0],
local_part=outputs[1],
group=self.grid.get_slice_parallel_group())
outputs[0].data = part_output.full()
outputs = (outputs[0], *outputs[2:])
# save for backward
self.pipe_buffers['outputs'][buffer_id] = outputs
# Allocate gradient if necessary
if self.grad_layer is None:
if isinstance(outputs, torch.Tensor):
s = list(outputs.size())
self.grad_layer = self._allocate_buffer(s, dtype=outputs.dtype, num_buffers=1)[0]
else:
# XXX This is a HACK
# When we exchange activations/gradients, the two pipe stages
# need to issue the send/recv with the same buffer sizes or
# else there is a deadlock. The is_floating_point() filter is
# used to avoid sending gradients for tensors that do not
# produce gradients. When TP>1, we partition the first
# activations/gradients across TP ranks to save communication
# volume and memory. That partitioned tensor is represented as
# two tensors: a 1/TPth chunk of the original data and also a
# small LongTensor storing the metadata used to reconstruct on
# the other side. When combined, the floating point filter also
# filtered out the metadata tensor. This quick (hacky) fix just
# branches on is_grad_partitioned so we don't filter out the
# metadata tensor.
if self.is_grad_partitioned:
sizes_and_dtypes = [(list(t.size()), t.dtype)
for t in outputs[:2]] + [(list(t.size()), t.dtype)
for t in outputs[2:] if t.is_floating_point()]
else:
sizes_and_dtypes = [(list(t.size()), t.dtype) for t in outputs if t.is_floating_point()]
self.grad_layer = self._allocate_buffers(sizes_and_dtypes, num_buffers=1)[0]
if isinstance(self.grad_layer, torch.Tensor):
p2p.recv(self.grad_layer, self.next_stage)
else:
assert isinstance(outputs, tuple)
for idx, buffer in enumerate(self.grad_layer):
# XXX GPT-2 hack
if self.is_grad_partitioned and idx == 0 and buffer.dtype != torch.long:
buffer.data = torch.zeros(buffer.size(), dtype=torch.long, device=self.device)
p2p.recv(buffer, self.next_stage)
if self.wall_clock_breakdown():
self.timers('pipe_recv_grad').stop()
def _exec_optimizer_step(self, lr_kwargs=None):
if self.wall_clock_breakdown():
self.timers('step_microstep').start()
self.timers('step').start()
self.mem_status('BEFORE STEP', reset_max=True)
self._force_grad_boundary = True
self._take_model_step(lr_kwargs)
self._force_grad_boundary = False
self.mem_status('AFTER STEP')
if self.global_rank == 0 and self.monitor.enabled:
self.summary_events = [(f'Train/Samples/lr', self.get_lr()[0], self.global_samples)]
if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'):
self.summary_events.append(
(f'Train/Samples/loss_scale', self.optimizer.cur_scale, self.global_samples))
self.monitor.write_events(self.summary_events)
if self.wall_clock_breakdown():
self.timers('step_microstep').stop()
self.timers('step').stop()
if self.global_steps % self.steps_per_print() == 0:
self.timers.log([
'batch_input', 'forward_microstep', 'backward_microstep', 'backward_inner_microstep',
'backward_allreduce_microstep', 'backward_tied_allreduce_microstep', 'step_microstep'
])
if self.global_steps % self.steps_per_print() == 0:
self.timers.log(['forward', 'backward', 'backward_inner', 'backward_allreduce', 'step'])
def _zero_grads(self, inputs):
if isinstance(inputs, torch.Tensor):
if inputs.grad is not None:
inputs.grad.data.zero_()
else:
for t in inputs:
if t.grad is not None:
t.grad.data.zero_()
def _allocate_zeros(self, shape, **kwargs):
""" Allocate a tensor of zeros on the engine's device.
Arguments:
shape: the shape of the tensor to allocate
kwargs: passed to torch.zeros()
Returns:
A tensor from torch.zeros() allocated on self.device.
"""
if "dtype" not in kwargs:
if self.fp16_enabled():
kwargs["dtype"] = torch.half
if self.bfloat16_enabled():
kwargs["dtype"] = torch.bfloat16
return torch.zeros(shape, device=self.device, **kwargs)
def _allocate_buffer(self, shape, num_buffers=-1, **kwargs):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffers.append(self._allocate_zeros(shape, **kwargs))
return buffers
def _allocate_buffers(self, shapes_and_dtypes, requires_grad=False, num_buffers=-1):
buffers = []
if num_buffers == -1:
num_buffers = self.num_pipe_buffers
for count in range(num_buffers):
buffer = []
for shape, dtype in shapes_and_dtypes:
buffer.append(self._allocate_zeros(shape, dtype=dtype, requires_grad=requires_grad))
buffers.append(buffer)
return buffers
def forward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def backward(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def step(self, *args, **kwargs):
"""Disabled for pipeline parallel training. See ``train_batch()``. """
raise PipelineError("Only train_batch() is accessible in pipeline mode.")
def mem_status(self, msg, print_rank=-1, reset_max=False):
return
global mem_alloced, mem_cached
if not self.global_steps == 0 or not self.global_steps == 9:
#return
pass
if self.mpu.get_data_parallel_rank() != 0:
return
if self.global_rank != 0:
return
rank = self.global_rank
if print_rank != -1 and rank != print_rank:
return
get_accelerator().synchronize()
if reset_max:
get_accelerator().reset_max_memory_cached()
get_accelerator().reset_max_memory_allocated()
new_alloced = get_accelerator().memory_allocated()
new_cached = get_accelerator().memory_cached()
delta_alloced = new_alloced - mem_alloced
delta_cached = new_cached - mem_cached
mem_cached = new_cached
mem_alloced = new_alloced
max_alloced = get_accelerator().max_memory_allocated()
max_cached = get_accelerator().max_memory_cached()
# convert to GB for printing
new_alloced /= 1024**3
new_cached /= 1024**3
delta_alloced /= 1024**3
delta_cached /= 1024**3
max_alloced /= 1024**3
max_cached /= 1024**3
print(
f'RANK={rank} STAGE={self.stage_id} STEP={self.global_steps} MEMSTATS', msg,
f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) '
f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)')
def module_state_dict(self):
"""Override hack to save a pipe model and return the directory path of the save.
This method should only be called by DeepSpeed's ``save_checkpoint()``. The
recommended way of saving a ``PipelineModule`` outside of ``save_checkpoint()``
is ``save_state_dict()``.
Returns:
None
"""
assert isinstance(self.module, PipelineModule)
assert self._curr_ckpt_path is not None, \
"PipelineEngine expects module_state_dict() to be called from save_checkpoint()"
self.module.save_state_dict(self._curr_ckpt_path, checkpoint_engine=self.checkpoint_engine)
return None
def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None):
"""Override hack to instead use a directory path.
This is important because pipeline models checkpoint by layer instead of rank.
        If the checkpoint's module state is neither ``None`` nor a ``str``, we revert to
        ``super()`` expecting a ``dict``.
        Args:
            checkpoint (dict): Checkpoint with a ``module`` entry; a ``dict`` is loaded
                directly, otherwise layers are loaded from the checkpoint directory.
            strict (bool, optional): Strict state loading. Defaults to True.
"""
assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism"
state_dict = checkpoint['module']
if (state_dict is not None) and (not isinstance(state_dict, str)):
super().load_module_state_dict(state_dict, strict)
return
self.module.load_state_dir(load_dir=self._curr_ckpt_path,
strict=strict,
checkpoint_engine=self.checkpoint_engine)
# A map of PipeInstruction types to methods. Each method will be executed with the
# kwargs provided to the PipeInstruction from the scheduler.
_INSTRUCTION_MAP = {
schedule.OptimizerStep: _exec_optimizer_step,
schedule.ReduceGrads: _exec_reduce_grads,
schedule.ReduceTiedGrads: _exec_reduce_tied_grads,
schedule.LoadMicroBatch: _exec_load_micro_batch,
schedule.ForwardPass: _exec_forward_pass,
schedule.BackwardPass: _exec_backward_pass,
schedule.SendActivation: _exec_send_activations,
schedule.RecvActivation: _exec_recv_activations,
schedule.SendGrad: _exec_send_grads,
schedule.RecvGrad: _exec_recv_grads,
}
def _exec_schedule(self, pipe_schedule):
# Reserve and reset buffers.
self._reserve_pipe_buffers(pipe_schedule.num_pipe_buffers())
self.fwd_outputs = []
# For each step in the schedule
for step_cmds in pipe_schedule:
# For each instruction in the step
for cmd in step_cmds:
if type(cmd) not in self._INSTRUCTION_MAP:
raise RuntimeError(f'{self.__class__.__name__} does not understand instruction {repr(cmd)}')
# Equivalent to: self._exec_forward_pass(buffer_id=0)
self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self)
                self._exec_instr(**cmd.kwargs)
# DeepSpeed Team
import os
import glob
import re as regex
from functools import partial
import torch
import torch.nn as nn
from deepspeed import comm as dist
from deepspeed.utils import logger
from .. import utils as ds_utils
from ..activation_checkpointing import checkpointing
from .topology import PipeDataParallelTopology, PipelineParallelGrid
from deepspeed.runtime.state_dict_factory import SDLoaderFactory
from deepspeed.accelerator import get_accelerator
class PipelineError(Exception):
"""Errors related to the use of deepspeed.PipelineModule """
class LayerSpec:
"""Building block for specifying pipeline-parallel modules.
LayerSpec stores the type information and parameters for each stage in a
PipelineModule. For example:
.. code-block:: python
        nn.Sequential(
torch.nn.Linear(self.in_dim, self.hidden_dim, bias=False),
torch.nn.Linear(self.hidden_hidden, self.out_dim)
)
becomes
.. code-block:: python
layer_specs = [
LayerSpec(torch.nn.Linear, self.in_dim, self.hidden_dim, bias=False),
LayerSpec(torch.nn.Linear, self.hidden_hidden, self.out_dim)]
]
"""
def __init__(self, typename, *module_args, **module_kwargs):
self.typename = typename
self.module_args = module_args
self.module_kwargs = module_kwargs
if not issubclass(typename, nn.Module):
raise RuntimeError('LayerSpec only supports torch.nn.Module types.')
if dist.is_initialized():
self.global_rank = dist.get_rank()
else:
self.global_rank = -1
def __repr__(self):
return ds_utils.call_to_str(self.typename.__name__, self.module_args, self.module_kwargs)
def build(self, log=False):
"""Build the stored specification."""
if log:
logger.info(f'RANK={self.global_rank} building {repr(self)}')
return self.typename(*self.module_args, **self.module_kwargs)
class TiedLayerSpec(LayerSpec):
def __init__(self, key, typename, *module_args, forward_fn=None, tied_weight_attr='weight', **module_kwargs):
super().__init__(typename, *module_args, **module_kwargs)
self.key = key
self.forward_fn = forward_fn
self.tied_weight_attr = tied_weight_attr
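# Illustrative sketch (not executed here): TiedLayerSpec is typically used to share one module,
# e.g. word embeddings, between pipeline stages. `VocabEmbedding`, `TransformerLayer` and
# `embed_as_head` below are hypothetical placeholder names.
#
#   layer_specs = [
#       TiedLayerSpec('embed', VocabEmbedding, vocab_size, hidden),
#       LayerSpec(TransformerLayer, hidden),
#       TiedLayerSpec('embed', VocabEmbedding, vocab_size, hidden,
#                     forward_fn=embed_as_head, tied_weight_attr='weight'),
#   ]
#
# Every spec sharing the key 'embed' resolves to a single module instance on each owning stage,
# and PipelineModule below keeps the tied 'weight' synchronized across those stages.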
class PipelineModule(nn.Module):
"""Modules to be parallelized with pipeline parallelism.
The key constraint that enables pipeline parallelism is the
representation of the forward pass as a sequence of layers
and the enforcement of a simple interface between them. The
forward pass is implicitly defined by the module ``layers``. The key
assumption is that the output of each layer can be directly fed as
    input to the next, like a ``torch.nn.Sequential``. The forward pass is
implicitly:
.. code-block:: python
def forward(self, inputs):
x = inputs
for layer in self.layers:
x = layer(x)
return x
.. note::
Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3.
Args:
layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module.
num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided.
        topology (``deepspeed.runtime.pipe.ProcessTopology``, optional): Defines the axes of parallelism for training. Must be provided if ``num_stages`` is ``None``.
loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)``
seed_layers(bool, optional): Use a different seed for each layer. Defaults to False.
seed_fn(type, optional): The custom seed generating function. Defaults to random seed generator.
base_seed (int, optional): The starting seed. Defaults to 1234.
partition_method (str, optional): The method upon which the layers are partitioned. Defaults to 'parameters'.
        activation_checkpoint_interval (int, optional): The granularity of activation checkpointing in terms of number of layers. 0 disables activation checkpointing.
activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``.
        checkpointable_layers (list, optional): If provided, only layers whose class names appear in this list are activation checkpointed. Defaults to None, which applies no additional filtering.
"""
def __init__(self,
layers,
num_stages=None,
topology=None,
loss_fn=None,
seed_layers=False,
seed_fn=None,
base_seed=1234,
partition_method='parameters',
activation_checkpoint_interval=0,
activation_checkpoint_func=checkpointing.checkpoint,
checkpointable_layers=None):
super().__init__()
if num_stages is None and topology is None:
raise RuntimeError('must provide num_stages or topology')
self.micro_offset = 0
self.loss_fn = loss_fn
self.checkpointable_layers = checkpointable_layers
if checkpointable_layers is not None:
            assert isinstance(checkpointable_layers, list), "param `checkpointable_layers` must be a list."
self.seed_layers = seed_layers
self.seed_fn = seed_fn
self.base_seed = base_seed
if dist.get_rank() == 0:
try:
seed_str = self.seed_fn.__name__
except AttributeError:
seed_str = None
print(f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}')
# Setup world info
self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
self.global_rank = dist.get_rank(group=self.world_group)
self.world_size = dist.get_world_size(group=self.world_group)
        local_rank = os.environ.get("LOCAL_RANK", None)
        assert local_rank is not None, "LOCAL_RANK environment variable must be set"
        self.local_rank = int(local_rank)
if topology:
self._topo = topology
self.num_stages = self._topo.get_dim('pipe')
else:
self.num_stages = num_stages
if topology is None:
if self.world_size % self.num_stages != 0:
raise RuntimeError(
f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})')
dp = self.world_size // num_stages
topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp)
self._topo = topology
# Construct communicators for pipeline topology
self._grid = PipelineParallelGrid(process_group=self.world_group, topology=self._topo)
self.stage_id = self._topo.get_coord(self.global_rank).pipe
# Initialize partition information
self._layer_specs = list(layers)
self._num_layers = len(self._layer_specs)
self._local_start = 0
self._local_stop = None
self._partition_layers(method=partition_method)
self.forward_funcs = []
self.fwd_map = {}
self.tied_modules = nn.ModuleDict()
self.tied_weight_attrs = {}
# Offset the random seed by the stage ID.
#newseed = get_accelerator().initial_seed() + self._grid.get_stage_id()
#ds_utils.set_random_seed(newseed)
#with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]):
self._build()
self.to(get_accelerator().device_name(self.local_rank))
self.tied_comms = self._index_tied_modules()
self._synchronize_tied_weights()
self.activation_checkpoint_interval = activation_checkpoint_interval
self.activation_checkpoint_func = activation_checkpoint_func
def _build(self):
specs = self._layer_specs
for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]):
layer_idx = local_idx + self._local_start
if self.seed_layers:
if self.seed_fn:
self.seed_fn(self.base_seed + layer_idx)
else:
ds_utils.set_random_seed(self.base_seed + layer_idx)
# Recursively build PipelineModule objects
if isinstance(layer, PipelineModule):
raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED')
# LayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, nn.Module):
name = str(layer_idx)
self.forward_funcs.append(layer)
self.fwd_map.update({name: len(self.forward_funcs) - 1})
self.add_module(name, layer)
# TiedLayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, TiedLayerSpec):
# Build and register the module if we haven't seen it before.
if layer.key not in self.tied_modules:
self.tied_modules[layer.key] = layer.build()
self.tied_weight_attrs[layer.key] = layer.tied_weight_attr
if layer.forward_fn is None:
# Just use forward()
self.forward_funcs.append(self.tied_modules[layer.key])
else:
# User specified fn with args (module, input)
self.forward_funcs.append(partial(layer.forward_fn, self.tied_modules[layer.key]))
# LayerSpec objects contain an nn.Module that should be allocated now.
elif isinstance(layer, LayerSpec):
module = layer.build()
name = str(layer_idx)
self.forward_funcs.append(module)
self.fwd_map.update({name: len(self.forward_funcs) - 1})
self.add_module(name, module)
# Last option: layer may be a functional (e.g., lambda). We do nothing in
# that case and just use it in forward()
else:
self.forward_funcs.append(layer)
# All pipeline parameters should be considered as model parallel in the context
# of our FP16 optimizer
for p in self.parameters():
p.ds_pipe_replicated = False
def _count_layer_params(self):
"""Count the trainable parameters in individual layers.
This routine will only build one layer at a time.
Returns:
A list of the number of parameters in each layer.
"""
param_counts = [0] * len(self._layer_specs)
for idx, layer in enumerate(self._layer_specs):
if isinstance(layer, LayerSpec):
l = layer.build()
params = filter(lambda p: p.requires_grad, l.parameters())
param_counts[idx] = sum(p.numel() for p in params)
elif isinstance(layer, nn.Module):
params = filter(lambda p: p.requires_grad, layer.parameters())
param_counts[idx] = sum(p.numel() for p in params)
return param_counts
def _find_layer_type(self, layername):
idxs = []
typeregex = regex.compile(layername, regex.IGNORECASE)
for idx, layer in enumerate(self._layer_specs):
name = None
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
elif isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
continue
if typeregex.search(name):
idxs.append(idx)
if len(idxs) == 0:
raise RuntimeError(f"Partitioning '{layername}' found no valid layers to partition.")
return idxs
def forward(self, forward_input):
# We need to offset the seed by the microbatch ID. Save it in a local var to
# ensure it is preserved in the closure. Otherwise checkpointed forward funcs
# will see a different offset.
self.micro_offset += 1
def exec_range_func(start, end):
''' Helper function to be used with checkpoint()
Adapted from torch.utils.checkpoint:checkpoint_sequential()
'''
local_micro_offset = self.micro_offset + 1
def exec_func(*inputs):
# Single tensor inputs need to be unwrapped
if len(inputs) == 1:
inputs = inputs[0]
for idx, layer in enumerate(self.forward_funcs[start:end]):
self.curr_layer = idx + self._local_start
if self.seed_layers:
new_seed = (self.base_seed * local_micro_offset) + self.curr_layer
if self.seed_fn:
self.seed_fn(new_seed)
else:
ds_utils.set_random_seed(new_seed)
inputs = layer(inputs)
return inputs
return exec_func
if self.activation_checkpoint_interval == 0:
func = exec_range_func(0, len(self.forward_funcs))
x = func(forward_input)
else:
num_layers = len(self.forward_funcs)
x = forward_input
for start_idx in range(0, num_layers, self.activation_checkpoint_interval):
end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers)
funcs = self.forward_funcs[start_idx:end_idx]
# Since we either pass tensors or tuples of tensors without unpacking, we
# need to be careful not to double-wrap tensors with tuple.
if not isinstance(x, tuple):
x = (x, )
if self._is_checkpointable(funcs):
x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x)
else:
x = exec_range_func(start_idx, end_idx)(*x)
return x
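    # Worked example (illustrative): with activation_checkpoint_interval=2 and 5 local layers,
    # the loop above visits the chunks (start_idx, end_idx) = (0, 2), (2, 4), (4, 5). A chunk
    # whose layers pass _is_checkpointable() is executed as
    #   x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x)
    # so its intermediate activations are recomputed during backward instead of being stored.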
def _partition_layers(self, method='uniform'):
num_stages = self._topo.get_dim('pipe')
stage_id = self._topo.get_coord(self.global_rank).pipe
if self.global_rank == 0:
logger.info(f'Partitioning pipeline stages with method {method}')
method = method.lower()
# Each stage gets a simple uniform number of layers.
if method == 'uniform':
num_layers = len(self._layer_specs)
self.parts = ds_utils.partition_uniform(num_items=num_layers, num_parts=num_stages)
elif method == 'parameters':
param_counts = self._count_layer_params()
self.parts = ds_utils.partition_balanced(weights=param_counts, num_parts=num_stages)
elif method.startswith('type:'):
layertype = method.split(':')[1]
binary_weights = [0] * len(self._layer_specs)
for idx in self._find_layer_type(layertype):
binary_weights[idx] = 1
self.parts = ds_utils.partition_balanced(weights=binary_weights, num_parts=num_stages)
elif method == 'profile':
raise NotImplementedError(f'Partitioning method {method} not implemented.')
else:
raise NotImplementedError(f'Partitioning method {method} not implemented.')
# Print some information on the partitioning.
if self.global_rank == 0:
for stage in range(num_stages):
start = self.parts[stage]
stop = self.parts[stage + 1]
print(f'stage={stage} layers={stop - start}')
for idx, layer in enumerate(self._layer_specs[start:stop]):
name = str(layer)
if isinstance(layer, LayerSpec):
name = layer.typename.__name__
                    elif isinstance(layer, nn.Module):
name = layer.__class__.__name__
else:
try:
name = layer.__name__
except AttributeError:
pass
print(f' {idx+start:2d}: {name}')
if self.loss_fn:
try:
print(f' loss: {self.loss_fn.__name__}')
except AttributeError:
print(f' loss: {self.loss_fn.__class__.__name__}')
self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1])
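    # Illustrative example (assumed helper semantics): ds_utils.partition_uniform returns
    # stage boundaries, e.g. for 8 layers on 4 stages something like
    #
    #   parts = ds_utils.partition_uniform(num_items=8, num_parts=4)   # -> [0, 2, 4, 6, 8]
    #
    # so stage s builds layers parts[s]:parts[s+1]. The 'parameters' and 'type:<regex>' methods
    # instead feed per-layer weights (parameter counts, or 1 for matching layers) to
    # partition_balanced to even out the load across stages.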
def allreduce_tied_weight_gradients(self):
'''All reduce the gradients of the tied weights between tied stages'''
for key, comm in self.tied_comms.items():
weight = getattr(self.tied_modules[key], comm['weight_attr'])
dist.all_reduce(weight.grad, group=comm['group'])
def get_tied_weights_and_groups(self):
weight_group_list = []
for key, comm in self.tied_comms.items():
weight = getattr(self.tied_modules[key], comm['weight_attr'])
weight_group_list.append((weight, comm['group']))
return weight_group_list
def _synchronize_tied_weights(self):
for key, comm in self.tied_comms.items():
dist.broadcast(
getattr(comm['module'], comm['weight_attr']),
src=min(comm['ranks']),
group=comm['group'],
)
def _index_tied_modules(self):
''' Build communication structures for tied modules. '''
tied_comms = {}
if self._topo.get_dim('pipe') == 1:
return tied_comms
specs = self._layer_specs
tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec))
for key in tie_keys:
# Find the layers that the tied module appears in
tied_layers = []
for idx, layer in enumerate(specs):
if isinstance(layer, TiedLayerSpec) and layer.key == key:
tied_layers.append(idx)
# Find all stages with this tied module
# TODO: Would be nice to remove the nested data/model parallelism loops and
# TODO: instead generalize in some way, since we really just care about the
# TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...)
# TODO: fiber to generate process groups.
tied_stages = set(self.stage_owner(idx) for idx in tied_layers)
for dp in range(self._grid.data_parallel_size):
for mp in range(self._grid.get_slice_parallel_world_size()):
tied_ranks = []
for s in sorted(tied_stages):
if self._grid.get_slice_parallel_world_size() > 1:
tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp, model=mp))
else:
tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp))
group = dist.new_group(ranks=tied_ranks)
# Record this tied module if we own a local copy of it.
if self.global_rank in tied_ranks:
assert key in self.tied_modules
if key in self.tied_modules:
tied_comms[key] = {
'ranks': tied_ranks,
'group': group,
'weight_attr': self.tied_weight_attrs[key],
'module': self.tied_modules[key],
}
# Only count the tied module once in the eyes of the FP16 optimizer
if self.global_rank != tied_ranks[0]:
for p in self.tied_modules[key].parameters():
p.ds_pipe_replicated = True
'''
if len(tied_comms) > 0:
print(f'RANK={self.global_rank} tied_comms={tied_comms}')
'''
return tied_comms
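    # Illustrative example: if a TiedLayerSpec with key 'embed' appears as the first and last
    # layer of a 4-stage pipeline, then tied_stages == {0, 3}. For every (data, model) parallel
    # fiber a process group over the ranks of stages 0 and 3 is created; _synchronize_tied_weights
    # broadcasts the tied 'weight' from the lowest rank, allreduce_tied_weight_gradients sums its
    # gradients, and only the first owning rank's copy is counted by the FP16 optimizer
    # (ds_pipe_replicated=True on the other copies).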
def partitions(self):
return self.parts
def stage_owner(self, layer_idx):
assert 0 <= layer_idx < self._num_layers
for stage in range(self._topo.get_dim('pipe')):
if self.parts[stage] <= layer_idx < self.parts[stage + 1]:
return stage
raise RuntimeError(f'Layer {layer_idx} not owned? parts={self.parts}')
def _set_bounds(self, start=None, stop=None):
"""Manually define the range of layers that will be built on this process.
These boundaries are treated as list slices and so start is inclusive and stop is
exclusive. The default of None for both results in all layers being built
locally.
"""
self._local_start = start
self._local_stop = stop
def set_checkpoint_interval(self, interval):
assert interval >= 0
        self.activation_checkpoint_interval = interval
def topology(self):
""" ProcessTopology object to query process mappings. """
return self._topo
def mpu(self):
return self._grid
def num_pipeline_stages(self):
return self._topo.get_dim('pipe')
def ckpt_prefix(self, checkpoints_path, tag):
"""Build a prefix for all checkpoint files written by this module. """
# All checkpoint files start with this
rank_name = 'module'
# Data parallelism is omitted from the naming convention because we are agnostic
# to this in the checkpoint.
omit_dims = frozenset(['data'])
axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims]
for dim in axes:
rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim)
rank_name += f'-{dim}_{rank:02d}'
ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name)
return ckpt_name
def ckpt_layer_path(self, ckpt_dir, local_layer_idx):
"""Customize a prefix for a specific pipeline module layer. """
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}')
rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank)
if rank_repr != '':
layer_ckpt_path += f'-{rank_repr}'
layer_ckpt_path += '-model_states.pt'
return layer_ckpt_path
def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx):
"""Get all ckpt file list for a specific pipeline module layer. """
idx = local_layer_idx + self._local_start
layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}-')
layer_ckpt_path += "*model_states.pt"
ckpt_files = glob.glob(layer_ckpt_path)
ckpt_files.sort()
return ckpt_files
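    # Example of the resulting names (illustrative): for local layer 7 with no model-parallel
    # rank suffix, ckpt_layer_path() yields '<ckpt_dir>/layer_07-model_states.pt', and
    # ckpt_layer_path_list() globs '<ckpt_dir>/layer_07-*model_states.pt' to collect the matching
    # shards at load time. ckpt_prefix() builds the per-rank prefix for non-layer files, e.g.
    # '<checkpoints_path>/<tag>/module-pipe_01' on a pipeline/data topology.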
def save_state_dict(self, save_dir, checkpoint_engine):
# Processes having the same model parallel rank on different data parallel instances
# have identical layer weights. We can distribute the task of saving the layer weights
# among the data parallel ranks. For example, if a pipeline stage has 9 layers and
# if there are 2 data parallel instances, rank 0 will save the first 5 layers and
# rank 1 will save the last 4.
dp_rank = self._grid.data_parallel_id
dp_size = self._grid.data_parallel_size
num_layers = len(self.forward_funcs)
if self.checkpoint_parallel_write_pipeline:
# spread layers evenly across data parallel ranks
offsets = ds_utils.partition_uniform(num_layers, dp_size)
start, end = offsets[dp_rank], offsets[dp_rank + 1]
else:
# data parallel rank 0 writes all layers
if dp_rank != 0:
return
start, end = 0, num_layers
layer_list = self.forward_funcs[start:end]
checkpoint_engine.makedirs(save_dir, exist_ok=True)
for idx, layer in enumerate(layer_list):
model_ckpt_path = self.ckpt_layer_path(save_dir, start + idx)
if not hasattr(layer, 'state_dict'):
continue
# We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save()
# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.
# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.
            # Tensor cloning helps to avoid this problem because the storage of cloned tensors is closer to the true size.
# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.
# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing
orig_state_dict = layer.state_dict()
final_state_dict = type(orig_state_dict)({k: v.clone() for k, v in orig_state_dict.items()})
checkpoint_engine.save(final_state_dict, model_ckpt_path)
def load_state_dir(self, load_dir, checkpoint_engine, strict=True):
for idx, layer in enumerate(self.forward_funcs):
# Functions, etc. will not have state_dicts
if not hasattr(layer, 'load_state_dict'):
continue
# get all checkpoint files for the layer.
model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx)
mp_rank = self._grid.get_slice_parallel_rank()
mp_world_size = self._grid.get_slice_parallel_world_size()
sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list,
version=2.0,
checkpoint_engine=checkpoint_engine)
load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True)
layer.load_state_dict(checkpoint)
# if self._grid.data_parallel_id == 0:
# logger.info(
# f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}'
# )
self._synchronize_tied_weights()
def _is_checkpointable(self, funcs):
# This is an unfortunate hack related to torch and deepspeed activation checkpoint implementations.
# Some layers like torch.nn.Embedding will not receive grads if checkpointed, which breaks things.
# I presume it's related to the discrete inputs that cannot require_grad? Need to revisit.
if self.__class__.__name__ in ('GPTModelPipe', 'GPT2ModelPipe'):
return all('ParallelTransformerLayerPipe' in f.__class__.__name__ for f in funcs)
if self.checkpointable_layers is not None:
return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs)
params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)]
return any(len(list(p)) > 0 for p in params) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/pipe/module.py | module.py |
# DeepSpeed Team
import os
import torch
import torch_nebula
from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
CheckpointEngine
from deepspeed.utils import logger, log_dist
from deepspeed.nebula.constants import *
def _get_tag_from_path(path):
return os.path.basename(os.path.dirname(path))
class NebulaCheckpointEngine(CheckpointEngine):
def __init__(self, config_params=None):
super().__init__(config_params)
self.checkpoint = None
self.tag_flag = None
self.enable_nebula_load = config_params.enable_nebula_load
self.nebula_load_path = config_params.load_path
if self.nebula_load_path is None:
self.nebula_load_path = config_params.persistent_storage_path
nebula_config_params = {
NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path,
NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval,
NEBULA_NUM_OF_VERSION_IN_RETENTION: config_params.num_of_version_in_retention,
}
torch_nebula.init(**nebula_config_params)
def create(self, tag):
log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0])
        # -2 means: the customer needs to explicitly tell Nebula that the
        # current checkpoint is complete via the commit() method.
self.checkpoint = torch_nebula.Checkpoint(tag, -2)
def save(self, state_dict, path: str):
log_dist(f"[Nebula] Create dummy files for loading.")
torch.save("", path)
tag = _get_tag_from_path(path)
        partition_name = os.path.basename(path)
        logger.info(f"[Nebula] Saving {partition_name} under tag {tag}...")
        self.checkpoint.save(partition_name, state_dict)
        logger.info(f"[Nebula] Saved {partition_name} under tag {tag}.")
return None
def load(self, path: str, map_location=None):
tag = _get_tag_from_path(path)
first_load_flag = self.tag_flag is None or self.tag_flag == tag
if not self.enable_nebula_load and first_load_flag:
self.tag_flag = tag
logger.info(f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...")
partition = torch.load(path, map_location=map_location)
logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .")
return partition
        partition_name = os.path.basename(path)
logger.info(f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}...")
checkpoint = None
if tag in (None, 'latest', 'latest_universal'):
# In some cases, there is the inconsistent tag between deepspeed metadata (latest file)
# and nebula metadata, will lead to the failure on loading with deepspeed tag. Then we
# will try to load the valid latest checkpoint from nebula(tier3 > tier1). So, in summary
# when met failure loading for given tag, the loading priority would be like:
# nebula tier3 latest > nebula tier1 latest.
checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
else:
checkpoint = torch_nebula.get_checkpoint(tag=tag, persist_path=self.nebula_load_path)
if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
logger.info(
f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!"
)
# nebula tier3 latest
checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path)
if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''):
logger.info(
f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!"
)
# nebula tier1 latest
checkpoint = torch_nebula.get_latest_checkpoint()
logger.warning(f"Unable to find valid checkpoint from Nebula under tag:{tag}.")
return None
tag = checkpoint.tag
self.tag_flag = -1
        partition = checkpoint.load(partition_name, map_location=map_location)
logger.info(f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.")
return partition
def commit(self, tag):
        # nebula commit will be called when all files under the given tag are ready to be persisted in the async way.
logger.info(f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting")
commit_rls = self.checkpoint.commit()
if not commit_rls:
logger.error(f"[Nebula] failed to commit the checkpoint, please check the log.")
return False
return commit_rls | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py | nebula_checkpoint_engine.py |
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from FP16_Optimizer in NVIDIA/apex
"""
from deepspeed.moe.utils import split_params_grads_into_shared_and_expert_params
import torch
from torch._utils import _flatten_dense_tensors
from deepspeed.runtime import DeepSpeedOptimizer
from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm
from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE
from deepspeed.utils import logger
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
class FP16_UnfusedOptimizer(DeepSpeedOptimizer):
"""
FP16 Optimizer without weight fusion to support LAMB optimizer
    For usage example, please see TODO: DeepSpeed V2 Tutorial
"""
def __init__(self,
init_optimizer,
deepspeed=None,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
mpu=None,
clip_grad=0.0,
fused_lamb_legacy=False):
self.fused_lamb_legacy = fused_lamb_legacy
self._global_grad_norm = 0.
if dist.get_rank() == 0:
logger.info(f'Fused Lamb Legacy : {self.fused_lamb_legacy} ')
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# param groups
self.fp16_groups = []
self.fp32_groups = []
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
#fp16 weights that represents the actual model weights
self.fp16_groups.append(param_group['params'])
#creating a fp32 copy of the weights that will be updated first then
#copied to fp16 weights
fp32_group = [p.clone().float().detach() for p in param_group['params']]
#in case the internal optimizer needs it
for p in fp32_group:
p.requires_grad = True
#setting the param groups in the optimizer to point to fp32
#note these are not the weights used by the model
#the model uses the fp16 version that we added to fp16_group
self.fp32_groups.append(fp32_group)
param_group['params'] = self.fp32_groups[i]
# we may have a way of fusing dynamic scale. Do not support for now
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = 2.0
if dynamic_loss_args is None:
self.cur_scale = 1.0 * 2**16
self.scale_window = 1000
self.min_loss_scale = 0.25
else:
self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE]
self.scale_window = dynamic_loss_args[SCALE_WINDOW]
self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE]
else:
self.dynamic_loss_scale = False
self.cur_iter = 0
self.cur_scale = static_loss_scale
self.custom_loss_scaler = False
self.external_loss_scale = None
self.verbose = verbose
self.clip_grad = clip_grad
self.norm_type = 2
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
self.mpu = mpu
self.overflow = False
self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed)
self.initialize_optimizer_states()
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist outside of the step function
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_to_none:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def step_fused_lamb(self, closure=None):
"""
Not supporting closure.
"""
# First compute norm for all group so we know if there is overflow
grads_groups_flat = []
grads_groups = []
norm_groups = []
expert_norm_groups = []
for i, group in enumerate(self.fp16_groups):
grads = [
torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group
]
grads_groups.append(grads)
grads_groups_flat.append(_flatten_dense_tensors(grads))
grads_for_norm, expert_grads_for_norm = split_params_grads_into_shared_and_expert_params(group)
norm_group_value = 0.0
if len(grads_for_norm) > 0:
norm_group_value = get_weight_norm(_flatten_dense_tensors(grads_for_norm), mpu=self.mpu)
norm_groups.append(norm_group_value)
expert_norm_group_value = 0.0
if len(expert_grads_for_norm) > 0:
expert_norm_group_value = get_weight_norm(_flatten_dense_tensors(expert_grads_for_norm), mpu=self.mpu)
expert_norm_groups.append(expert_norm_group_value)
self.overflow = self.overflow_checker.check_using_norm(norm_groups + expert_norm_groups)
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
"scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
return self.overflow
self._global_grad_norm = get_global_norm(norm_list=norm_groups)
combined_scale = self.unscale_and_clip_grads(self._global_grad_norm, apply_scale=False)
self.optimizer.step(grads=grads_groups, output_params=self.fp16_groups, scale=combined_scale)
for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups):
for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)):
#remove the fp32 grad
fp32_param.grad = None
#copy data from fp32 to fp16
fp16_param.data.copy_(fp32_param.data)
return self.overflow
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
def step(self, closure=None):
"""
Not supporting closure.
"""
if self.fused_lamb_legacy:
return self.step_fused_lamb()
self.overflow = self.overflow_checker.check()
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
"scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
return self.overflow
norm_groups = []
for i, group in enumerate(self.fp16_groups):
grads_for_norm, _ = split_params_grads_into_shared_and_expert_params(group)
norm_group_value = 0.0
if len(grads_for_norm) > 0:
norm_group_value = get_weight_norm(grads_for_norm, mpu=self.mpu)
norm_groups.append(norm_group_value)
            # copying gradients to fp32 to work with fp32 parameters
for fp32_param, fp16_param in zip(self.fp32_groups[i], self.fp16_groups[i]):
if fp16_param.grad is None:
fp32_param.grad = torch.zeros(fp16_param.size(), dtype=fp32_param.dtype, device=fp32_param.device)
else:
fp32_param.grad = fp16_param.grad.to(fp32_param.dtype)
self._global_grad_norm = get_global_norm(norm_list=norm_groups)
self.unscale_and_clip_grads(self._global_grad_norm)
self.optimizer.step()
for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups):
for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)):
#remove the fp32 grad
fp32_param.grad = None
#copy data from fp32 to fp16
fp16_param.data.copy_(fp32_param.data)
return self.overflow
def unscale_and_clip_grads(self, total_norm, apply_scale=True):
# compute combined scale factor for this group
combined_scale = self.cur_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.cur_scale
if apply_scale:
for group in self.fp32_groups:
for param in group:
if param.grad is not None:
param.grad.data.mul_(1. / combined_scale)
return combined_scale
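    # Worked example (illustrative): with cur_scale=1024, clip_grad=1.0 and a *scaled* gradient
    # norm total_norm=4096 (an unscaled norm of ~4), clip ~= 4 > 1, so combined_scale ~= 4096.
    # Multiplying every grad by 1/combined_scale then removes the 1024x loss scale and clips the
    # true gradient norm from ~4 down to ~1 in a single in-place pass.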
def backward(self, loss, create_graph=False, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
scaled_loss = (loss.float()) * self.cur_scale
scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph)
def _update_scale(self, skip):
if self.dynamic_loss_scale:
prev_scale = self.cur_scale
if skip:
self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale)
self.last_overflow_iter = self.cur_iter
if self.verbose:
logger.info("Grad overflow on iteration: %s", self.cur_iter)
logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
# Ensure self.scale_window updates since last overflow
stable_interval = (self.cur_iter - self.last_overflow_iter) - 1
if (stable_interval > 0) and (stable_interval % self.scale_window == 0):
self.cur_scale *= self.scale_factor
if self.verbose:
logger.info(f"No Grad overflow for {self.scale_window} iterations")
logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
if skip:
logger.info("Grad overflow on iteration %s", self.cur_iter)
logger.info("Using static loss scale of %s", self.cur_scale)
self.cur_iter += 1
return
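    # Example of the dynamic schedule (illustrative): with scale_factor=2.0 and scale_window=1000,
    # an overflow at iteration i immediately halves cur_scale (never below min_loss_scale), and the
    # scale is doubled again only after 1000 consecutive overflow-free iterations, i.e. at roughly
    # iterations i+1001, i+2001, ... while no new overflow occurs.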
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict['fp32_groups'] = self.fp32_groups
return state_dict
# Refresh fp32 master params from fp16 copies
def refresh_fp32_params(self):
for current_group, saved_group in zip(self.fp32_groups, self.fp16_groups):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def load_state_dict(self, state_dict, load_optimizer_states=True):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
if load_optimizer_states:
self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current_group, saved_group in zip(self.fp32_groups, state_dict['fp32_groups']):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def __repr__(self):
return repr(self.optimizer)
def initialize_optimizer_states(self):
for i, group in enumerate(self.fp16_groups):
for param in group:
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=get_accelerator().current_device_name())
for i, group in enumerate(self.fp32_groups):
for param in group:
param.grad = torch.zeros(param.size(),
dtype=param.dtype,
device=get_accelerator().current_device_name())
self.optimizer.step()
for i, group in enumerate(self.fp16_groups):
for param in group:
param.grad = None
for i, group in enumerate(self.fp32_groups):
for param in group:
param.grad = None | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/fp16/unfused_optimizer.py | unfused_optimizer.py |
# DeepSpeed Team
"""
Copyright NVIDIA/apex
This file is adapted from FP16_Optimizer in NVIDIA/apex
"""
import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.runtime import DeepSpeedOptimizer
from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm
from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE
from deepspeed.utils import groups, logger, log_dist
from deepspeed import comm as dist
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD
from deepspeed.accelerator import get_accelerator
class FP16_Optimizer(DeepSpeedOptimizer):
"""
FP16 Optimizer for training fp16 models. Handles loss scaling.
    For usage example, please see TODO: DeepSpeed V2 Tutorial
"""
def __init__(self,
init_optimizer,
deepspeed=None,
static_loss_scale=1.0,
dynamic_loss_scale=False,
initial_dynamic_scale=2**32,
dynamic_loss_args=None,
verbose=True,
mpu=None,
clip_grad=0.0,
fused_adam_legacy=False,
has_moe_layers=False,
timers=None):
self.fused_adam_legacy = fused_adam_legacy
self.timers = timers
self.deepspeed = deepspeed
self.has_moe_layers = has_moe_layers
self.using_pipeline = self.deepspeed.pipeline_parallelism
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# param flattened by groups
self.fp16_groups = []
self.fp16_groups_flat = []
self.fp32_groups_flat = []
self._global_grad_norm = 0.
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
# push this group to list before modify
self.fp16_groups.append(param_group['params'])
# init fp16 weight buffer, flattened
self.fp16_groups_flat.append(_flatten_dense_tensors([p.clone().detach() for p in self.fp16_groups[i]]))
# set model fp16 weight to slices of flattened buffer
updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data = q.data
# init master weight, flattened
self.fp32_groups_flat.append(self.fp16_groups_flat[i].clone().float().detach())
            # modify optimizer to have flat master weight
self.fp32_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it
param_group['params'] = [self.fp32_groups_flat[i]]
# we may have a way of fusing dynamic scale. Do not support for now
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = 2
if dynamic_loss_args is None:
self.cur_scale = initial_dynamic_scale
self.scale_window = 1000
self.min_loss_scale = 1
else:
self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE]
self.scale_window = dynamic_loss_args[SCALE_WINDOW]
self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE]
else:
self.dynamic_loss_scale = False
self.cur_iter = 0
self.cur_scale = static_loss_scale
self.verbose = verbose
self.custom_loss_scaler = False
self.external_loss_scale = None
self.clip_grad = clip_grad
self.norm_type = 2
self.step_count = 0
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
#model parallel object
self.mpu = mpu
self.overflow = False
self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed)
self.initialize_optimizer_states()
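    # Illustrative sketch of the flatten/unflatten pattern used above (hypothetical standalone
    # tensors, not executed here):
    #
    #   params = [torch.randn(2, 3).half(), torch.randn(5).half()]
    #   flat = _flatten_dense_tensors([p.clone().detach() for p in params])  # one contiguous fp16 buffer
    #   for p, q in zip(params, _unflatten_dense_tensors(flat, params)):
    #       p.data = q.data                                   # params now view slices of `flat`
    #   master = flat.clone().float().detach()                # fp32 master copy
    #   master.requires_grad = True
    #
    # The wrapped optimizer then steps on `master`, and step() copies the updated fp32 values
    # back into the fp16 views in one shot.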
def initialize_optimizer_states(self):
for i, group in enumerate(self.fp16_groups):
self.fp32_groups_flat[i].grad = torch.zeros(self.fp32_groups_flat[i].size(),
device=self.fp32_groups_flat[i].device)
self.optimizer.step()
for i, group in enumerate(self.fp16_groups):
self.fp32_groups_flat[i].grad = None
return
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_to_none:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def step_fused_adam(self, closure=None):
"""
Not supporting closure.
"""
# First compute norm for all group so we know if there is overflow
grads_groups_flat = []
norm_groups = []
for i, group in enumerate(self.fp16_groups):
grads_groups_flat.append(
_flatten_dense_tensors([
torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group
]))
norm_groups.append(get_weight_norm(grads_groups_flat[i], mpu=self.mpu))
self.overflow = self.overflow_checker.check_using_norm(norm_groups)
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss "
"scale: {}, reducing to {}".format(prev_scale, self.cur_scale))
return self.overflow
scaled_grad_norm = get_global_norm(norm_list=norm_groups)
combined_scale = self.unscale_and_clip_grads(grads_groups_flat, scaled_grad_norm, apply_scale=False)
# Stash unscaled gradient norm
self._global_grad_norm = scaled_grad_norm / self.cur_scale
# norm is in fact norm*cur_scale
self.optimizer.step(grads=[[g] for g in grads_groups_flat],
output_params=[[p] for p in self.fp16_groups_flat],
scale=combined_scale,
grad_norms=norm_groups)
# TODO: we probably don't need this? just to be safe
for i in range(len(norm_groups)):
updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data = q.data
return self.overflow
def start_timers(self, name_list):
if self.timers is not None:
for name in name_list:
self.timers(name).start()
def stop_timers(self, name_list):
if self.timers is not None:
for name in name_list:
self.timers(name).stop()
def log_timers(self, name_list):
if self.timers is not None:
self.timers.log(name_list)
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
def step(self, closure=None):
"""
Not supporting closure.
"""
if self.fused_adam_legacy:
return self.step_fused_adam()
COMPUTE_NORM = "compute_norm"
OVERFLOW_CHECK = 'overflow_check'
OVERFLOW_TIMERS = [COMPUTE_NORM, OVERFLOW_CHECK]
UNSCALE_AND_CLIP = 'unscale_and_clip'
BASIC_STEP = 'basic_step'
UPDATE_FP16 = 'update_fp16'
STEP_TIMERS = OVERFLOW_TIMERS + [UNSCALE_AND_CLIP, BASIC_STEP, UPDATE_FP16]
# First determine if there is overflow.
self.start_timers([OVERFLOW_CHECK])
fp16_params = []
for i, group in enumerate(self.fp16_groups):
fp16_params.extend([p for p in group if p.grad is not None])
self.overflow = self.overflow_checker.has_overflow(fp16_params)
self.stop_timers([OVERFLOW_CHECK])
prev_scale = self.cur_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
log_dist(
"Overflow detected. Skipping step. Attempted loss "
f"scale: {prev_scale}, reducing to {self.cur_scale}",
ranks=[0])
# Clear gradients
for i, group in enumerate(self.fp16_groups):
for p in group:
p.grad = None
self.log_timers(OVERFLOW_TIMERS)
return self.overflow
grads_groups_flat = []
for i, group in enumerate(self.fp16_groups):
data_type = self.fp32_groups_flat[i].dtype
grads_groups_flat.append(
_flatten_dense_tensors([
torch.zeros(p.size(), dtype=data_type, device=p.device) if p.grad is None else p.grad.to(data_type)
for p in group
]))
for p in group:
p.grad = None
self.fp32_groups_flat[i].grad = grads_groups_flat[i]
self.start_timers([COMPUTE_NORM])
all_groups_norm = get_grad_norm(self.fp32_groups_flat, mpu=self.mpu)
self.stop_timers([COMPUTE_NORM])
if self.has_moe_layers:
all_groups_norm = self._get_norm_with_moe_layers(all_groups_norm)
scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm])
# Stash unscaled gradient norm
self._global_grad_norm = scaled_global_grad_norm / self.cur_scale
self.start_timers([UNSCALE_AND_CLIP])
self.unscale_and_clip_grads(grads_groups_flat, scaled_global_grad_norm)
self.stop_timers([UNSCALE_AND_CLIP])
self.start_timers([BASIC_STEP])
self.optimizer.step()
self.stop_timers([BASIC_STEP])
#get rid of the fp32 gradients. Not needed anymore
for group in self.fp32_groups_flat:
group.grad = None
self.start_timers([UPDATE_FP16])
for i in range(len(self.fp16_groups)):
updated_params = _unflatten_dense_tensors(self.fp32_groups_flat[i], self.fp16_groups[i])
for p, q in zip(self.fp16_groups[i], updated_params):
p.data.copy_(q.data)
self.stop_timers([UPDATE_FP16])
self.log_timers(STEP_TIMERS)
self.step_count += 1
return self.overflow
def _get_norm_with_moe_layers(self, all_groups_norm):
#all_groups_norm_old = all_groups_norm
# Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce
if self.using_pipeline:
pg = self.deepspeed.mpu.get_data_parallel_group()
else:
pg = groups._get_data_parallel_group()
scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=pg))
scaled_norm_tensor = torch.tensor(scaled_norm, device=self.fp32_groups_flat[0].device, dtype=torch.float)
dist.all_reduce(scaled_norm_tensor, group=pg)
all_groups_norm = scaled_norm_tensor.item()
#print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}")
return all_groups_norm
def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True):
# compute combined scale factor for this group
combined_scale = self.cur_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.cur_scale
if apply_scale:
for grad in grad_groups_flat:
grad.data.mul_(1. / combined_scale)
return combined_scale
def backward(self, loss, create_graph=False, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
scaled_loss = (loss.float()) * self.cur_scale
scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph)
def _update_scale(self, skip):
if self.dynamic_loss_scale:
prev_scale = self.cur_scale
if skip:
self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale)
self.last_overflow_iter = self.cur_iter
if self.verbose:
logger.info(f"\nGrad overflow on iteration {self.cur_iter}")
logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
                # Only grow the scale after scale_window consecutive overflow-free iterations
stable_interval = (self.cur_iter - self.last_overflow_iter) - 1
if (stable_interval > 0) and (stable_interval % self.scale_window == 0):
self.cur_scale *= self.scale_factor
if self.verbose:
logger.info(f"No Grad overflow for {self.scale_window} iterations")
logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}")
else:
if skip:
logger.info("Grad overflow on iteration: %s", self.cur_iter)
logger.info("Using static loss scale of: %s", self.cur_scale)
self.cur_iter += 1
return
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['cur_scale'] = self.cur_scale
state_dict['cur_iter'] = self.cur_iter
if state_dict['dynamic_loss_scale']:
state_dict['last_overflow_iter'] = self.last_overflow_iter
state_dict['scale_factor'] = self.scale_factor
state_dict['scale_window'] = self.scale_window
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict['fp32_groups_flat'] = self.fp32_groups_flat
state_dict[CLIP_GRAD] = self.clip_grad
return state_dict
# Refresh fp32 master params from fp16 copies
def refresh_fp32_params(self):
for current, saved in zip(self.fp32_groups_flat, self.fp16_groups_flat):
current.data.copy_(saved.data)
def load_state_dict(self, state_dict, load_optimizer_states=True):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.cur_scale = state_dict['cur_scale']
self.cur_iter = state_dict['cur_iter']
if state_dict['dynamic_loss_scale']:
self.last_overflow_iter = state_dict['last_overflow_iter']
self.scale_factor = state_dict['scale_factor']
self.scale_window = state_dict['scale_window']
if load_optimizer_states:
self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
self.clip_grad = state_dict[CLIP_GRAD]
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']):
current.data.copy_(saved.data)
def __repr__(self):
return repr(self.optimizer)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/fp16/fused_optimizer.py | fused_optimizer.py |
# DeepSpeed Team
"""
Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Taken and modified for DeepSpeed from:
https://github.com/NVIDIA/Megatron-LM/blob/master/fp16/loss_scaler.py
Commit: 93ab4bea59dc5cbf97c079d313741866af4deac9
"""
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
INITIAL_LOSS_SCALE = 'init_scale'
SCALE_WINDOW = 'scale_window'
DELAYED_SHIFT = 'delayed_shift'
MIN_LOSS_SCALE = 'min_scale'
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
return t[0]
class LossScalerBase:
"""LossScalarBase
Base class for a loss scaler
"""
def __init__(self, cur_scale):
self.cur_scale = cur_scale
self.dynamic = False
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def update_scale(self, overflow):
pass
def backward(self, loss, retain_graph=False):
scaled_loss = loss * self.loss_scale
scaled_loss.backward(retain_graph=retain_graph)
# print(f'LossScalerBackward: {scaled_loss=}')
class LossScaler(LossScalerBase):
"""
Class that manages a static loss scale. This class is intended to interact with
:class:`FP16_Optimizer`, and should not be directly manipulated by the user.
Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to
:class:`FP16_Optimizer`'s constructor.
Args:
scale (float, optional, default=1.0): The loss scale.
"""
def __init__(self, scale=1):
super(LossScaler, self).__init__(scale)
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
        return False
class DynamicLossScaler(LossScalerBase):
"""
Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler`
indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of
:class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler`
operates, because the default options can be changed using the
the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor.
Loss scaling is designed to combat the problem of underflowing gradients encountered at long
times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss
scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are
encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has
occurred.
:class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch,
and :class:`DynamicLossScaler` adjusts the loss scale to a lower value.
If a certain number of iterations occur without overflowing gradients detected,
:class:`DynamicLossScaler` increases the loss scale once more.
In this way :class:`DynamicLossScaler` attempts to "ride the edge" of
always using the highest loss scale possible without incurring overflow.
Args:
init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.`
scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``.
scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale.
"""
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000,
min_scale=1,
delayed_shift=1,
consecutive_hysteresis=False,
raise_error_at_min_scale=True,
dtype=torch.half):
super(DynamicLossScaler, self).__init__(init_scale)
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
self.min_scale = min_scale
self.delayed_shift = delayed_shift
self.cur_hysteresis = delayed_shift
self.consecutive_hysteresis = consecutive_hysteresis
self.raise_error_at_min_scale = raise_error_at_min_scale
self.dynamic = True
self.dtype = dtype
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum in [float('inf'), -float('inf')] or cpu_sum != cpu_sum:
return True
return False
# `overflow` is boolean indicating whether the gradient overflowed
def update_scale(self, overflow):
if overflow:
# self.cur_scale /= self.scale_factor
if self.delayed_shift == 1 or self.cur_hysteresis == 1:
if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale:
raise Exception(
"Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.")
else:
next_scale = max(self.cur_scale / self.scale_factor, self.min_scale)
if dist.get_rank() == 0:
overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
if self.dtype == torch.half:
overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}"
logger.info(overflow_msg)
self.cur_scale = next_scale
else:
if dist.get_rank() == 0:
overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step."
if self.dtype == torch.half:
overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. Reducing hysteresis to {self.cur_hysteresis-1}"
logger.info(overflow_msg)
self.cur_hysteresis -= 1
self.last_overflow_iter = self.cur_iter
else:
if self.consecutive_hysteresis:
self.cur_hysteresis = self.delayed_shift
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
if not self.consecutive_hysteresis:
self.cur_hysteresis = self.delayed_shift
self.cur_scale *= self.scale_factor
self.cur_iter += 1
# Although loss scaling is only defined for fp16, for backwards compatibility
# we still create a scaler for other dtypes (fp32, bf16) that does not perform any scaling.
def CreateLossScaler(dtype, static_loss_scale, dynamic_scaling, dynamic_loss_args):
if dtype == torch.half and dynamic_scaling:
if dynamic_loss_args is None:
return DynamicLossScaler(dtype=dtype)
return DynamicLossScaler(dtype=dtype, **dynamic_loss_args)
loss_scale_value = static_loss_scale if dtype == torch.half else 1.0
return LossScaler(scale=loss_scale_value)
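# A minimal usage sketch of the factory above (illustrative values; `loss` and `overflow`
# are assumed to come from the caller's training loop):
#
#   scaler = CreateLossScaler(dtype=torch.half,
#                             static_loss_scale=1.0,
#                             dynamic_scaling=True,
#                             dynamic_loss_args={INITIAL_LOSS_SCALE: 2**16, SCALE_WINDOW: 1000})
#   scaler.backward(loss)          # backprop on the scaled loss
#   scaler.update_scale(overflow)  # grow or shrink the scale based on the overflow check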
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
"""
TO-DO separate out into an example.
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
print('Iter {} scaled loss: {}'.format(t, loss.data[0]))
print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
has_overflow = DynamicLossScaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('fp16 dynamic loss scale overflow!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
""" | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/fp16/loss_scaler.py | loss_scaler.py |
# DeepSpeed Team
import types
import torch
import numpy as np
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
class OnebitAdam(torch.optim.Optimizer):
"""Implements the 1-bit Adam algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/
For technical details please read https://arxiv.org/abs/2102.02888
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
freeze_step (int, optional): Number of steps for warmup (uncompressed)
stage before we start using compressed communication. (default 100000)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 1-bit Adam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
        cuda_aware (boolean, optional): Set True if the underlying MPI implementation
            supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
freeze_step=100000,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl'):
if amsgrad:
raise RuntimeError('1-bit Adam does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(OnebitAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.comm_time = 0.0
self.step_time = 0.0
self.ave_step = 1
self.bk_time = 0.0
self.deepspeed = deepspeed
self.adam_freeze_key = False
self.initialize = False
self.freeze_step = freeze_step
self.cuda_aware = cuda_aware
self.using_pipeline = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
assert (
(TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2
), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
        """
loss = None
if closure is not None:
loss = closure()
gather_time = 0
allgather_time = 0
all_time = 0
if self.adam_freeze_key is False:
v_diff_buffer = 0.0
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('1-bit Adam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if not self.initialize or (self.adam_freeze_key and 'worker_error' not in state.keys()):
state['tensor_size'] = torch.numel(p.data)
state['corrected_tensor_size'] = state['tensor_size']
if state['tensor_size'] % (self.size * self.divider) != 0:
state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] %
(self.size * self.divider)))
state['server_chunk_size'] = state['corrected_tensor_size'] // self.size
get_accelerator().empty_cache()
state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device)
state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device)
get_accelerator().empty_cache()
self.adam_freeze_key = True
if not self.initialize and dist.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if self.adam_freeze_key is False:
                    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
grad = None
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
else:
if 'non_freeze' in group.keys() and group['non_freeze'] is True:
dist.all_reduce(grad)
grad.mul_(1 / dist.get_world_size())
                        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
grad = None
else:
if self.initialize is True:
                            exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
grad = None
if self.size > 1:
exp_avg.set_(
self.comm_backend_handle.compressed_allreduce(exp_avg, state['worker_error'],
state['server_error'],
self.deepspeed.local_rank))
# Because 1-bit compression cannot represent exact zero, it is required to
# provide a momentum mask for those params that have constant exact zeros in their
# momentums, otherwise the compression error would keep accumulating.
# For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
# always have exact zeros in its momentum for row 129 to 512, because it only
# learns up to seq length 128 while the model supports up to 512 seq length.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
if 'exp_avg_mask' in group:
if exp_avg.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
exp_avg.mul_(group['exp_avg_mask'])
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if self.initialize:
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
with torch.no_grad():
p.add_(-group['lr'] * update)
if not self.initialize:
print('Pop out errors', flush=True)
state.pop('worker_error')
state.pop('server_error')
if not self.initialize:
self.adam_freeze_key = False
self.initialize = True
print(f"Finished the initialization step at rank {dist.get_rank()}")
return loss
if self.adam_freeze_key is False:
if state['step'] >= self.freeze_step:
print('OnebitAdam - starting compressed communication')
self.adam_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
# Because at different stage exp_avg_mask may change (e.g.,
# BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
# in checkpoints but always use the one user provided in training script.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.")
if self.adam_freeze_key is True:
self.adam_freeze_key = False
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
else:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitAdam compression stage starts/continues.")
if self.adam_freeze_key is False:
self.adam_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
# We reset the compression errors when loading checkpoints for 3 reasons:
# 1) The worker and server error at each GPU are distinct, so in current implementation
# only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
# If we want to save them correctly we need O(num_gpu*model_size) memory in order to
# gather all the error, which is a very large memory requirement. It's possible to save
# them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
# 2) Even if we are able to save the compression errors correctly, you need to have the
# exact same number of GPUs in order to load them correctly.
# 3) We verified on BERT pre-training that occasionally resetting the compression error
# at checkpoint loading does not affect the convergence.
# However, please avoid frequent checkpoint loading which could break the error
# compensation mechanism thus affect the convergence.
for group in self.param_groups:
for p in group['params']:
if 'worker_error' in self.state[p]:
self.state[p].pop('worker_error')
if 'server_error' in self.state[p]:
self.state[p].pop('server_error') | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/fp16/onebit/adam.py | adam.py |
# DeepSpeed Team
import types
import torch
import numpy as np
from deepspeed.accelerator import get_accelerator
from deepspeed import comm as dist
class ZeroOneAdam(torch.optim.Optimizer):
"""Implements the 0/1 Adam algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/zero-one-adam/
For technical details please read https://arxiv.org/abs/2202.06009
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
var_freeze_step (int, optional): The latest step to update the variance,
using the notation from https://arxiv.org/abs/2202.06009, it denotes the
max{i|i in T_v}. Note that this is different from the freeze step from the
1-bit Adam. The var_freeze_step is usually the end of the learning rate warmup
and thus does not require tuning. (default: 100000)
var_update_scaler (int, optional): The interval to update the variance. Note that
the update policy for variance follows an exponential rule, where var_update_scaler
denotes the kappa in the 0/1 Adam paper. (default: 16)
        local_step_scaler (int, optional): Number of steps after which the local-step
            interval is scaled according to the learning rate policy. (default: 32678)
local_step_clipper (int, optional): The largest interval for local steps with
learning rate policy. This corresponds to the variable H in the 0/1 Adam paper.
(default: 16)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 0/1 Adam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
        cuda_aware (boolean, optional): Set True if the underlying MPI implementation
            supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
var_freeze_step=100000,
var_update_scaler=16,
local_step_scaler=32678,
local_step_clipper=16,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl'):
if amsgrad:
raise RuntimeError('0/1 Adam does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(ZeroOneAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.deepspeed = deepspeed
self.initialize = False
self.cuda_aware = cuda_aware
self.using_pipeline = False
self.var_freeze_step = var_freeze_step
self.var_update_scaler = var_update_scaler
self.local_step_scaler = local_step_scaler
self.local_step_clipper = local_step_clipper
self.freeze_key = False
self.reinitial_error_buffer = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
assert (
(TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2
), "Please use torch 1.8 or greater to enable NCCL backend in 0/1 Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
        """
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('0/1 Adam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if not self.initialize or 'worker_error' not in state.keys():
# Some scalars to help scale the variance update/local step policies
state['var_interval'] = 1
state['var_counter'] = 0
state['local_step_interval'] = 1
state['local_step_counter'] = 0
state['lrs'] = 0
state['tensor_size'] = torch.numel(p.data)
state['corrected_tensor_size'] = state['tensor_size']
if state['tensor_size'] % (self.size * self.divider) != 0:
state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] %
(self.size * self.divider)))
state['server_chunk_size'] = state['corrected_tensor_size'] // self.size
get_accelerator().empty_cache()
state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device)
state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device)
# Accumulation of momentum, i.e., the u variable in the 0/1 Adam paper
state['momentum_accumulator'] = torch.zeros_like(p.data)
get_accelerator().empty_cache()
# self.freeze_key = True
if not self.initialize and dist.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
comm_buffer = state['momentum_accumulator']
beta1, beta2 = group['betas']
state['step'] += 1
if self.initialize:
if self.freeze_key is False:
if state['step'] % state['var_interval'] == 0:
                            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
else:
if self.size > 1:
with torch.no_grad():
grad_onebit = self.comm_backend_handle.compressed_allreduce(
grad, state['worker_error'], state['server_error'], self.deepspeed.local_rank)
if 'exp_avg_mask' in group:
if grad_onebit.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=grad_onebit.device)
grad_onebit.mul_(group['exp_avg_mask'])
                            exp_avg.mul_(beta1).add_(grad_onebit, alpha=1 - beta1)
else:
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['lrs'] += group['lr']
grad = None
if not self.initialize:
if self.size > 1:
comm_buffer.set_(
self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'],
state['server_error'],
self.deepspeed.local_rank))
if 'exp_avg_mask' in group:
if comm_buffer.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device)
comm_buffer.mul_(group['exp_avg_mask'])
if self.initialize:
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
with torch.no_grad():
p.data.add_(-group['lr'] * update)
if self.freeze_key is True:
comm_buffer.add_(-group['lr'] * update)
if state['step'] % state['local_step_interval'] == 0 and self.freeze_key:
with torch.no_grad():
p.data.add_(-1 * comm_buffer)
comm_buffer.mul_(exp_avg_sq.sqrt() + group['eps'])
if self.size > 1:
comm_buffer.copy_(
self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'],
state['server_error'],
self.deepspeed.local_rank))
if 'exp_avg_mask' in group:
if comm_buffer.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device)
comm_buffer.mul_(group['exp_avg_mask'])
exp_avg.zero_().add_(comm_buffer / state['lrs'], alpha=-1)
p.data.add_(comm_buffer / (exp_avg_sq.sqrt() + group['eps']))
comm_buffer.zero_()
state['lrs'] = 0
# According to 0/1 Adam theory, a fixed variance would allow more accurate estimation of momentum
# However, in practice, we can also disable the manual freezing of variance, since the interval of
# updating variance will increase exponentially, so that it has negligible effect on the estimation.
if self.freeze_key is False:
if state['step'] % state['var_interval'] == 0:
state['var_counter'] += 1
if state['var_counter'] == self.var_update_scaler:
state['var_counter'] = 0
state['var_interval'] *= 2
if (state['step'] + 1) % state['var_interval'] == 0:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
else:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
else:
state['local_step_counter'] += 1
if state['local_step_counter'] == self.local_step_scaler:
state['local_step_counter'] = 0
state['local_step_interval'] = min(self.local_step_clipper,
state['local_step_interval'] * 2)
if not self.initialize:
print('Pop out errors', flush=True)
self.freeze_key = False
state.pop('worker_error')
state.pop('server_error')
if not self.initialize:
self.initialize = True
print(f"Finished the initialization step at rank {dist.get_rank()}")
return loss
if self.state[self.param_groups[0]['params'][0]]['step'] > self.var_freeze_step:
self.freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
if self.freeze_key is True and self.reinitial_error_buffer is False:
# We need to reinitialize the error buffers when local step > 1 since
# the errors will be logged for different metrics (gradient vs. accumulated momentum).
for group in self.param_groups:
for p in group['params']:
self.state[p]['worker_error'].zero_()
self.state[p]['server_error'].zero_()
self.reinitial_error_buffer = True
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
# Because at different stage exp_avg_mask may change (e.g.,
# BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
# in checkpoints but always use the one user provided in training script.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
if self.state[self.param_groups[0]['params'][0]]['step'] < self.var_freeze_step:
            self.freeze_key = False
if (self.state[self.param_groups[0]['params'][0]]['step'] +
1) % self.state[self.param_groups[0]['params'][0]]['var_interval'] == 0:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
else:
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
else:
            self.freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
self.reinitial_error_buffer = False
for group in self.param_groups:
for p in group['params']:
if 'worker_error' in self.state[p]:
self.state[p].pop('worker_error')
if 'server_error' in self.state[p]:
self.state[p].pop('server_error')
if 'momentum_accumulator' in self.state[p]:
self.state[p].pop('momentum_accumulator') | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/fp16/onebit/zoadam.py | zoadam.py |
# DeepSpeed Team
import types
import torch
import numpy as np
from deepspeed import comm as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from deepspeed.accelerator import get_accelerator
class OnebitLamb(torch.optim.Optimizer):
"""Implements the 1-bit Lamb algorithm. Currently GPU-only.
For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/
For technical details please see our paper https://arxiv.org/abs/2104.06069.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
freeze_step (int, optional): Number of steps for warmup (uncompressed)
stage before we start using compressed communication. (default 100000)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0)
min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in 1-bit Lamb!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
        cuda_aware (boolean, optional): Set True if the underlying MPI implementation
            supports CUDA-Aware communication. (default: False)
comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl')
coeff_beta (float, optional): coefficient used for computing
running averages of lamb coefficient (default: 0.9) note that you may want to
increase or decrease this beta depending on the freeze_step you choose, as
1/(1 - coeff_beta) should be smaller than or equal to freeze_step
factor_max (float, optional): maximum value of scaling factor to the frozen lamb
coefficient during compression stage (default: 4.0)
factor_min (float, optional): minimum value of scaling factor to the frozen lamb
coefficient during compression stage (default: 0.5)
factor_threshold (float, optional): threshold of how much the scaling factor can
fluctuate between steps (default: 0.1)
.. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
deepspeed=None,
lr=1e-3,
freeze_step=100000,
bias_correction=True,
betas=(0.9, 0.999),
eps=1e-8,
eps_inside_sqrt=False,
weight_decay=0.,
max_grad_norm=0.,
max_coeff=10.0,
min_coeff=0.01,
amsgrad=False,
cuda_aware=False,
comm_backend_name='nccl',
coeff_beta=0.9,
factor_max=4.0,
factor_min=0.5,
factor_threshold=0.1):
if amsgrad:
raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.')
defaults = dict(lr=lr,
bias_correction=bias_correction,
betas=betas,
eps=eps,
weight_decay=weight_decay,
max_grad_norm=max_grad_norm,
max_coeff=max_coeff,
min_coeff=min_coeff)
super(OnebitLamb, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
assert (dist.is_initialized())
self.deepspeed = deepspeed
self.lamb_freeze_key = False
self.initialize = False
self.freeze_step = freeze_step
self.cuda_aware = cuda_aware
self.coeff_beta = coeff_beta
self.factor_max = factor_max
self.factor_min = factor_min
self.factor_threshold = factor_threshold
self.using_pipeline = False
self.comm_backend_name = comm_backend_name
# Empty initializer. Set handle based on the comm backend as follows.
self.comm_backend_handle = None
if self.comm_backend_name == 'nccl':
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
assert (
(TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2
), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend"
assert dist.is_initialized() == True, "Please initialize the torch distributed backend."
from deepspeed.runtime.comm.nccl import NcclBackend
self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce')
self.comm_backend_handle = NcclBackend(self.deepspeed.mpu)
elif self.comm_backend_name == 'mpi':
from deepspeed.runtime.comm.mpi import MpiBackend
self.comm_backend_handle = MpiBackend(cuda_aware)
self.size = self.comm_backend_handle.size
self.divider = int(self.size * 8 / np.gcd(self.size, 8))
self.exp_avg_flat = []
self.dummy_exp_avg = {}
self.corrected_tensor_sizes = []
self.server_chunk_sizes = []
self.worker_errors = []
self.server_errors = []
self.lamb_coeffs = []
def step(self, closure=None, grads=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None] * len(self.param_groups)
# backward compatibility
        # assuming a list/generator of parameters means a single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
#remove the previous stats
del self.lamb_coeffs[:]
if self.lamb_freeze_key:
exp_avg_last_step = []
for group in self.param_groups:
exp_avg_last_step.append([self.state[p]['exp_avg'].detach().clone() for p in group['params']])
if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]:
# Compute the scaling_coeff for each momentum at the end of warmup stage.
# This is used to reduce compression error during compression stage.
momentum_scales = []
for group in self.param_groups:
momentum_scales.append([
(torch.norm(self.state[p]['exp_avg']) / np.sqrt(torch.numel(self.state[p]['exp_avg']))).item()
for p in group['params']
])
united_scale = sum([sum(x) for x in momentum_scales]) / sum([len(x) for x in momentum_scales])
for i, group in enumerate(self.param_groups):
for j, p in enumerate(group['params']):
self.state[p]['scaling_coeff'] = united_scale / momentum_scales[i][j]
for group, grads_this_group in zip(self.param_groups, grads_group):
if grads_this_group is None:
grads_this_group = [None] * len(group['params'])
bias_correction = 1 if group['bias_correction'] else 0
for p, grad in zip(group['params'], grads_this_group):
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('1-bit Lamb does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0 or (len(state) == 1 and 'scaling_coeff' in state.keys()):
state['step'] = 0
state['lamb_coeff_freeze'] = 0.0
state['last_factor'] = 1.0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
state['exp_avg_sq_fresh'] = torch.zeros_like(p.data)
if not self.initialize:
self.lamb_freeze_key = True
exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
'exp_avg_sq_fresh']
beta1, beta2 = group['betas']
max_coeff = group['max_coeff']
min_coeff = group['min_coeff']
state['step'] += 1
if self.lamb_freeze_key is False:
# warmup stage, baseline Lamb optimization
                    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if state['step'] == self.freeze_step:
exp_avg_sq_fresh.data = exp_avg_sq.detach().clone()
grad = None
if self.initialize:
weight_norm = p.data.pow(2).sum().sqrt()
update = exp_avg / (exp_avg_sq.sqrt() + group['eps'])
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
update_norm = update.pow(2).sum().sqrt()
lamb_coeff = 1.0
if weight_norm != 0 and update_norm != 0:
lamb_coeff = (weight_norm / update_norm).item()
if lamb_coeff > max_coeff:
lamb_coeff = max_coeff
if lamb_coeff < min_coeff:
lamb_coeff = min_coeff
if lamb_coeff != 1.0:
state['lamb_coeff_freeze'] = self.coeff_beta * state['lamb_coeff_freeze'] + (
1 - self.coeff_beta) * lamb_coeff
self.lamb_coeffs.append(lamb_coeff)
with torch.no_grad():
p.add_(-group['lr'] * lamb_coeff * update)
else:
# compression stage, update each momentum locally, then
# communicate based on the compressed_allreduce below
if self.initialize:
                        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg.mul_(self.state[p]['scaling_coeff'])
grad = None
# init fused momentum
if len(self.exp_avg_flat) == 0:
momentum_groups = []
tensor_size = 0
for group in self.param_groups:
for p in group['params']:
momentum_groups.append(self.state[p]['exp_avg'])
tensor_size += torch.numel(p.data)
corrected_tensor_size = tensor_size
if tensor_size % (self.size * self.divider) != 0:
difference = ((self.size * self.divider) - (tensor_size % (self.size * self.divider)))
corrected_tensor_size += difference
self.dummy_exp_avg[0] = torch.zeros(difference, device=momentum_groups[0].data.device)
momentum_groups.append(self.dummy_exp_avg[0])
self.corrected_tensor_sizes.append(corrected_tensor_size)
self.server_chunk_sizes.append(corrected_tensor_size // self.size)
self.exp_avg_flat.append(_flatten_dense_tensors([p.detach().clone() for p in momentum_groups]))
updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0], momentum_groups)
for p, q in zip(momentum_groups, updated_params):
p.data = q.data
if self.initialize and len(self.worker_errors) == 0:
get_accelerator().empty_cache()
for i in range(len(self.exp_avg_flat)):
self.worker_errors.append(
torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
self.server_errors.append(torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
get_accelerator().empty_cache()
if self.lamb_freeze_key:
if self.size > 1:
for i in range(len(self.exp_avg_flat)):
if not self.initialize:
get_accelerator().empty_cache()
self.worker_errors.append(
torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device))
self.server_errors.append(
torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device))
get_accelerator().empty_cache()
if dist.get_rank() == 0:
print("Cupy Buffers Initialized Successfully.")
self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[0],
self.server_errors[0], self.deepspeed.local_rank)
if dist.get_rank() == 0:
print('Pop out errors', flush=True)
del self.worker_errors[:]
del self.server_errors[:]
else:
self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[i],
self.server_errors[i], self.deepspeed.local_rank)
if self.lamb_freeze_key and self.initialize:
for i, group in enumerate(self.param_groups):
bias_correction = 1 if group['bias_correction'] else 0
for j, p in enumerate(group['params']):
state = self.state[p]
exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[
'exp_avg_sq_fresh']
beta1, beta2 = group['betas']
exp_avg.div_(self.state[p]['scaling_coeff'])
# Because 1-bit compression cannot represent exact zero, it is required to
# provide a momentum mask for those params that have constant exact zeros in their
# momentums, otherwise the compression error would keep accumulating.
# For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight
# always have exact zeros in its momentum for row 129 to 512, because it only
# learns up to seq length 128 while the model supports up to 512 seq length.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py about how
# to add this exp_avg_mask for BERT pre-training.)
if 'exp_avg_mask' in group:
if exp_avg.device != group['exp_avg_mask'].device:
group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device)
exp_avg.mul_(group['exp_avg_mask'])
grad_reconstruct = ((exp_avg - exp_avg_last_step[i][j] * beta1) / (1 - beta1))
                    exp_avg_sq_fresh.mul_(beta2).addcmul_(grad_reconstruct, grad_reconstruct, value=1 - beta2)
denom = exp_avg_sq.sqrt() + group['eps']
update_prelim = exp_avg / denom
if group['weight_decay'] > 0.0:
update = update_prelim + group['weight_decay'] * p.data
else:
update = update_prelim
lamb_coeff = 1.0
update_norm = update.pow(2).sum().sqrt()
denom_real = exp_avg_sq_fresh.sqrt() + group['eps']
factor = (denom / denom_real).max().item()
if group['weight_decay'] > 0.0:
update_ratio = min(1.0, (update_prelim.pow(2).sum().sqrt() / update_norm).item())
factor = factor * update_ratio + (1.0 - update_ratio)
if factor > self.factor_max:
factor = self.factor_max
if factor < self.factor_min:
factor = self.factor_min
if factor > state['last_factor'] * (1.0 + self.factor_threshold):
factor = state['last_factor'] * (1.0 + self.factor_threshold)
if factor < state['last_factor'] * (1.0 - self.factor_threshold):
factor = state['last_factor'] * (1.0 - self.factor_threshold)
state['last_factor'] = factor
lamb_coeff = state['lamb_coeff_freeze'] * factor
self.lamb_coeffs.append(lamb_coeff)
with torch.no_grad():
p.add_(-group['lr'] * lamb_coeff * update)
del exp_avg_last_step[:]
exp_avg_last_step = None
if not self.initialize:
self.lamb_freeze_key = False
self.initialize = True
print(f"Finished the initialization step at rank {dist.get_rank()}")
return loss
if self.lamb_freeze_key is False:
if state['step'] >= self.freeze_step:
print('OnebitLamb - starting compressed communication')
self.lamb_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
return loss
def load_state_dict(self, state_dict):
"""
Overrides load_state_dict() to add special handling when loading checkpoints
"""
# Because at different stage exp_avg_mask may change (e.g.,
# BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask
# in checkpoints but always use the one user provided in training script.
# (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.)
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint
for i, group in enumerate(self.param_groups):
if 'exp_avg_mask' in group:
state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask']
elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]:
state_dict['param_groups'][i].pop('exp_avg_mask')
super().load_state_dict(state_dict)
# need to reset the fused momentum since loading states will break the linking
del self.exp_avg_flat[:]
self.dummy_exp_avg.clear()
del self.corrected_tensor_sizes[:]
del self.server_chunk_sizes[:]
if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.")
if self.lamb_freeze_key is True:
self.lamb_freeze_key = False
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = True
else:
self.deepspeed.enable_backward_allreduce = True
for group in self.param_groups:
for p in group['params']:
self.state[p]['lamb_coeff_freeze'] = 0.0
self.state[p]['last_factor'] = 1.0
if 'scaling_coeff' in self.state[p]:
self.state[p].pop('scaling_coeff')
else:
if dist.get_rank() == 0:
print("Checkpoint loaded and OnebitLamb compression stage starts/continues.")
if self.lamb_freeze_key is False:
self.lamb_freeze_key = True
if self.using_pipeline:
self.deepspeed.pipeline_enable_backward_allreduce = False
else:
self.deepspeed.enable_backward_allreduce = False
# We reset the compression errors when loading checkpoints for 3 reasons:
# 1) The worker and server error at each GPU are distinct, so in current implementation
# only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors.
# If we want to save them correctly we need O(num_gpu*model_size) memory in order to
# gather all the error, which is a very large memory requirement. It's possible to save
# them in a distributed way, but it will make the checkpoint saving/loading much more complicated.
# 2) Even if we are able to save the compression errors correctly, you need to have the
# exact same number of GPUs in order to load them correctly.
# 3) We verified on BERT pre-training that occasionally resetting the compression error
# at checkpoint loading does not affect the convergence.
# However, please avoid frequent checkpoint loading which could break the error
# compensation mechanism thus affect the convergence.
del self.worker_errors[:]
del self.server_errors[:]
def get_lamb_coeffs(self):
return self.lamb_coeffs | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/fp16/onebit/lamb.py | lamb.py |
# DeepSpeed Team
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
from typing import List
import deepspeed
import torch
from deepspeed import comm as dist
from deepspeed.runtime.zero.mics_utils import (MiCS_CommGroups, create_mics_comm_groups, scale_tensors)
from deepspeed.runtime.zero.parameter_offload import (DeepSpeedZeRoOffload, is_zero_param)
from deepspeed.runtime.zero.partition_parameters import Init, AllGatherCoalescedHandle, ZeroParamStatus
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
from deepspeed.utils import instrument_w_nvtx, log_dist
from torch import Tensor
from torch.nn import Parameter
def has_hierarchical_all_gather_groups(comm_groups: MiCS_CommGroups):
result = False
if comm_groups.param_intra_node_group is not None and comm_groups.param_inter_node_shard_group is not None:
result = True
return result
class MiCS_AllGatherCoalescedHandle(AllGatherCoalescedHandle):
""" This handle assumes that no need to
copy data out from a contiguous tensor
"""
def __init__(self, allgather_handle, params: List[Parameter], partitions: List[Tensor], world_size: int) -> None:
super().__init__(allgather_handle, params, partitions, world_size)
def wait(self) -> None:
"""
"""
        # block the current stream until the all-gather op completes
instrument_w_nvtx(self.allgather_handle.wait)()
if self.complete:
return
for _, param in enumerate(self.params):
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
param.ds_status = ZeroParamStatus.AVAILABLE
self.complete = True
class MiCS_Init(Init):
def __init__(self,
module=None,
data_parallel_group=None,
mem_efficient_linear=True,
remote_device=None,
pin_memory=False,
config_dict_or_path=None,
config=None,
enabled=True,
dtype=None,
mpu=None):
"""A context manager to partition the model parameters during the model
construction with MiCS partition strategy. Model states are partitioned
to the number of devices specified via ``mics_shard_size`` field in the
deepspeed config json file. The context manager also introduces
hierarchical communication method to reduce the cost of inter-node
communications, which can be enabled with
``mics_hierarchical_params_gather`` field in deepspeed config.
Args:
module (``torch.nn.Module``, optional): If provided, partition the model as
if it was constructed in the context.
data_parallel_group (``deepspeed.comm`` process group, optional):
The group of processes to partition among. Defaults to all processes.
mem_efficient_linear (bool, optional): Replace
torch.nn.functional.linear with an implementation that allows
DeepSpeed to partition parameters. Defaults to ``True``.
remote_device (string, optional): The initial device to store model
weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
memory. The model may still be moved to GPU based on the
offload settings for training. Defaults to param offload device if a config is
defined, otherwise GPU.
pin_memory (bool, optional): Potentially increase performance by
using pinned memory for model weights. ``remote_device`` must be
``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration
for swapping fp16 params to NVMe.
config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
enabled (bool, optional): If ``False``, this context has no
effect. Defaults to ``True``.
dtype (``dtype``, optional): Can be used to change the data type of the parameters.
Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None``
mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.
This context follows the same logic as ``deepspeed.zero.Init()``, but
with the modification for partition size of each parameter.
Examples
--------
#. Allocate a model and partition it among all processes:
.. code-block:: python
# the config_dict_or_path is required to let the context manager know
# how partition the parameters.
# The configuration has to include the field ``mics_shard_size``
with deepspeed.zero.MiCS_Init(config_dict_or_path=ds_config):
model = MyLargeModel()
#. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:
.. code-block:: python
with deepspeed.zero.MiCS_Init(data_parallel_group=mpu.get_data_parallel_group(),
remote_device="cpu",
                                              pin_memory=True,
config_dict_or_path=ds_config):
model = MyLargeModel()
#. Partition an already-allocated model in CPU memory:
.. code-block:: python
model = deepspeed.zero.MiCS_Init(module=model,
config_dict_or_path=ds_config)
"""
assert config_dict_or_path is not None, "Must provide configuration for MiCS Initialization"
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, mpu)
if not dist.is_initialized():
dist.init_distributed()
assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm"
self.mics_comm_groups = create_mics_comm_groups(
_ds_config.mics_shard_size,
data_parallel_group,
hierarchical_allgather=_ds_config.mics_hierarchial_params_gather,
mpu=mpu)
super().__init__(module, data_parallel_group, mem_efficient_linear, remote_device, pin_memory,
config_dict_or_path, config, enabled, dtype, mpu)
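    # A minimal sketch of the config this context manager expects (illustrative; the MiCS
    # fields are read from the DeepSpeed config passed via ``config_dict_or_path``, with
    # ``mics_shard_size`` typically placed in the ``zero_optimization`` section alongside
    # stage 3, and the hierarchical-gather flag described in the docstring above optional):
    #
    #   ds_config = {
    #       "train_micro_batch_size_per_gpu": 1,
    #       "zero_optimization": {"stage": 3, "mics_shard_size": 8},
    #   }
    #   with MiCS_Init(config_dict_or_path=ds_config):
    #       model = MyLargeModel()   # MyLargeModel is the placeholder from the docstring example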
def _convert_to_deepspeed_param(self, param):
super()._convert_to_deepspeed_param(param)
# attach communication groups to every param
param.comm = self.mics_comm_groups
# record existing all_gather_coalesced implementation
# so that we can fallback later
old_all_gather_coalesced = param.all_gather_coalesced
def _param_all_gather_coalesced(params, safe_mode=False, param_buffers=None):
""""""
mics_comm_groups: MiCS_CommGroups = params[0].comm
hierarchical_all_gather = has_hierarchical_all_gather_groups(mics_comm_groups)
if dist.has_coalescing_manager() and hierarchical_all_gather:
return self._hierarchical_all_gather_params(params, param_buffers)
elif dist.has_coalescing_manager():
return self._flat_all_gather_with_coalescing_manager(params, param_buffers)
else:
return old_all_gather_coalesced(params, safe_mode)
# change the all_gather_coalesced method
param.all_gather_coalesced = _param_all_gather_coalesced
def _pre_all_gather(self, params, params_buffers=None):
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(params)
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
# ensure that each rank has params in same order. the allgather
# is done by flattening the parameter list into a single tensor that
# can be allgathered in a single call - this means that if each rank
# gives a list of the same parameters in a different order we will
# silently get incorrect parameter values, and have very difficult
# to debug correctness issues.
params = sorted(params, key=lambda p: p.ds_id)
return params, params_buffers
def _flat_all_gather_with_coalescing_manager(self, params, params_buffers=None):
""""""
# must have to change the status of the param
# and ensure they are on the device
params, params_buffers = self._pre_all_gather(params, params_buffers)
mics_comm_groups: MiCS_CommGroups = params[0].comm
param_shard_size = mics_comm_groups.param_shard_size
output_tensors = []
input_tensors = []
for i, p in enumerate(params):
t_size = p.ds_tensor.ds_numel * param_shard_size
if params_buffers is not None and params_buffers[i] is not None:
assert params_buffers[i].numel(
) == t_size, f'params_to_gather_buffers[{i}] size {params_buffers[i].numel()} does not match with t_size {t_size}'
flat_out = params_buffers[i]
else:
flat_out = torch.empty(t_size, dtype=p.dtype, device=self.local_device, requires_grad=False).view(-1)
output_tensors.append(flat_out)
_flat_input = p.ds_tensor.data.view(-1)
input_tensors.append(_flat_input)
all_gather_handle = dist.all_gather_coalesced(output_tensors,
input_tensors,
group=mics_comm_groups.param_shard_group,
async_op=True)
for idx, param in enumerate(params):
param.data = output_tensors[idx].narrow(0, 0, param.ds_numel).view(param.ds_shape).data
return MiCS_AllGatherCoalescedHandle(allgather_handle=all_gather_handle,
params=params,
partitions=[],
world_size=param_shard_size)
def _hierarchical_all_gather_params(self, params, params_buffers=None):
""""""
params, params_buffers = self._pre_all_gather(params, params_buffers)
mics_comm_groups: MiCS_CommGroups = params[0].comm
local_rank = dist.get_rank(group=mics_comm_groups.param_intra_node_group)
inter_node_comm_group = mics_comm_groups.param_inter_node_shard_group
intra_node_comm_group = mics_comm_groups.param_intra_node_group
param_shard_size = mics_comm_groups.param_shard_size
inter_node_size = dist.get_world_size(group=inter_node_comm_group)
intra_node_size = dist.get_world_size(group=intra_node_comm_group)
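# Two-level all-gather: shards are first gathered across nodes (inter-node group,
# synchronous), then across ranks within each node (intra-node group, asynchronous),
# so each rank pulls most of the data over the faster intra-node links.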
param_tensors = []
for i, p in enumerate(params):
param_size = p.ds_tensor.ds_numel * param_shard_size
if params_buffers is not None and params_buffers[i] is not None:
assert params_buffers[i].numel(
) == param_size, f'param_buffers[{i}] size {params_buffers[i].numel()} does not match with param_size {param_size}'
param_tensor = params_buffers[i]
else:
param_tensor = torch.empty(param_size, dtype=p.dtype, device=self.local_device,
requires_grad=False).view(-1)
param_tensors.append(param_tensor)
# inter node all-gather
inter_outputs = []
inter_inputs = []
for i, p in enumerate(params):
inter_size = p.ds_tensor.ds_numel * inter_node_size
_out = param_tensors[i].narrow(0, local_rank * inter_size, inter_size)
inter_outputs.append(_out)
inter_inputs.append(p.ds_tensor.data.view(-1).to(self.local_device))
# sync enqueue
dist.all_gather_coalesced(inter_outputs, inter_inputs, group=inter_node_comm_group, async_op=False)
# intra node all-gather
intra_outputs = []
intra_inputs = []
for i, p in enumerate(params):
# partition the param into multiple chunks for the all-gather, because the
# inter-node all-gather outputs are laid out in contiguous memory, while in
# the full parameter memory those inter-node pieces live at different
# locations.
# each chunk is an intra-node output
param_chunk = param_tensors[i].view(
(inter_node_size, intra_node_size, p.ds_tensor.ds_numel)).narrow(1, local_rank, 1)
param_chunk.copy_(inter_outputs[i].detach().clone().view(param_chunk.size()))
output_chunks = torch.chunk(param_tensors[i], inter_node_size)
for j, _out in enumerate(output_chunks):
intra_chunk_size = intra_node_size * p.ds_tensor.ds_numel
local_offset = local_rank * p.ds_tensor.ds_numel
_in = param_tensors[i].narrow(0, j * intra_chunk_size + local_offset, p.ds_tensor.ds_numel)
intra_outputs.append(_out)
intra_inputs.append(_in)
all_gather_handle = dist.all_gather_coalesced(intra_outputs,
intra_inputs,
group=intra_node_comm_group,
async_op=True)
for i, param in enumerate(params):
param.data = param_tensors[i].narrow(0, 0, param.ds_numel).view(param.ds_shape).data
return MiCS_AllGatherCoalescedHandle(
allgather_handle=all_gather_handle,
params=params,
partitions=[],
world_size=param_shard_size,
)
def get_partition_dp_group(self, param):
return param.comm.param_shard_group
def get_partition_rank(self):
return self.mics_comm_groups.param_shard_rank
@property
def num_partitions(self):
return self.mics_comm_groups.param_shard_size
class MiCS_Offload(DeepSpeedZeRoOffload):
""" Wrapper to change the behavior for parameter sharding
"""
def __init__(self,
module,
timers,
ds_config,
overlap_comm=True,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
offload_param_config=None,
mpu=None):
super().__init__(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance,
max_live_parameters, param_persistence_threshold, model_persistence_threshold,
offload_param_config, mpu)
def _convert_to_zero_parameters(self, ds_config, module, mpu):
""" overload the parent class function for convert the parameters
"""
log_dist(f'Convert to zero parameters from MiCS Offload manager', ranks=[0])
non_zero_params = [p for p in module.parameters() if not is_zero_param(p)]
if non_zero_params:
zero_params = [p for p in module.parameters() if is_zero_param(p)]
if zero_params:
zero_params[0].convert_to_zero_parameters(param_list=non_zero_params)
else:
group = None
if mpu:
group = mpu.get_data_parallel_group()
MiCS_Init(module=module,
data_parallel_group=group,
dtype=self.dtype,
config_dict_or_path=ds_config,
remote_device=self.offload_device,
pin_memory=self.offload_param_pin_memory,
mpu=mpu)
class MiCS_Optimizer(DeepSpeedZeroOptimizer_Stage3):
"""
MiCS optimizer: a ZeRO Stage 3 optimizer whose states and gradients are sharded within MiCS shard groups and all-reduced across replication groups.
"""
def __init__(self,
module,
init_optimizer,
timers,
ds_config,
static_loss_scale=1,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
dp_process_group=None,
reduce_scatter=True,
overlap_comm=False,
offload_optimizer_config=None,
offload_param_config=None,
sub_group_size=1000000000000,
mpu=None,
clip_grad=0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1,
gradient_accumulation_steps=1,
elastic_checkpoint=False,
aio_config=None):
log_dist("Init MiCS optimizer", ranks=[0])
super().__init__(module, init_optimizer, timers, ds_config, static_loss_scale, dynamic_loss_scale,
dynamic_loss_args, verbose, contiguous_gradients, reduce_bucket_size, prefetch_bucket_size,
max_reuse_distance, max_live_parameters, param_persistence_threshold,
model_persistence_threshold, dp_process_group, reduce_scatter, overlap_comm,
offload_optimizer_config, offload_param_config, sub_group_size, mpu, clip_grad,
communication_data_type, postscale_gradients, gradient_predivide_factor,
gradient_accumulation_steps, elastic_checkpoint, aio_config)
first_param = next(module.parameters())
# overload the dp_process_group and partition_count
self.dp_process_group = first_param.comm.param_shard_group
self.partition_count = first_param.comm.param_shard_size
def initialize_ds_offload(self, module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance,
max_live_parameters, param_persistence_threshold, model_persistence_threshold,
offload_optimizer_config, mpu):
return MiCS_Offload(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance,
max_live_parameters, param_persistence_threshold, model_persistence_threshold,
offload_optimizer_config, mpu)
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None:
grad_buffers = super().partition_grads(params_to_release, grad_partitions)
# perform all-reduce among replication groups
# the function will perform accumulation boundary check
self.allreduce_mics_shard_grads(params_to_release, grad_buffers)
@instrument_w_nvtx
def allreduce_mics_shard_grads(self, params, partitioned_grads_buffers: List[Tensor]):
"""
"""
# TODO: improve the condition check
if not self.is_gradient_accumulation_boundary or \
len(partitioned_grads_buffers) == 0:
return
mics_comm_groups: MiCS_CommGroups = params[0].comm
param_repli_group = mics_comm_groups.param_repli_group
param_repli_size = mics_comm_groups.param_repli_size
if param_repli_size is None or param_repli_size <= 1:
return
if not partitioned_grads_buffers[0].is_cuda:
raise RuntimeError("Local sharding has no support for CPU offloading")
if dist.has_all_reduce_coalesced():
scale_tensors(partitioned_grads_buffers, param_repli_size)
dist.all_reduce_coalesced(tensors=partitioned_grads_buffers, group=param_repli_group)
else:
# manually coalescing all-reduce
aggregated_buffer: Tensor = torch.cat(partitioned_grads_buffers)
aggregated_buffer.div_(param_repli_size)
dist.all_reduce(aggregated_buffer, group=param_repli_group)
offset = 0
for grad_buff in partitioned_grads_buffers:
grad_buff.view(-1).copy_(aggregated_buffer.narrow(0, offset, grad_buff.numel()))
offset += grad_buff.numel()
# TODO: Support different/changing load/save DP degree.
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
r""" Loading the MiCS checkpoints
TODO: move the implementation from zhen/merged_ds_master branch
"""
raise NotImplementedError("Not implemented for loading MiCS checkpoints") | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/mics.py | mics.py |
# DeepSpeed Team
from pydantic import Field, validator
from enum import Enum
from pathlib import Path
from deepspeed.runtime.config_utils import DeepSpeedConfigModel, pp_int
class OffloadDeviceEnum(str, Enum):
""" Enum for valid offload devices """
none = "none"
cpu = "cpu"
nvme = "nvme"
class DeepSpeedZeroOffloadParamConfig(DeepSpeedConfigModel):
""" Set options for parameter offload. Valid only with stage 3. """
device: OffloadDeviceEnum = "none"
"""
Device memory to offload model parameters. Supported options are `cpu` and
`nvme`.
"""
nvme_path: Path = None
""" Filesystem path for NVMe device for parameter offloading. """
buffer_count: int = Field(5, ge=0)
""" Number of buffers in buffer pool for parameter offloading to NVMe. """
buffer_size: int = Field(pp_int(1e8), ge=0)
""" Size of buffers in buffer pool for parameter offloading to NVMe. """
max_in_cpu: int = Field(pp_int(1e9), ge=0)
"""
Number of parameter elements to maintain in CPU memory when offloading to
NVMe is enabled.
"""
pin_memory: bool = False
"""
Offload to page-locked CPU memory. This could boost throughput at the cost
of extra memory overhead.
"""
class DeepSpeedZeroOffloadOptimizerConfig(DeepSpeedConfigModel):
""" Set options for optimizer offload. Valid with stage 1, 2, and 3. """
device: OffloadDeviceEnum = "none"
"""
Device memory to offload optimizer state. Supported options are `cpu` and
`nvme`. Optimizer computation is offloaded to CPU regardless of the device option.
"""
nvme_path: Path = None
""" Filesystem path for NVMe device for optimizer state offloading. """
buffer_count: int = Field(4, ge=0)
"""
Number of buffers in buffer pool for optimizer state offloading to NVMe.
This should be at least the number of states maintained per parameter by
the optimizer. For example, Adam optimizer has 4 states (parameter,
gradient, momentum, and variance).
"""
pin_memory: bool = False
"""
Offload to page-locked CPU memory. This could boost throughput at the cost
of extra memory overhead.
"""
pipeline_read: bool = False
"""
For tile-based optimizer step processing, overlap read of next tile with
computation of current tile. Used in ZeRO-Infinity.
"""
pipeline_write: bool = False
"""
For tile-based optimizer step processing, overlap write of previous tile
with computation of current tile.
"""
fast_init: bool = False
""" Enable fast optimizer initialization when offloading to NVMe. """
@validator("pipeline_read", "pipeline_write", always=True)
def set_pipeline(cls, field_value, values):
values["pipeline"] = field_value or values.get("pipeline", False)
return field_value | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/offload_config.py | offload_config.py |
# DeepSpeed Team
import torch
import deepspeed
from deepspeed.runtime.utils import partition_uniform as partition
def split_tensor_along_last_dim(tensor, partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension. Adapted from Megatron-LM.
Arguments:
tensor: input tensor.
partitions: list of partition sizes to supply to torch.split
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
# Split.
tensor_list = torch.split(tensor, partitions, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
class TiledLinear(torch.nn.Module):
def __init__(self,
in_features,
out_features,
bias=True,
in_splits=1,
out_splits=1,
input_is_already_split=False,
combine_out_splits=True,
linear_cls=torch.nn.Linear,
init_linear=None,
**kwargs):
"""A replacement for ``torch.nn.Linear`` that works with ZeRO-3 to reduce
memory requirements via tiling.
TiledLinear breaks the input and output dimensions of a linear layer
into tiles that are processed in sequence. This class enables huge
linear layers when combined with ZeRO-3 because inactive tiles can be
partitioned and offloaded.
.. note::
We recommend using as few tiles as necessary. Tiling
significantly reduces memory usage, but can reduce throughput
for inexpensive layers. This is due to the smaller kernels having
less parallelism and lower arithmetic intensity, while
introducing more frequent synchronization and communication.
Args:
in_features (int): See ``torch.nn.Linear``
out_features (int): See ``torch.nn.Linear``
bias (bool, optional): See ``torch.nn.Linear``
in_splits (int, optional): The number of tiles along the input dimension. Defaults to 1.
out_splits (int, optional): The number of tiles along the output dimension. Defaults to 1.
input_is_already_split (bool, optional): If set to ``True``, assume that the ``input_`` passed
to ``forward()`` is already split into ``in_splits`` chunks. Defaults to ``False``.
combine_out_splits (bool, optional): If set to ``False``, do not combine the ``out_splits`` outputs
into a single tensor. Defaults to ``True``.
linear_cls (class, optional): The underlying class to build individual tiles.
Defaults to ``torch.nn.Linear``.
init_linear (``torch.nn.Linear``, optional): If set, copy the parameters of
``init_linear``. Useful for debugging. Defaults to ``None``.
kwargs (dict, optional): additional keyword arguments to provide to ``linear_cls()``.
Raises:
RuntimeError: ``in_splits`` must be within the range [1, in_features].
RuntimeError: ``out_splits`` must be within the range [1, out_features].
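Example:
A minimal usage sketch (sizes are illustrative; assumes ``TiledLinear`` is reachable
as ``deepspeed.zero.TiledLinear``):
.. code-block:: python
# tile a large projection into a 4 x 4 grid of smaller linears
proj = deepspeed.zero.TiledLinear(in_features=4096,
out_features=16384,
in_splits=4,
out_splits=4)
y = proj(x)  # same output shape as torch.nn.Linear(4096, 16384)(x)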
"""
super().__init__()
if (in_splits < 1) or (in_splits > in_features):
raise RuntimeError('in splits must be in range [1, in_features].')
if (out_splits < 1) or (out_splits > out_features):
raise RuntimeError('out splits must be in range [1, out_features].')
# global, not necessarily local
self.in_features = in_features
self.out_features = out_features
self.use_bias = bias
self.out_splits = out_splits
self.in_splits = in_splits
self.input_is_already_split = input_is_already_split
self.combine_out_splits = combine_out_splits
# Build partition-lists. These are CSR-style splits [0, part0, part1, ..., features]
# For example, row_parts[p] gives the start of partition p and row_parts[p+1]
# is the exclusive end.
self.in_parts = partition(num_items=in_features, num_parts=in_splits)
self.out_parts = partition(num_items=out_features, num_parts=out_splits)
assert len(self.out_parts) == out_splits + 1
assert len(self.in_parts) == in_splits + 1
assert self.out_parts[0] == 0
assert self.out_parts[out_splits] == out_features
assert self.in_parts[in_splits] == in_features
self.linears = torch.nn.ModuleList()
for out_id in range(out_splits):
self.linears.append(torch.nn.ModuleList())
local_out_dim = self.out_parts[out_id + 1] - self.out_parts[out_id]
for in_id in range(in_splits):
# if the input dimension is split, only the last tile needs a bias
local_bias = bias if in_id == (in_splits - 1) else False
local_in_dim = self.in_parts[in_id + 1] - self.in_parts[in_id]
local = linear_cls(local_in_dim, local_out_dim, bias=local_bias, **kwargs)
self.linears[out_id].append(local)
# Optionally initialize with a known tensor
if init_linear is not None:
self.copy_params_from(init_linear)
def forward(self, input_):
if self.in_splits > 1 and not self.input_is_already_split:
input_parts = partition(input_.shape[-1], self.in_splits)
split_sizes = [input_parts[p + 1] - input_parts[p] for p in range(self.in_splits)]
inputs = self._split_global_input(input_, split_sizes)
elif self.in_splits > 1:
inputs = input_
assert len(
inputs) == self.in_splits, f"Col splits {self.in_splits} does not match input splits {len(inputs)}"
else:
# no splits
inputs = [input_]
outputs = [None] * self.out_splits
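# Tiled matmul: each output split accumulates the partial products from every
# input split, i.e. outputs[out_id] = sum over in_id of linears[out_id][in_id](inputs[in_id]);
# the bias term (if any) is contributed only by the last input split's linear.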
for out_id in range(self.out_splits):
for in_id in range(self.in_splits):
local_output = self.linears[out_id][in_id](inputs[in_id])
outputs[out_id] = self._reduce_local_output(in_id=in_id,
out_id=out_id,
current_out=outputs[out_id],
new_out=local_output)
if self.combine_out_splits:
return self._combine_output_splits(outputs)
return outputs
def _split_global_input(self, input, split_sizes):
"""Partition an input tensor along the last dimension, aligned with given splits.
Subclasses should override this method to account for new input types.
Args:
input (Tensor): The tensor to partition along the last dimension.
split_sizes (List[int]): The size of each partition.
Returns:
List[Any]: A list of the chunks of ``input``.
"""
return split_tensor_along_last_dim(input, split_sizes)
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduce (sum) a new local result into the existing local results.
Subclasses should override this method.
For a given ``out_id``, this method is called once per input split; the first
call (``in_id == 0``) amounts to a simple assignment.
Args:
in_id (int): The input split that produced ``new_out``.
out_id (int): The output split that produced ``new_out``.
current_out (Any): The reduced form of all previous ``out_id`` results.
new_out (Any): The local result from forward (``in_id``, ``out_id``).
Returns:
Any: The combined result of ``current_out`` and ``new_out``.
"""
if current_out is None:
# this clone is necessary to preserve autograd;
# there is an issue with in-place updates for outputs that are views
return new_out.clone()
else:
return current_out + new_out
def _combine_output_splits(self, outputs):
"""Join the splits of the output into a single result.
Args:
outputs (List[Any]): The reduced outputs for each output split.
Returns:
Any: The combined outputs.
"""
assert len(outputs) == self.out_splits
return torch.cat(outputs, dim=-1)
@torch.no_grad()
def copy_params_from(self, other):
"""Copy the weight and bias data from ``other``.
This is especially useful for reproducible initialization and testing.
Equivalent to:
.. code-block:: python
with torch.no_grad():
self.weight.copy_(other.weight)
if self.bias is not None:
self.bias.copy_(other.bias)
.. note::
If ZeRO-3 is enabled, this is a collective operation and the
updated parameters of data-parallel rank 0 will be visible on all
ranks. See :class:`deepspeed.zero.GatheredParameters` for more
information.
Args:
other (``torch.nn.Linear``): the linear layer to copy from.
"""
assert hasattr(other, 'weight')
assert other.weight.size() == (self.out_features, self.in_features)
if self.use_bias:
assert hasattr(other, 'bias')
assert other.bias is not None
assert other.bias.size() == (self.out_features, )
else:
assert other.bias is None
for row in range(self.out_splits):
rstart = self.out_parts[row]
rstop = self.out_parts[row + 1]
for col in range(self.in_splits):
cstart = self.in_parts[col]
cstop = self.in_parts[col + 1]
local = self.linears[row][col]
global_weight = other.weight[rstart:rstop, cstart:cstop]
with deepspeed.zero.GatheredParameters(local.weight, modifier_rank=0):
local.weight.copy_(global_weight)
if local.bias is not None:
with deepspeed.zero.GatheredParameters(local.bias, modifier_rank=0):
local.bias.data.copy_(other.bias[rstart:rstop].data)
class TiledLinearReturnBias(TiledLinear):
"""Wrapper for a Linear class that returns its own bias parameter, such as
used by Megatron-LM.
"""
def _reduce_local_output(self, in_id, out_id, current_out, new_out):
"""Reduces output tensors, but not the returned bias. """
if current_out is not None:
old_tensor, old_bias = current_out
else:
old_tensor, old_bias = None, None
assert isinstance(new_out, tuple)
assert len(new_out) == 2
tensor, bias = new_out
assert tensor is not None
tensor = super()._reduce_local_output(in_id=in_id, out_id=out_id, current_out=old_tensor, new_out=tensor)
if bias is None:
bias = old_bias
return tensor, bias
def _combine_output_splits(self, outputs):
# stack output tensors
tensors = [o[0] for o in outputs]
tensor = super()._combine_output_splits(tensors)
# stack biases if applicable
biases = [o[1] for o in outputs if o[1] is not None]
if len(biases) > 0:
bias = super()._combine_output_splits(biases)
else:
bias = None
return tensor, bias | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/tiling.py | tiling.py |
# DeepSpeed Team
import torch
import os
from deepspeed import comm as dist
from packaging import version as pkg_version
from collections import OrderedDict
from deepspeed.runtime import ZeROOptimizer
from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler
from deepspeed.runtime.utils import (bwc_tensor_model_parallel_rank, get_global_norm, empty_cache, see_memory_usage,
inf, is_model_parallel_parameter, align_dense_tensors, all_gather_dp_groups)
from deepspeed.runtime.zero.config import ZeroStageEnum
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.utils import logger
from deepspeed.moe.utils import is_moe_param
from deepspeed.git_version_info import version
from deepspeed.runtime.constants import PIPE_REPLICATED
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.checkpoint.constants import (DS_VERSION, GROUP_PADDINGS, PARTITION_COUNT,
SINGLE_PARTITION_OF_FP32_GROUPS, BASE_OPTIMIZER_STATE, CLIP_GRAD,
ZERO_STAGE, PARAM_SLICE_MAPPINGS)
from deepspeed.utils import link_hp_params
from deepspeed.checkpoint import enable_universal_checkpoint
# Toggle this to True to enable a correctness test comparing runs with and
# without gradient partitioning
pg_correctness_test = False
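# No-op stand-in that shadows the builtin input(), presumably so any leftover
# interactive debugging prompts below cannot block training.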
def input(msg):
return
def split_half_float_double(tensors):
device_type = get_accelerator().device_name()
dtypes = [
"torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type),
"torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type)
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append(bucket)
return buckets
def isclose(a, b, rtol=1e-09, atol=0.0):
return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)
def lcm(x, y):
from math import gcd  # fractions.gcd was removed in Python 3.9
return x * y // gcd(x, y)
def get_alignment_padding(tensor_list, alignment):
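# pad the total element count up to the next multiple of `alignment`
# (e.g. tensors totaling 10 elements with alignment 4 need 2 elements of padding)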
num_elements = sum([tensor.numel() for tensor in tensor_list])
remainder = num_elements % alignment
return (alignment - remainder) if remainder else 0
def move_to_cpu(tensor_list):
for tensor in tensor_list:
tensor.data = tensor.data.cpu()
def print_rank_msg(msg):
print(f"rank {dist.get_rank()} - {msg}")
def _get_padded_tensor(src_tensor, size):
if src_tensor.numel() >= size:
return src_tensor
padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
slice_tensor.data.copy_(src_tensor.data)
return padded_tensor
class DeepSpeedZeroOptimizer(ZeROOptimizer):
"""
DeepSpeedZeroOptimizer is designed to reduce the memory footprint
required for training large deep learning models.
For more details please see ZeRO: Memory Optimizations Toward Training Trillion Parameter Models
https://arxiv.org/abs/1910.02054
For usage examples, refer to TODO: DeepSpeed Tutorial
"""
def __init__(self,
init_optimizer,
param_names,
timers,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
allgather_bucket_size=5000000000,
dp_process_group=None,
expert_parallel_group=None,
expert_data_parallel_group=None,
reduce_scatter=True,
overlap_comm=False,
cpu_offload=False,
mpu=None,
clip_grad=0.0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1.0,
gradient_accumulation_steps=1,
ignore_unused_parameters=True,
partition_grads=True,
round_robin_gradients=False,
has_moe_layers=False,
fp16_master_weights_and_gradients=False,
elastic_checkpoint=False):
if dist.get_rank() == 0:
logger.info(f"Reduce bucket size {reduce_bucket_size}")
logger.info(f"Allgather bucket size {allgather_bucket_size}")
logger.info(f"CPU Offload: {cpu_offload}")
logger.info(f'Round robin gradient partitioning: {round_robin_gradients}')
# The fused optimizer does all the work. We need this layer for two reasons:
# 1. maintain the same user API as apex.fp16_utils
# 2. keep common stuff here in case we need to add a new fused optimizer later
self.elastic_checkpoint = elastic_checkpoint
self.param_names = param_names
self.mpu = mpu
# differences from apex.fp16_utils:
# - assume all model params in fp16
# - assume all params requires grad
# - flat by groups, not keeping state. TODO: remove state explicitly?
# - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# Load pre-built or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
# ZeRO stage 1 (False) or 2 (True)
self.partition_gradients = partition_grads
self.zero_stage_string = "ZeRO-2" if partition_grads else "ZeRO-1"
self.timers = timers
self.reduce_scatter = reduce_scatter
self.overlap_comm = overlap_comm
self.cpu_offload = cpu_offload
self.deepspeed_adam_offload = cpu_offload
self.device = get_accelerator().current_device_name() if not self.cpu_offload else 'cpu'
self.dp_process_group = dp_process_group
#expert parallel group
self.ep_process_group = expert_parallel_group
#data parallel group for experts
self.expert_dp_process_group = expert_data_parallel_group
#data parallel size for non-experts
dp_size = dist.get_world_size(group=self.dp_process_group)
# For MoE models this may be different for different param groups;
# it will be modified during MoE setup later in __init__
self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))]
self.partition_count = [dp_size for i in range(len(self.optimizer.param_groups))]
self.is_gradient_accumulation_boundary = True
# CPU-Offload requires contiguous gradients
self.contiguous_gradients = contiguous_gradients or cpu_offload
self.has_moe_layers = has_moe_layers
if self.has_moe_layers:
self._configure_moe_settings()
self._global_grad_norm = 0.
if mpu is None:
self.model_parallel_group = None
self.model_parallel_world_size = 1
self.model_parallel_rank = 0
else:
self.model_parallel_group = mpu.get_model_parallel_group()
self.model_parallel_world_size = mpu.get_model_parallel_world_size()
self.model_parallel_rank = bwc_tensor_model_parallel_rank(mpu)
self.overflow = False
self.clip_grad = clip_grad
self.communication_data_type = communication_data_type
self.gradient_predivide_factor = gradient_predivide_factor
self.postscale_gradients = postscale_gradients
self.gradient_accumulation_steps = gradient_accumulation_steps
self.micro_step_id = 0
self.ignore_unused_parameters = ignore_unused_parameters
self.round_robin_gradients = round_robin_gradients
self.extra_large_param_to_reduce = None
self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients
if self.fp16_master_weights_and_gradients:
assert self.cpu_offload and type(self.optimizer) in [DeepSpeedCPUAdam], \
f"fp16_master_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32."\
f"Currently only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}." \
f"Either disable fp16_master_weights_and_gradients or enable {self.zero_stage_string} Offload with DeepSpeedCPUAdam."
if self.reduce_scatter:
valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32)
assert self.communication_data_type in valid_reduce_scatter_dtypes, f"{self.zero_stage_string} supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'"
assert self.gradient_predivide_factor == 1.0, f"gradient_predivide_factor != 1.0 is not yet supported with {self.zero_stage_string} with reduce scatter enabled"
assert self.postscale_gradients, f"pre-scale gradients is not yet supported with {self.zero_stage_string} with reduce scatter enabled"
# param flattened by groups
self.bit16_groups = []
self.bit16_groups_flat = []
# param partitioned by data parallel degree
# this will contain a list of equal sized tensors
# each of which will be updated by a different process
self.parallel_partitioned_bit16_groups = []
# a single 32-bit partition of the parallel partitioned parameters
# that this process will update
self.single_partition_of_fp32_groups = []
# param partition info
# These are the parameters in each group that will not be updated by this process directly
self.params_not_in_partition = []
# These are the parameters that will be updated by this process directly
self.params_in_partition = []
# Offset from the first parameter in self.params_in_partition;
# the parameter boundaries may not align with partition boundaries
# so we need to keep track of the offset
self.first_offset = []
# number of elements per partition in each group
self.partition_size = []
# align nccl all-gather send buffers to 4-byte boundary
self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2
assert (
allgather_bucket_size % self.nccl_start_alignment_factor == 0
), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} "
self.all_reduce_print = False
self.dtype = self.optimizer.param_groups[0]['params'][0].dtype
self.round_robin_bit16_groups = []
self.round_robin_bit16_indices = []
# Use different parallel groups to do all_to_all/reduce related things
# padding on each partition for alignment purposes
self.groups_padding = []
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
# push this group to list before modify
# TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group
trainable_parameters = [param for param in param_group['params'] if param.requires_grad]
self.bit16_groups.append(trainable_parameters)
# not sure why apex was cloning the weights before flattening
# removing cloning here
see_memory_usage(f"Before moving param group {i} to CPU")
# move all the parameters to cpu to free up GPU space for creating flat buffer
move_to_cpu(self.bit16_groups[i])
empty_cache()
see_memory_usage(f"After moving param group {i} to CPU", force=False)
# Reorder group parameters for load balancing of gradient partitioning during backward among ranks.
# This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks.
# For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging
# to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m).
if self.round_robin_gradients:
round_robin_tensors, round_robin_indices = self._round_robin_reorder(
self.bit16_groups[i], dist.get_world_size(group=self.real_dp_process_group[i]))
else:
round_robin_tensors = self.bit16_groups[i]
round_robin_indices = list(range(len(self.bit16_groups[i])))
self.round_robin_bit16_groups.append(round_robin_tensors)
self.round_robin_bit16_indices.append(round_robin_indices)
# create flat buffer in CPU and move to GPU
self.bit16_groups_flat.append(
self.flatten_dense_tensors_aligned(
self.round_robin_bit16_groups[i],
self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i])).to(
get_accelerator().current_device_name()))
see_memory_usage(f"After flattening and moving param group {i} to GPU", force=False)
# Record padding required for alignment
if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
padding = self.bit16_groups_flat[i].numel() - sum(
[t.numel() for t in self.round_robin_bit16_groups[i]])
else:
padding = 0
self.groups_padding.append(padding)
if dist.get_rank(group=self.real_dp_process_group[i]) == 0:
see_memory_usage(f"After Flattening and after emptying param group {i} cache", force=False)
# set model bit16 weight to slices of flattened buffer
self._update_model_bit16_weights(i)
# divide the flat weights into near-equal partitions, one per data-parallel rank
# each process will compute on a different partition
data_parallel_partitions = self.get_data_parallel_partitions(self.bit16_groups_flat[i], i)
self.parallel_partitioned_bit16_groups.append(data_parallel_partitions)
# verify that data partition start locations are 4-byte aligned
for partitioned_data in data_parallel_partitions:
assert (partitioned_data.data_ptr() % (2 * self.nccl_start_alignment_factor) == 0)
# A partition of the fp32 master weights that will be updated by this process.
# Note that the params in single_partition_of_fp32_groups is cloned and detached
# from the origin params of the model.
if not fp16_master_weights_and_gradients:
self.single_partition_of_fp32_groups.append(self.parallel_partitioned_bit16_groups[i][partition_id].to(
self.device).clone().float().detach())
else:
self.single_partition_of_fp32_groups.append(self.parallel_partitioned_bit16_groups[i][partition_id].to(
self.device).clone().half().detach())
# Set local optimizer to have flat params of its own partition.
# After this, the local optimizer will only contain its own partition of params.
# In that case, the local optimizer only saves the states(momentum, variance, etc.) related to its partition's params(zero stage1).
self.single_partition_of_fp32_groups[
i].requires_grad = True # keep this in case internal optimizer uses it
param_group['params'] = [self.single_partition_of_fp32_groups[i]]
partition_size = len(self.bit16_groups_flat[i]) / dist.get_world_size(group=self.real_dp_process_group[i])
params_in_partition, params_not_in_partition, first_offset = self.get_partition_info(
self.round_robin_bit16_groups[i], partition_size, partition_id)
self.partition_size.append(partition_size)
self.params_in_partition.append(params_in_partition)
self.params_not_in_partition.append(params_not_in_partition)
self.first_offset.append(first_offset)
for rank in range(dist.get_world_size()):
if dist.get_rank() == rank:
print(
f"Rank: {rank} partition count {self.partition_count} and sizes{[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i,p in enumerate(self.single_partition_of_fp32_groups)]} "
)
dist.barrier()
self.reduce_bucket_size = int(reduce_bucket_size)
self.allgather_bucket_size = int(allgather_bucket_size)
self.reduction_event = get_accelerator().Event(enable_timing=False, blocking=False)
self.reduction_stream = get_accelerator().Stream()
self.cpu_computation_stream = get_accelerator().Stream()
self.copy_grad_stream = get_accelerator().Stream()
self.callback_queued = False
self.param_dict = {}
# map between param_id and bool to specify if a param is in this partition
self.is_param_in_current_partition = {}
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.elements_in_ipg_bucket = 0
self.params_already_reduced = []
self._release_ipg_buffers()
self.previous_reduced_grads = None
self.ipg_bucket_has_moe_params = False
# simplified param id
self.param_id = {}
# assign a unique id to each individual parameter
largest_param_numel = 0
count = 0
for i, params_group in enumerate(self.bit16_groups):
for param in params_group:
unique_id = id(param)
self.param_id[unique_id] = count
self.param_dict[count] = param
self.params_already_reduced.append(False)
if param.numel() > largest_param_numel:
largest_param_numel = param.numel()
count = count + 1
for param_group in self.params_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(param)] = True
for param_group in self.params_not_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(param)] = False
if self.cpu_offload:
self.accumulated_grads_in_cpu = {}
self.norm_for_param_grads = {}
self.local_overflow = False
self.grad_position = {}
self.temp_grad_buffer_for_cpu_offload = get_accelerator().pin_memory(
torch.zeros(largest_param_numel, device=self.device, dtype=self.dtype))
self.temp_grad_buffer_for_gpu_offload = torch.zeros(largest_param_numel,
device=get_accelerator().current_device_name(),
dtype=self.dtype)
for i, params_group in enumerate(self.bit16_groups):
self.get_grad_position(i, self.params_in_partition[i], self.first_offset[i], self.partition_size[i])
# mapping from parameter to partition that it belongs to
self.param_to_partition_ids = {}
# stores if a partition has been reduced in this step
self.is_partition_reduced = {}
# number of grads in partition that still need to be computed
self.remaining_grads_in_partition = {}
# total number of grads in partition
self.total_grads_in_partition = {}
# stores if a grad in a partition has been computed or not
self.is_grad_computed = {}
# stores the offset at which a parameter gradient needs to be inserted in a partition
self.grad_partition_insertion_offset = {}
# the offset in the gradient at which it must be inserted at the beginning of the partition
self.grad_start_offset = {}
# will store the averaged gradients required by this partition
self.averaged_gradients = {}
# For cpu_offload, will store the averaged gradients required by this partition
self.offload_gradient_dict = {}
# store index of first parameter in each partition
self.first_param_index_in_partition = {}
# initializes all data structures for implementing gradient partitioning
self.initialize_gradient_partitioning_data_structures()
# resets the data structure value for the next backward propagation
self.reset_partition_gradient_structures()
# creates backward hooks for gradient partitioning
if self.partition_gradients or self.overlap_comm:
self.create_reduce_and_remove_grad_hooks()
self.custom_loss_scaler = False
self.external_loss_scale = None
# we may have a way of fusing dynamic loss scaling; not supported for now
self.loss_scaler = CreateLossScaler(dtype=self.dtype,
static_loss_scale=static_loss_scale,
dynamic_scaling=dynamic_loss_scale,
dynamic_loss_args=dynamic_loss_args)
self.dynamic_loss_scale = self.loss_scaler.dynamic
see_memory_usage("Before initializing optimizer states", force=True)
self.initialize_optimizer_states()
see_memory_usage("After initializing optimizer states", force=True)
if dist.get_rank() == 0:
logger.info(f"optimizer state initialized")
if dist.get_rank(group=self.dp_process_group) == 0:
see_memory_usage(f"After initializing ZeRO optimizer", force=True)
self._link_all_hp_params()
self._enable_universal_checkpoint()
self._param_slice_mappings = self._create_param_mapping()
def _enable_universal_checkpoint(self):
for lp_param_group in self.bit16_groups:
enable_universal_checkpoint(param_list=lp_param_group)
def _create_param_mapping(self):
param_mapping = []
for i, _ in enumerate(self.optimizer.param_groups):
param_mapping_per_group = OrderedDict()
for lp in self.bit16_groups[i]:
if lp._hp_mapping is not None:
lp_name = self.param_names[lp]
param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address()
param_mapping.append(param_mapping_per_group)
return param_mapping
def _link_all_hp_params(self):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
if self.cpu_offload:
self._get_offload_gradient_dict()
for i, _ in enumerate(self.optimizer.param_groups):
# Link bit16 and fp32 params in partition
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
partition_size = self.bit16_groups_flat[i].numel() // dp_world_size
flat_hp_partition = self.single_partition_of_fp32_groups[i]
link_hp_params(lp_param_list=self.bit16_groups[i],
flat_hp_partition=flat_hp_partition,
gradient_dict=self.averaged_gradients,
offload_gradient_dict=self.offload_gradient_dict,
use_offload=self.cpu_offload,
param_group_index=i,
partition_start=partition_id * partition_size,
partition_size=partition_size,
partition_optimizer_state=self.optimizer.state[flat_hp_partition],
dp_group=self.real_dp_process_group[i])
def is_moe_group(self, group):
return 'moe' in group and group['moe']
def _configure_moe_settings(self):
# if we're using ZeRO stage 2, ensure contiguous gradients are used
if self.partition_gradients:
assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
# NOTE: To run ZeRO stage 1 with MoE, we need to set self.contiguous_gradients to True or ignore the assertion
if not self.partition_gradients and not self.contiguous_gradients:
logger.warn(
"ZeRO Stage 1 has not been thoroughly tested with MoE. This configuration is still experimental.")
assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
assert any(
[self.is_moe_group(group) for group in self.optimizer.param_groups]
), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer"
self.is_moe_param_group = []
for i, group in enumerate(self.optimizer.param_groups):
if self.is_moe_group(group):
assert all([is_moe_param(param)
for param in group['params']]), "All params in MoE group must be MoE params"
self.real_dp_process_group[i] = self.expert_dp_process_group[group['name']]
self.partition_count[i] = dist.get_world_size(group=self.expert_dp_process_group[group['name']])
self.is_moe_param_group.append(True)
else:
self.is_moe_param_group.append(False)
assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE"
assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE"
def _update_model_bit16_weights(self, group_index):
updated_params = self.unflatten(self.bit16_groups_flat[group_index],
self.round_robin_bit16_groups[group_index])
for p, q in zip(self.round_robin_bit16_groups[group_index], updated_params):
p.data = q.data
# set model fp16 weight to slices of reordered flattened buffer
for param_index, param in enumerate(self.bit16_groups[group_index]):
new_index = self.round_robin_bit16_indices[group_index][param_index]
param.data = self.round_robin_bit16_groups[group_index][new_index].data
def _round_robin_reorder(self, tensor_list, num_partitions):
# disable round robin if need to debug something
# return tensor_list, list(range(len(tensor_list)))
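# Deal tensors round-robin across partitions (tensor i -> partition i % num_partitions),
# then emit them grouped by partition so consecutive gradient reductions target
# different ranks rather than piling onto the same one.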
partition_tensors = {}
for i, tensor in enumerate(tensor_list):
j = i % num_partitions
if not j in partition_tensors:
partition_tensors[j] = []
partition_tensors[j].append((i, tensor))
reordered_tensors = []
reordered_indices = {}
for partition_index in partition_tensors.keys():
for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]):
reordered_indices[original_index] = len(reordered_tensors)
reordered_tensors.append(tensor)
return reordered_tensors, reordered_indices
def _release_ipg_buffers(self):
if self.contiguous_gradients:
self.ipg_buffer = None
self.grads_in_partition = None
self.grads_in_partition_offset = 0
def initialize_optimizer_states(self):
for i, group in enumerate(self.bit16_groups):
single_grad_partition = torch.zeros(int(self.partition_size[i]),
dtype=self.single_partition_of_fp32_groups[i].dtype,
device=self.device)
self.single_partition_of_fp32_groups[i].grad = get_accelerator().pin_memory(
single_grad_partition) if self.cpu_offload else single_grad_partition
# Initialize the optimizer states with the flattened fp32 partition.
# State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
# which do lazy initialization of the state at the first call to step.
if isinstance(self.optimizer, torch.optim.Adagrad):
self.optimizer = torch.optim.Adagrad(self.single_partition_of_fp32_groups, **self.optimizer.defaults)
else:
self.optimizer.step()
if not self.cpu_offload:
for group in self.single_partition_of_fp32_groups:
group.grad = None #class init
return
#########################################################################
#################### ZeRO Stage 1 - reduce gradients ####################
#########################################################################
def reduce_gradients(self, pipeline_parallel=False):
world_size = dist.get_world_size(self.dp_process_group)
my_rank = dist.get_rank(self.dp_process_group)
# with PP we must create ipg buffer, since backward is handled outside zero
if pipeline_parallel and self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
self.ipg_buffer.append(buf_0)
self.ipg_index = 0
if not self.overlap_comm:
for i, group in enumerate(self.bit16_groups):
for param in group:
if param.grad is not None:
self.reduce_ready_partitions_and_remove_grads(param, i)
# reduce any pending grads in either hook/non-hook case
self.overlapping_partition_gradients_reduce_epilogue()
#########################################################################
#########################ZeRO Partition Gradients########################
#########################################################################
def get_first_param_index(self, group_id, param_group, partition_id):
for index, param in enumerate(param_group):
param_id = self.get_param_id(param)
if partition_id in self.param_to_partition_ids[group_id][param_id]:
return index
return None
def initialize_gradient_partitioning_data_structures(self):
for i, param_group in enumerate(self.round_robin_bit16_groups):
total_partitions = dist.get_world_size(group=self.real_dp_process_group[i])
self.param_to_partition_ids[i] = {}
self.is_partition_reduced[i] = {}
self.total_grads_in_partition[i] = {}
self.remaining_grads_in_partition[i] = {}
self.is_grad_computed[i] = {}
self.grad_partition_insertion_offset[i] = {}
self.grad_start_offset[i] = {}
self.first_param_index_in_partition[i] = {}
for partition_id in range(total_partitions):
self.is_grad_computed[i][partition_id] = {}
self.grad_partition_insertion_offset[i][partition_id] = {}
self.grad_start_offset[i][partition_id] = {}
self.total_grads_in_partition[i][partition_id] = 0
self.initialize_gradient_partition(i, param_group, partition_id)
self.is_partition_reduced[i][partition_id] = False
self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index(
i, param_group, partition_id)
def independent_gradient_partition_epilogue(self):
self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0)
self.reduce_ipg_grads()
self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0)
# if dist.get_rank() == 0:
# logger.info("Params already reduced %s", self.params_already_reduced)
for i in range(len(self.params_already_reduced)):
self.params_already_reduced[i] = False
if self.overlap_comm:
get_accelerator().synchronize()
# It is safe to clear previously reduced grads of other partitions
self._clear_previous_reduced_grads()
if self.cpu_offload is False:
for i, _ in enumerate(self.bit16_groups):
if not i in self.averaged_gradients or self.averaged_gradients[i] is None:
self.averaged_gradients[i] = self.get_flat_partition(
self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=get_accelerator().current_device_name(),
return_tensor_list=True)
else:
avg_new = self.get_flat_partition(self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=get_accelerator().current_device_name(),
return_tensor_list=True)
for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new):
accumulated_grad.add_(new_avg_grad)
self._release_ipg_buffers()
# No need to keep the gradients anymore.
# All gradients required by the step
# are in self.averaged_gradients
self.zero_grad(set_to_none=True)
see_memory_usage(f"End ipg_epilogue")
# resets all partitions to not reduced
# sets remaining grads to the total number of grads in each partition
# sets is_grad_computed to False for all grads in each partition
def reset_partition_gradient_structures(self):
for i, _ in enumerate(self.bit16_groups):
total_partitions = dist.get_world_size(group=self.real_dp_process_group[i])
for partition_id in range(total_partitions):
self.is_partition_reduced[i][partition_id] = False
self.remaining_grads_in_partition[i][partition_id] = self.total_grads_in_partition[i][partition_id]
for param_id in self.is_grad_computed[i][partition_id]:
self.is_grad_computed[i][partition_id][param_id] = False
def initialize_gradient_partition(self, i, param_group, partition_id):
def set_key_value_list(dictionary, key, value):
if key in dictionary:
dictionary[key].append(value)
else:
dictionary[key] = [value]
def increment_value(dictionary, key):
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
partition_size = self.partition_size[i]
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
# debugging
# self.print_rank_0(f"### this partition id: {partition_id} with start index: {start_index} and end index: {end_index}")
for param in param_group:
param_size = param.numel()
param_id = self.get_param_id(param)
# debugging
# self.print_rank_0(f"### this param id: {param_id} with param size: {param_size} and current index is: {current_index}")
if (current_index >= start_index and current_index < end_index):
set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][param_id] = current_index - start_index
self.grad_start_offset[i][partition_id][param_id] = 0
# cross partitions
elif start_index > current_index and start_index < (current_index + param_size):
assert (first_offset == 0
), "This can happen either zero or only once as this must be the first tensor in the partition"
first_offset = start_index - current_index
set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][param_id] = 0
self.grad_start_offset[i][partition_id][param_id] = first_offset
current_index = current_index + param_size
def overlapping_partition_gradients_reduce_epilogue(self):
self.independent_gradient_partition_epilogue()
def create_reduce_and_remove_grad_hooks(self):
self.grad_accs = []
for i, param_group in enumerate(self.bit16_groups):
for param in param_group:
if param.requires_grad:
def wrapper(param, i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
def reduce_partition_and_remove_grads(*notneeded):
self.reduce_ready_partitions_and_remove_grads(param, i)
grad_acc.register_hook(reduce_partition_and_remove_grads)
self.grad_accs.append(grad_acc)
wrapper(param, i)
def get_param_id(self, param):
unique_id = id(param)
return self.param_id[unique_id]
def report_ipg_memory_usage(self, tag, param_elems):
elem_count = self.elements_in_ipg_bucket + param_elems
percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size
see_memory_usage(
f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}"
)
# create a flat tensor aligned at the alignment boundary
def flatten_dense_tensors_aligned(self, tensor_list, alignment):
return self.flatten(align_dense_tensors(tensor_list, alignment))
############### Independent Partition Gradient ########################
def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
# debugging
self.print_rank_0(f"### elements in ipg bucket: {self.elements_in_ipg_bucket:>8} and elements of this param: {param.numel():>8} with id: {self.get_param_id(param)}")
if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size:
self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.numel())
# debugging
self.print_rank_0(f"=== calling reduce ipg grads...")
self.reduce_ipg_grads()
if self.contiguous_gradients and self.overlap_comm:
# Swap ipg_index between 0 and 1
self.ipg_index = 1 - self.ipg_index
self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads", param.numel())
param_id = self.get_param_id(param)
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
if self.contiguous_gradients:
if param.numel() > self.reduce_bucket_size:
self.extra_large_param_to_reduce = param
else:
# keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening
new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow(0, self.elements_in_ipg_bucket, param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
self.elements_in_ipg_bucket += param.numel()
assert param.grad is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient"
self.grads_in_ipg_bucket.append(param.grad)
self.params_in_ipg_bucket.append((i, param, param_id))
#make sure the average tensor function knows how to average the gradients
if is_moe_param(param):
self.ipg_bucket_has_moe_params = True
self.report_ipg_memory_usage("End ipg_remove_grads", 0)
def print_rank_0(self, message):
if dist.get_rank() == 0:
logger.info(message)
def gradient_reduction_w_predivide(self, tensor):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
tensor_to_allreduce = tensor
if self.communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(self.communication_data_type)
if self.postscale_gradients:
if self.gradient_predivide_factor != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.gradient_predivide_factor != dp_world_size:
tensor_to_allreduce.mul_(self.gradient_predivide_factor / dp_world_size)
else:
tensor_to_allreduce.div_(dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
def average_tensor(self, tensor):
if self.overlap_comm:
stream = self.reduction_stream
stream.wait_stream(get_accelerator().current_stream())
else:
stream = get_accelerator().current_stream()
with get_accelerator().stream(stream):
if not self.reduce_scatter:
self.gradient_reduction_w_predivide(tensor)
return
# Accumulate destination ranks and bucket offsets for each gradient slice.
# Note: potential future optimization, record access pattern of parameters
# in backward pass and partition gradients w.r.t. access pattern so that our
# bucket is guaranteed to be contiguous w.r.t. ranks
rank_and_offsets = []
real_dp_process_group = []
curr_size = 0
prev_id = -1
process_group = self.dp_process_group
# count = 0
for i, param, param_id in self.params_in_ipg_bucket:
process_group = self.dp_process_group
#Averages gradients at parameter level if ipg has a moe param
#Otherwise averaging is done at the entire buffer level at the end of the loop
# MoE params have different groups
if self.ipg_bucket_has_moe_params:
process_group = self.expert_dp_process_group[param.group_name] if is_moe_param(
param) else self.dp_process_group
param.grad.data.div_(dist.get_world_size(group=process_group))
partition_ids = self.param_to_partition_ids[i][param_id]
assert all([p_id < dist.get_world_size(group=process_group) for p_id in partition_ids
]), f"world size {dist.get_world_size(group=process_group)} and p_ids: {partition_ids}"
partition_size = self.partition_size[i]
# Get all partition ids + their offsets
partition_ids_w_offsets = []
for partition_id in partition_ids:
offset = self.grad_start_offset[i][partition_id][param_id]
partition_ids_w_offsets.append((partition_id, offset))
partition_ids_w_offsets.sort(key=lambda t: t[1])
# Calculate rank and offsets for grad slices
for idx in range(len(partition_ids_w_offsets)):
partition_id, offset = partition_ids_w_offsets[idx]
# if dist.get_rank() == 0 and count < 100:
# print(f"Rank {dist.get_rank()} rank offset id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}")
# count += 1
# Calculate numel for grad slice depending on partition location
if idx == len(partition_ids_w_offsets) - 1:
# Last partition_id uses its own offset
numel = param.numel() - offset
else:
# Set numel to next partition's offset
numel = partition_ids_w_offsets[idx + 1][1] - offset
# Merge bucket ranges if they belong to the same rank
if partition_id == prev_id:
prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + numel)
else:
rank_and_offsets.append((partition_id, curr_size, numel))
real_dp_process_group.append(process_group)
curr_size += numel
prev_id = partition_id
# debugging (disabled to avoid log spam on every bucket reduction)
# self.print_rank_0(f">>> param id: {param_id} partition_ids: {partition_ids} partition_ids_w_offsets: {partition_ids_w_offsets} rank_and_offsets: {rank_and_offsets}")
if not self.ipg_bucket_has_moe_params:
tensor.div_(dist.get_world_size(group=self.dp_process_group))
tensor_to_reduce = tensor
if self.communication_data_type != tensor.dtype:
tensor_to_reduce = tensor.to(self.communication_data_type)
async_handles = []
for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets):
grad_slice = tensor_to_reduce.narrow(0, int(bucket_offset), int(numel))
# if dist.get_rank() == 0:
# print(f"Rank {dist.get_rank()} rank offset id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}")
# dist.barrier()
#dist.barrier()
dst_rank = dist.get_global_rank(real_dp_process_group[i], dst)
async_handle = dist.reduce(grad_slice, dst=dst_rank, group=real_dp_process_group[i], async_op=True)
async_handles.append(async_handle)
for handle in async_handles:
handle.wait()
if self.communication_data_type != tensor.dtype:
tensor.copy_(tensor_to_reduce)
##############################################################################
############################# CPU Offload Methods#############################
##############################################################################
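# get_grad_position records, for every parameter, where its gradient lives relative to this
# rank's flat partition: grad_position[param_id] = [group index, offset into the param's own
# flattened grad (source), offset into this rank's flat partition (destination), number of
# elements belonging to this partition].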
def get_grad_position(self, group_id, tensor_list, first_offset, partition_size):
current_offset = 0
for i, tensor in enumerate(tensor_list):
param_id = self.get_param_id(tensor)
param_start_offset = 0
num_elements = tensor.numel()
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
param_start_offset = first_offset
# we don't need all elements of the tensor
if num_elements > (partition_size - current_offset):
num_elements = partition_size - current_offset
self.grad_position[param_id] = [
int(group_id), int(param_start_offset),
int(current_offset), int(num_elements)
]
current_offset += num_elements
def update_overflow_tracker_for_param_grad(self, param):
if param.grad is not None and self._has_inf_or_nan(param.grad.data):
self.local_overflow = True
def _get_offload_gradient_dict(self):
for param_group_index, _ in enumerate(self.optimizer.param_groups):
self.offload_gradient_dict[param_group_index] = []
for lp_param in self.params_in_partition[param_group_index]:
param_id = self.get_param_id(lp_param)
[_, _, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[param_group_index].grad.view(-1).narrow(
0, dest_offset, num_elements)
self.offload_gradient_dict[param_group_index].append(dest_tensor)
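# CPU-offload gradient accumulation (methods below), roughly: each param keeps a pinned CPU
# accumulation buffer; after the first micro step the CPU copy is staged back through a
# reusable GPU buffer and added to the fresh param.grad, and on non-boundary micro steps the
# result is copied back to CPU. At the gradient accumulation boundary the accumulated grad is
# instead written into the fp32 partition grad
# (see async_inplace_copy_grad_to_fp32_buffer_from_gpu).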
def async_accumulate_grad_in_cpu_via_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
# copy to a preexisting buffer to avoid memory allocation penalty
dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow(0, 0, param.numel())
#buffer for storing gradients for this parameter in CPU
def buffer_to_accumulate_to_in_cpu():
if not self.fp16_master_weights_and_gradients:
return get_accelerator().pin_memory(torch.zeros(param.numel(), dtype=param.dtype, device=self.device))
else:
return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements)
#accumulate gradients into param.grad or the parts of it that belong to this partition
def accumulate_gradients():
if not self.fp16_master_weights_and_gradients:
dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1), non_blocking=True)
param.grad.data.view(-1).add_(dest_buffer)
else:
dest_buffer.narrow(0, source_offset,
num_elements).copy_(self.accumulated_grads_in_cpu[param_id].view(-1),
non_blocking=True)
param.grad.data.view(-1).narrow(0, source_offset,
num_elements).add_(dest_buffer.narrow(0, source_offset, num_elements))
#move accumulated gradients back to CPU
def copy_gradients_to_cpu():
if not self.fp16_master_weights_and_gradients:
self.accumulated_grads_in_cpu[param_id].data.copy_(param.grad.data.view(-1), non_blocking=True)
else:
self.accumulated_grads_in_cpu[param_id].data.copy_(param.grad.data.view(-1).narrow(
0, source_offset, num_elements),
non_blocking=True)
if param_id not in self.accumulated_grads_in_cpu:
self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu()
if self.micro_step_id > 0:
accumulate_gradients()
# at the boundary we will send 32bit directly
if not self.is_gradient_accumulation_boundary:
copy_gradients_to_cpu()
def set_norm_for_param_grad(self, param):
param_id = self.get_param_id(param)
accumulated_grad = self.accumulated_grads_in_cpu[
param_id] if self.gradient_accumulation_steps > 1 else param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2)
def set_norm_for_param_grad_in_gpu(self, param):
param_id = self.get_param_id(param)
accumulated_grad = param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2)
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements)
src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements)
if not self.fp16_master_weights_and_gradients:
src_tensor = src_tensor.float()
dest_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None #offload only
def complete_grad_norm_calculation_for_cpu_offload(self, params):
total_norm = 0.0
norm_type = 2.0
for p in params:
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_id = self.get_param_id(p)
# as some models have trainable parameters that are skipped during training,
# their backward hooks in self.create_reduce_and_remove_grad_hooks() will not run,
# so they have no norm_for_param_grads
if param_id in self.norm_for_param_grads:
param_norm = self.norm_for_param_grads[param_id]
total_norm += param_norm.item()**2
else:
# As unused parameters in modules may not be expected sometimes,
# add an explicit error msg when it occurred and an option to
# avoid the error
assert self.ignore_unused_parameters, """
This assert indicates that your module has parameters that
were not used in producing loss.
You can avoid this assert by
(1) enabling the ignore_unused_parameters option in the zero_optimization config;
(2) making sure all trainable parameters and `forward` function
outputs participate in calculating the loss.
"""
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
############################################################################################
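# copy_grads_in_partition: with cpu_offload, gradients are routed through the CPU-offload
# path above; otherwise gradients owned by this rank are copied out of the (reused)
# all-reduce buffer into a lazily allocated contiguous buffer (self.grads_in_partition) so
# they survive the next bucket reduction.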
def copy_grads_in_partition(self, param):
if self.cpu_offload:
if self.gradient_accumulation_steps > 1:
self.async_accumulate_grad_in_cpu_via_gpu(param)
if self.is_gradient_accumulation_boundary:
self.set_norm_for_param_grad_in_gpu(param)
self.update_overflow_tracker_for_param_grad(param)
self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param)
return
#print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}")
if self.grads_in_partition is None:
self.grads_in_partition_offset = 0
total_size = 0
for group in self.params_in_partition:
for param_in_partition in group:
total_size += param_in_partition.numel()
see_memory_usage(f"before copying {total_size} gradients into partition")
self.grads_in_partition = torch.empty(int(total_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
see_memory_usage(f"after copying {total_size} gradients into partition")
# The allreduce buffer will be rewritten. Copy the gradients in partition to a new buffer
new_grad_tensor = self.grads_in_partition.view(-1).narrow(0, self.grads_in_partition_offset, param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
#print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}")
self.grads_in_partition_offset += param.numel()
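# reduce_ipg_grads: reduce/average everything currently in the IPG bucket (or the single
# extra-large param that bypassed the bucket), mark those params as reduced, then either drop
# (or defer-drop, when overlap_comm is on without contiguous gradients) gradients this rank
# does not own, or copy owned gradients into the partition buffer when contiguous_gradients
# is enabled, and finally reset the bucket bookkeeping.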
def reduce_ipg_grads(self):
if self.contiguous_gradients:
if self.extra_large_param_to_reduce is not None:
assert len(self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen"
_, _, param_id = self.params_in_ipg_bucket[0]
assert self.get_param_id(self.extra_large_param_to_reduce
) == param_id, "param in ipg bucket does not match extra-large param"
self.average_tensor(self.extra_large_param_to_reduce.grad.view(-1))
self.extra_large_param_to_reduce = None
else:
self.average_tensor(self.ipg_buffer[self.ipg_index])
else:
self.buffered_reduce_fallback(None,
self.grads_in_ipg_bucket,
elements_per_buffer=self.elements_in_ipg_bucket)
if self.overlap_comm:
stream = self.reduction_stream
elif self.cpu_offload:
# TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed.
# get_accelerator().synchronize()
# stream = self.copy_grad_stream
stream = get_accelerator().current_stream()
else:
stream = get_accelerator().current_stream()
with get_accelerator().stream(stream):
for _, param, param_id in self.params_in_ipg_bucket:
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
self.params_already_reduced[param_id] = True
if self.partition_gradients:
if not self.is_param_in_current_partition[param_id]:
if self.overlap_comm and self.contiguous_gradients is False:
# Clear grads of other partitions during the next reduction
# to avoid clearing them before the reduction is complete.
if self.previous_reduced_grads is None:
self.previous_reduced_grads = []
self.previous_reduced_grads.append(param)
else:
param.grad = None #only if self.partition_gradients
elif self.contiguous_gradients:
self.copy_grads_in_partition(param)
else: # zero stage 1 - partition only optimizer state
if self.contiguous_gradients and self.is_param_in_current_partition[param_id]:
self.copy_grads_in_partition(param)
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.ipg_bucket_has_moe_params = False
self.elements_in_ipg_bucket = 0
#####################################################################
def reduce_ready_partitions_and_remove_grads(self, param, i):
if self.partition_gradients or self.is_gradient_accumulation_boundary:
self.reduce_independent_p_g_buckets_and_remove_grads(param, i)
def zero_reduced_gradients(self, partition_id, i):
def are_all_related_partitions_reduced(params_id):
for partition_id in self.param_to_partition_ids[i][params_id]:
if not self.is_partition_reduced[i][partition_id]:
return False
return True
for params_id in self.is_grad_computed[i][partition_id]:
if are_all_related_partitions_reduced(params_id):
self.param_dict[params_id].grad = None # dead code
def flatten_and_print(self, message, tensors, start=0, n=5):
flatten_tensor = self.flatten(tensors)
def print_func():
logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n))
self.sequential_execution(print_func, message)
def get_grads_to_reduce(self, i, partition_id):
def get_reducible_portion(key):
grad = self.param_dict[key].grad
total_elements = grad.numel()
start = self.grad_start_offset[i][partition_id][key]
num_elements = min(total_elements - start,
self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key])
if not pg_correctness_test:
if num_elements == total_elements:
return grad
else:
return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements))
else:
if num_elements == total_elements:
return grad.clone()
else:
return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements))
grads_to_reduce = []
for key in self.is_grad_computed[i][partition_id]:
grad = get_reducible_portion(key)
grads_to_reduce.append(grad)
return grads_to_reduce
def sequential_execution(self, function, message, group=None):
if group is None:
group = self.dp_process_group
if dist.get_rank(group=group) == 0:
logger.info(message)
for id in range(dist.get_world_size(group=group)):
if id == dist.get_rank(group=group):
function()
dist.barrier(group=group)
def set_none_gradients_to_zero(self, i, partition_id):
for param_id in self.is_grad_computed[i][partition_id]:
param = self.param_dict[param_id]
if param.grad is None:
param.grad = torch.zeros_like(param)
######################Reduction Related Methods##############################
def allreduce_bucket(self, bucket, rank=None, log=None):
rank = None
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if pg_correctness_test:
communication_data_type = torch.float32
else:
communication_data_type = self.communication_data_type
if communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(communication_data_type)
tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group))
if rank is None:
# "All Reducing"
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
else:
global_rank = dist.get_global_rank(self.dp_process_group, rank)
dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group)
if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
tensor.copy_(tensor_to_allreduce)
return tensor
def _clear_previous_reduced_grads(self):
if self.previous_reduced_grads is not None:
for param in self.previous_reduced_grads:
param.grad = None # overlap enabled
self.previous_reduced_grads = None
# if rank is specified do a reduction instead of an allreduce
def allreduce_and_copy(self, small_bucket, rank=None, log=None):
if self.overlap_comm:
get_accelerator().synchronize()
# It is safe to clear the previously reduced grads of other partitions
self._clear_previous_reduced_grads()
stream = self.reduction_stream
else:
stream = get_accelerator().current_stream()
with get_accelerator().stream(stream):
allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log)
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, rank=rank, log=None)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, rank=rank, log=log)
# allows using reduction of gradients instead of using all_reduce
def buffered_reduce_fallback(self, rank, grads, elements_per_buffer=500000000, log=None):
split_buckets = split_half_float_double(grads)
for i, bucket in enumerate(split_buckets):
self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer, rank=rank, log=log)
#############################################################################
#############################################################################
#############################################################################
# views the tensor as multiple partitions and returns
# those partitions
def get_data_parallel_partitions(self, tensor, group_id):
partitions = []
dp = dist.get_world_size(group=self.real_dp_process_group[group_id])
# dp_id = dist.get_rank(group=self.real_dp_process_group[group_id])
total_num_elements = tensor.numel()
base_size = total_num_elements // dp
remaining = total_num_elements % dp
start = 0
for id in range(dp):
partition_size = base_size
if id < remaining:
partition_size = partition_size + 1
partitions.append(tensor.narrow(0, start, partition_size))
start = start + partition_size
return partitions
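# Illustrative example for get_data_parallel_partitions: a flat tensor of 10 elements over a
# DP group of 4 ranks gives base_size=2 and remaining=2, so the partition sizes are
# [3, 3, 2, 2] starting at offsets [0, 3, 6, 8].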
def get_partition_info(self, tensor_list, partition_size, partition_id):
params_in_partition = []
params_not_in_partition = []
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for tensor in tensor_list:
tensor_size = tensor.numel()
if (current_index >= start_index and current_index < end_index):
params_in_partition.append(tensor)
elif start_index > current_index and start_index < (current_index + tensor_size):
params_in_partition.append(tensor)
assert (first_offset == 0
), "This can happen at most once because this must be the first tensor in the partition"
first_offset = start_index - current_index
else:
params_not_in_partition.append(tensor)
current_index = current_index + tensor_size
return params_in_partition, params_not_in_partition, first_offset
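# Illustrative example for get_partition_info: with tensors of sizes [6, 8, 4, 10],
# partition_size=10 and partition_id=1 (elements [10, 20) of the flat view), the first tensor
# falls entirely before the partition, tensors 1-3 overlap it, and first_offset=4 marks where
# the partition starts inside tensor 1.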
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.bit16_groups:
for p in group:
if set_to_none:
p.grad = None # epilogue and in step
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def _model_parallel_all_reduce(self, tensor, op):
""" Perform all reduce within model parallel group, if any.
"""
if self.model_parallel_group is None or self.model_parallel_world_size == 1:
pass
else:
dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group)
def get_grad_norm_direct(self, gradients, params, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(g.data.abs().max() for g in gradients)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group)
# Take max across all GPUs.
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX)
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.0
# if dist.get_rank() == 0:
# logger.info(f"Total Norm beginning {total_norm}")
for g, p in zip(gradients, params):
# Pipeline parallelism may replicate parameters. Avoid multi-counting.
if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated:
continue
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_norm = g.data.double().norm(2)
total_norm += param_norm.item()**2
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
# creates a flat fused tensor from the tensor list starting at the first_offset
# in the first tensor of the list. If there are not enough elements in the tensor
# list then the flat tensor will be padded with zeros
def get_flat_partition(self, tensor_list, first_offset, partition_size, dtype, device, return_tensor_list=False):
flat_tensor_list = []
current_size = 0
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros_like(tensor)
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
# we don't need all elements of the tensor
if num_elements > (partition_size - current_size):
num_elements = partition_size - current_size
# we need a narrow view of the tensor based on the tensor offset and number of elements that
# we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements)))
else:
flat_tensor_list.append(tensor)
current_size = current_size + num_elements
# this means it's the last partition and it does not align with the dp boundary. We need to pad before flattening
if current_size < partition_size:
flat_tensor_list.append(torch.zeros(int(partition_size - current_size), dtype=dtype, device=device))
if return_tensor_list:
return flat_tensor_list
return self.flatten(flat_tensor_list)
def free_grad_in_param_list(self, param_list):
for p in param_list:
p.grad = None # in step
def reset_cpu_buffers(self):
self.norm_for_param_grads = {}
self.local_overflow = False
def log_timers(self, timer_names):
if self.timers is None:
return
self.timers.log(names=list(timer_names))
def start_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).start()
def stop_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).stop()
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
def scaled_global_norm(self, norm_type=2):
assert norm_type == 2, "only L2 norm supported"
norm_groups = []
for i, group in enumerate(self.bit16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
if self.cpu_offload:
norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.params_in_partition[i]))
single_grad_partition = self.single_partition_of_fp32_groups[i].grad
else:
norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.params_in_partition[i]))
if self.has_moe_layers:
self._average_expert_grad_norms(norm_groups)
# note that the get_global_norm function only supports l2 norm
return get_global_norm(norm_list=norm_groups)
def get_bit16_param_group(self, group_no):
bit16_partitions = self.parallel_partitioned_bit16_groups[group_no]
partition_id = dist.get_rank(group=self.real_dp_process_group[group_no])
return [bit16_partitions[dist.get_rank(group=self.real_dp_process_group[group_no])]]
def _optimizer_step(self, group_no):
original_param_groups = self.optimizer.param_groups
self.optimizer.param_groups = [original_param_groups[group_no]]
# Disabling this as the C++ side copy & synchronize is not working correctly
#from deepspeed.ops.adam import DeepSpeedCPUAdam
#if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half:
# self.optimizer.step(fp16_param_groups=[self.get_bit16_param_group(group_no)])
#else:
# self.optimizer.step()
self.optimizer.step()
self.optimizer.param_groups = original_param_groups
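# step() sequence (summarized from the code below): check for fp16 overflow and update the
# loss scale; on overflow, zero the grads and skip the step. Otherwise compute the scaled
# global grad norm (fp16 only), then for each param group either (cpu_offload) unscale/clip
# the fp32 partition grad and step the wrapped optimizer, or build a flat fp32 grad partition
# from the averaged gradients, unscale/clip, step, and copy the updated fp32 partition back
# into its bit16 partition. Finally all-gather the updated bit16 partitions across the DP
# group.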
def step(self, closure=None):
"""
Not supporting closure.
"""
self.micro_step_id = -1
see_memory_usage(f"In step before checking overflow")
# First compute norm for all group so we know if there is overflow
self.check_overflow()
OPTIMIZER_ALLGATHER = 'optimizer_allgather'
OPTIMIZER_GRADIENTS = 'optimizer_gradients'
OPTIMIZER_STEP = 'optimizer_step'
timer_names = [OPTIMIZER_ALLGATHER, OPTIMIZER_GRADIENTS, OPTIMIZER_STEP]
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
see_memory_usage('After overflow before clearing gradients')
self.zero_grad(set_to_none=True)
if self.cpu_offload:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
see_memory_usage('After overflow after clearing gradients')
self.start_timers(timer_names)
self.stop_timers(timer_names)
return
# Step 1:- Calculate gradient norm using fp-16 grads
if self.dtype == torch.float16:
see_memory_usage('Before norm calculation')
scaled_global_grad_norm = self.scaled_global_norm()
self._global_grad_norm = scaled_global_grad_norm / prev_scale
see_memory_usage('After norm before optimizer')
# Step 2:- run optimizer and upscaling simultaneously
for i, group in enumerate(self.bit16_groups):
self.start_timers([OPTIMIZER_GRADIENTS])
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
if self.cpu_offload:
single_grad_partition = self.single_partition_of_fp32_groups[i].grad
if self.dtype == torch.float16:
self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm)
self.stop_timers([OPTIMIZER_GRADIENTS])
self.start_timers([OPTIMIZER_STEP])
self._optimizer_step(i)
# Disabled, this is not currently working
#from deepspeed.ops.adam import DeepSpeedCPUAdam
#if not (type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half):
# bit16_partitions = self.parallel_partitioned_bit16_groups[i]
# fp32_partition = self.single_partition_of_fp32_groups[i]
# bit16_partitions[partition_id].data.copy_(fp32_partition.data)
bit16_partitions = self.parallel_partitioned_bit16_groups[i]
fp32_partition = self.single_partition_of_fp32_groups[i]
bit16_partitions[partition_id].data.copy_(fp32_partition.data)
self.stop_timers([OPTIMIZER_STEP])
else:
# free gradients for all the parameters that are not updated by this process(ZeRO stage2)
self.free_grad_in_param_list(self.params_not_in_partition[i])
# create a flat gradients for parameters updated by this process
# If this is the last partition, ensure the flattened grads match the partition size; if not, pad with zeros
if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
single_grad_partition = self.flatten_dense_tensors_aligned(
self.averaged_gradients[i],
int(self.partition_size[i])).to(self.single_partition_of_fp32_groups[i].dtype)
else:
single_grad_partition = self.flatten(self.averaged_gradients[i]).to(
self.single_partition_of_fp32_groups[i].dtype)
assert single_grad_partition.numel() == self.partition_size[i], \
"averaged gradients have different number of elements that partition size {} {} {} {}".format(
single_grad_partition.numel(), self.partition_size[i], i, partition_id)
self.single_partition_of_fp32_groups[i].grad = single_grad_partition
# release all the gradient since we have already created a necessary copy in dp_grad_partition(ZeRO stage2)
self.free_grad_in_param_list(self.params_in_partition[i])
self.averaged_gradients[i] = None
if self.dtype == torch.float16:
self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm)
self.stop_timers([OPTIMIZER_GRADIENTS])
# Step 3:- run the optimizer if no offloading
self.start_timers([OPTIMIZER_STEP])
self._optimizer_step(i)
# Step 4:- get rid of the fp32 gradients. Not needed anymore
self.single_partition_of_fp32_groups[i].grad = None
del single_grad_partition
bit16_partitions = self.parallel_partitioned_bit16_groups[i]
fp32_partition = self.single_partition_of_fp32_groups[i]
bit16_partitions[partition_id].data.copy_(fp32_partition.data)
self.stop_timers([OPTIMIZER_STEP])
see_memory_usage('After optimizer before all-gather')
if self.cpu_offload:
self.reset_cpu_buffers()
self.start_timers([OPTIMIZER_ALLGATHER])
# Gather the updated weights from everyone.
# Then all partitions of the model parameters are updated and ready for next round forward.
all_gather_dp_groups(partitioned_param_groups=self.parallel_partitioned_bit16_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
self.stop_timers([OPTIMIZER_ALLGATHER])
# TODO: we probably don't need this? just to be safe
for i in range(len(self.bit16_groups)):
self._update_model_bit16_weights(i)
self.log_timers(timer_names)
see_memory_usage('After zero_optimizer step')
return
@torch.no_grad()
def update_lp_params(self):
for i, (bit16_partitions, fp32_partition) in enumerate(
zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
bit16_partitions[partition_id].data.copy_(fp32_partition.data)
# print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True)
# if i == 0:
# print_rank_0(f'{fp32_partition[:10]=}', force=True)
all_gather_dp_groups(partitioned_param_groups=self.parallel_partitioned_bit16_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
def _average_expert_grad_norms(self, norm_groups):
for i, norm in enumerate(norm_groups):
if self.is_moe_param_group[i]:
scaled_norm = norm * 1.0 / float(dist.get_world_size(group=self.real_dp_process_group[i]))
scaled_norm_tensor = torch.tensor(scaled_norm,
device=get_accelerator().device_name(),
dtype=torch.float)
dist.all_reduce(scaled_norm_tensor, group=self.real_dp_process_group[i])
norm_groups[i] = scaled_norm_tensor.item()
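# unscale_and_clip_grads below folds loss-scale removal and gradient clipping into a single
# multiply. Illustrative example: with loss_scale=1024, clip_grad=1.0 and a scaled total_norm
# of 2048 (unscaled norm 2.0), clip = (2048/1024)/1.0 ~= 2 > 1, so combined_scale = 2 * 1024
# and each gradient is multiplied by 1/2048, which both unscales it and clips the global norm
# to ~1.0.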
def unscale_and_clip_grads(self, grad_groups_flat, total_norm):
# compute combined scale factor for this group
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
for grad in grad_groups_flat:
if isinstance(grad, list):
sub_partitions = grad
for g in sub_partitions:
g.data.mul_(1. / combined_scale)
else:
grad.data.mul_(1. / combined_scale)
def _check_overflow(self, partition_gradients=True):
self.overflow = self.has_overflow(partition_gradients)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params, is_grad_list=False):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow_partitioned_grads_serial(self):
for i in range(len(self.bit16_groups)):
for j, grad in enumerate(self.averaged_gradients[i]):
if grad is not None and self._has_inf_or_nan(grad.data, j):
return True
return False
def has_overflow(self, partition_gradients=True):
if partition_gradients:
overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial()
overflow_gpu = get_accelerator().ByteTensor([overflow])
'''This will capture overflow across all data parallel and expert parallel processes,
since expert parallel processes are a subset of data parallel processes'''
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group)
else:
params = []
for group in self.bit16_groups:
for param in group:
params.append(param)
overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients)
overflow_gpu = get_accelerator().ByteTensor([overflow])
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX)
overflow = overflow_gpu[0].item()
# return bool(overflow)
# Hack: always report no overflow so the optimizer step is never skipped
return False
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, j=None):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent version of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
self.micro_step_id += 1
if self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
self.ipg_buffer.append(buf_0)
# Use double buffers to avoid data access conflict when overlap_comm is enabled.
if self.overlap_comm:
buf_1 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=get_accelerator().current_device_name())
self.ipg_buffer.append(buf_1)
self.ipg_index = 0
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
def check_overflow(self, partition_gradients=True):
self._check_overflow(partition_gradients)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.loss_scaler.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
# Return group tensor after removing paddings that are added for alignment to DP world size.
# This method works on the assumption that each group contains a single flattened tensor.
def _get_groups_without_padding(self, groups_with_padding):
groups_without_padding = []
for i, group in enumerate(groups_with_padding):
lean_length = group.numel() - self.groups_padding[i]
groups_without_padding.append(group[:lean_length])
return groups_without_padding
# Return optimizer state after removing paddings that are added for alignment.
def _get_state_without_padding(self, state_with_padding, padding):
lean_state = {}
for key, value in state_with_padding.items():
if torch.is_tensor(value):
lean_length = value.numel() - padding
lean_state[key] = value[:lean_length]
else:
lean_state[key] = value
return lean_state
# Return base optimizer states.
# This method assumes that each param group contains a single flattened tensor.
def _get_base_optimizer_state(self):
optimizer_groups_state = []
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
lean_optimizer_state = self._get_state_without_padding(self.optimizer.state[p], self.groups_padding[i])
optimizer_groups_state.append(lean_optimizer_state)
return optimizer_groups_state
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict[CLIP_GRAD] = self.clip_grad
if self.elastic_checkpoint:
state_dict[BASE_OPTIMIZER_STATE] = self._get_base_optimizer_state()
else:
state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()
# Remove paddings for DP alignment to enable loading for other alignment values
fp32_groups_without_padding = self._get_groups_without_padding(self.single_partition_of_fp32_groups)
state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = fp32_groups_without_padding
state_dict[
ZERO_STAGE] = ZeroStageEnum.gradients if self.partition_gradients else ZeroStageEnum.optimizer_states
state_dict[GROUP_PADDINGS] = self.groups_padding
state_dict[PARTITION_COUNT] = self.partition_count
state_dict[DS_VERSION] = version
state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings
return state_dict
# Restore base optimizer fp32 weights from elastic checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_elastic_fp32_weights(self, all_state_dict):
merged_single_partition_of_fp32_groups = []
for i in range(len(self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
merged_partitions = [sd[SINGLE_PARTITION_OF_FP32_GROUPS][i] for sd in all_state_dict]
if self.is_moe_group(self.optimizer.param_groups[i]):
ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name'])
merged_partitions = [merged_partitions[i] for i in ranks]
flat_merged_partitions = self.flatten_dense_tensors_aligned(
merged_partitions,
self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i]))
dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, i)
merged_single_partition_of_fp32_groups.append(dp_partitions[partition_id])
for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups):
current.data.copy_(saved.data)
# Restore base optimizer fp32 weights from ZeRO fp16 or bfloat16 weights
def _restore_from_bit16_weights(self):
for group_id, (bit16_partitions, fp32_partition) in enumerate(
zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[group_id])
fp32_partition.data.copy_(bit16_partitions[partition_id].data)
# Refresh the fp32 master params from the fp16 or bfloat16 copies.
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
# Extract optimizer state for current partition from merged states of all partitions
def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id):
partition_id = dist.get_rank(group=self.real_dp_process_group[group_id])
alignment = dist.get_world_size(group=self.real_dp_process_group[group_id])
if torch.is_tensor(all_partition_states[0]):
flat_merged_partitions = self.flatten_dense_tensors_aligned(all_partition_states, alignment)
dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, group_id)
return dp_partitions[partition_id]
else:
# Assume non-tensor states are not partitioned and equal across ranks, so return first one
return all_partition_states[0]
def _restore_base_optimizer_state(self, base_optimizer_group_states):
if type(base_optimizer_group_states) == dict:
base_optimizer_group_states = base_optimizer_group_states['state']
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
for key, saved in base_optimizer_group_states[i].items():
if torch.is_tensor(self.optimizer.state[p][key]):
dst_tensor = self.optimizer.state[p][key]
src_tensor = _get_padded_tensor(saved, dst_tensor.numel())
self.optimizer.state[p][key].data.copy_(src_tensor.data)
else:
self.optimizer.state[p][key] = saved
def get_ep_ranks(self, rank=0, group_name=None):
from deepspeed.utils import groups
expert_parallel_size_ = groups._get_expert_parallel_world_size(group_name)
world_size = groups._get_data_parallel_world_size()
rank = groups._get_expert_parallel_rank(group_name)
ranks = range(rank, world_size, expert_parallel_size_)
return list(ranks)
# Restore base optimizer state from elastic checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_elastic_base_optimizer_state(self, all_state_dict):
base_optimizer_group_states = []
for i in range(len(self.optimizer.param_groups)):
partition_states = {}
all_partition_group_states = [sd[BASE_OPTIMIZER_STATE][i] for sd in all_state_dict]
if self.is_moe_group(self.optimizer.param_groups[i]):
ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name'])
all_partition_group_states = [all_partition_group_states[i] for i in ranks]
for key in all_partition_group_states[0].keys():
all_partition_states = [all_states[key] for all_states in all_partition_group_states]
partition_states[key] = self._partition_base_optimizer_state(key, all_partition_states, i)
base_optimizer_group_states.append(partition_states)
self._restore_base_optimizer_state(base_optimizer_group_states)
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
if checkpoint_folder:
self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights)
else:
self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights)
def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights):
self._load_hp_checkpoint_state(checkpoint_folder)
@property
def param_groups(self):
"""Forward the wrapped optimizer's parameters."""
return self.optimizer.param_groups
def _load_hp_checkpoint_state(self, checkpoint_dir):
checkpoint_dir = os.path.join(checkpoint_dir, "zero")
tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
tp_world_size = self.mpu.get_slice_parallel_world_size()
for i, _ in enumerate(self.optimizer.param_groups):
for lp in self.bit16_groups[i]:
if lp._hp_mapping is not None:
#print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}")
lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank,
tp_world_size)
def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False):
r"""Loading ZeRO checkpoint
Arguments:
state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
Note that the number of saved partitions may differ from number of loading partitions to support
changing GPU count, specifically DP world size, between saving and loading checkpoints.
load_optimizer_states: Boolean indicating whether or not to load base optimizer states
load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32
copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss).
"""
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
dp_rank = dist.get_rank(group=self.dp_process_group)
current_rank_sd = state_dict_list[dp_rank]
self.loss_scaler = current_rank_sd.get('loss_scaler', self.loss_scaler)
self.dynamic_loss_scale = current_rank_sd.get('dynamic_loss_scale', self.dynamic_loss_scale)
self.overflow = current_rank_sd.get('overflow', self.overflow)
self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad)
ckpt_version = current_rank_sd.get(DS_VERSION, False)
assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed"
ckpt_version = pkg_version.parse(ckpt_version)
# zero stage 1 mode
if not self.partition_gradients:
required_version = pkg_version.parse("0.3.17")
error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \
"with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \
"please use an older version of DeepSpeed (<= 0.5.8) and set 'legacy_stage1': true in your zero config json."
assert required_version <= ckpt_version, f"Old version: {ckpt_version} {error_str}"
ckpt_is_rigid = isinstance(current_rank_sd[BASE_OPTIMIZER_STATE], dict)
# padding is always at the last rank/partition
# if DP=1024 and param-group elems=16 -> padding will be 1024-16 across all but one rank
# scenario-1 (shrink): saving w. 4 gpus -> loading w. 2 gpus
# scenario-2 (expand): saving w. 2 gpus -> loading w. 4 gpus
# if load_optimizer_states:
# if new_dp_size:
# self.strip_padding()
# self.add_padding_w_new_dp_size()
# self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
if load_optimizer_states:
if ckpt_is_rigid:
# loading rigid ckpt into either rigid or elastic exec
self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
else:
if self.elastic_checkpoint:
# loading elastic into elastic exec
self._restore_elastic_base_optimizer_state(state_dict_list)
else:
# loading an elastic checkpoint into rigid exec
self._restore_base_optimizer_state(current_rank_sd[BASE_OPTIMIZER_STATE])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 1 if changing DP degree and option 2 otherwise.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
if load_from_fp32_weights:
# option 2 from above
if self.elastic_checkpoint and not ckpt_is_rigid:
self._restore_from_elastic_fp32_weights(state_dict_list)
else:
# For non-elastic checkpoint, simply copying from saved weights of current rank is sufficient.
for current, saved in zip(self.single_partition_of_fp32_groups,
current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
src_tensor = _get_padded_tensor(saved, current.numel())
current.data.copy_(src_tensor.data)
else:
# option 1 from above
self._restore_from_bit16_weights()
if load_optimizer_states:
self._link_all_hp_params()
def _handle_overflow(cpu_sum, x, i):
import math
rank = dist.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}")
def estimate_zero2_model_states_mem_needs(total_params,
num_gpus_per_node=1,
num_nodes=1,
cpu_offload=True,
additional_buffer_factor=1.5):
total_gpus = num_nodes * num_gpus_per_node
if cpu_offload:
gpu_mem = 2 * total_params
cpu_mem = total_params * max(4 * total_gpus, 16) * additional_buffer_factor
else:
gpu_mem = 4 * total_params + int(16 * total_params / total_gpus)
cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor
return int(cpu_mem), int(gpu_mem)
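# Illustrative example for estimate_zero2_model_states_mem_needs (numbers follow directly
# from the formulas above), for total_params=1e9 on 1 node x 8 GPUs with
# additional_buffer_factor=1.5:
# cpu_offload=True -> gpu_mem = 2 * 1e9 bytes (~1.9GB), cpu_mem = 1e9 * 32 * 1.5 (~44.7GB)
# cpu_offload=False -> gpu_mem = 4 * 1e9 + 16 * 1e9 / 8 = 6e9 bytes (~5.6GB),
# cpu_mem = 1e9 * 4 * 8 * 1.5 (~44.7GB)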
def model_to_params(model):
# shared params calculated only once
total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
return total_params
def estimate_zero2_model_states_mem_needs_all_live(model,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If you have an actual model object, use this function and everything will be derived
automatically.
If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass
the ``total_params`` explicitly.
Args:
- ``model``: ``nn.Module`` object
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
total_params = model_to_params(model)
estimate_zero2_model_states_mem_needs_all_cold(total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
additional_buffer_factor=additional_buffer_factor)
def estimate_zero2_model_states_mem_needs_all_cold(total_params,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If it's a hypothetical model, use this function where you have to pass
the ``total_params`` explicitly.
If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything
will be derived automatically.
Args:
- ``total_params``: total model params
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
def format_options(cpu_offload):
enabled = []
device = f'{OffloadDeviceEnum.cpu:4}' if cpu_offload else "none"
enabled.append(f"offload_optimizer={device}")
return ", ".join(enabled)
nodes_str = "nodes" if num_nodes > 1 else "node"
gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
print("Estimated memory needed for params, optim states and gradients for a:\n"
f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
f"SW: Model with {int(total_params/1e6)}M total params.")
print(" per CPU | per GPU | Options")
for cpu_offload in [True, False]:
cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
cpu_offload=cpu_offload,
additional_buffer_factor=additional_buffer_factor)
options_str = format_options(cpu_offload=cpu_offload)
print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}") | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/stage_1_and_2.py | stage_1_and_2.py |
# DeepSpeed Team
from dataclasses import dataclass
import collections
from collections import UserDict
from typing import Deque, Set
from deepspeed import comm as dist
from deepspeed.utils.logging import logger
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus
from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id
from deepspeed.accelerator import get_accelerator
def debug_rank0(message: str) -> None:
if dist.get_rank() == 0:
logger.debug(message)
@instrument_w_nvtx
def get_all_parameters(sub_module, recurse=False):
return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters())
def iter_params(module: Module, recurse=False) -> Iterable[Parameter]:
return map(lambda pair: pair[1], get_all_parameters(module, recurse))
class ZeRoTraceMode(Enum):
# Record trace of the network during a single forward+backward (for training) or forward (for inference)
RECORD = 1
# Use recorded network trace to optimize current forward+backward or forward
COMPLETE = 2
# Recorded trace does not match current forward+backward or forward pass.
INVALID = 3
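# Typical trace lifecycle (see the coordinator methods below): the coordinator starts in
# RECORD mode and record_module()/record_parameters() populate the trace during the first
# pass; on later passes trace_prologue() checks each submodule against the recorded order and
# switches to INVALID (clearing the trace) on any mismatch, while a matching recorded trace
# is replayed in COMPLETE mode (e.g. to drive parameter prefetching via the param queue).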
class PartitionedParameterCoordinator:
"""Handles partitioning and gathering of parameters."""
class __InflightParamRegistry(UserDict):
"""registry for parameters in flight"""
def __setitem__(self, param: Parameter, handle: AllGatherCoalescedHandle) -> None:
if param in self.data:
raise RuntimeError(f"{param.ds_summary()} already in registry")
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"attempted to add non-inflight parameter to registry {param.ds_summary()}")
self.data[param] = handle
@dataclass
class __ParamInTrace:
param: Parameter
step_id_last_used_at: int
def __init__(
self,
prefetch_bucket_sz: int,
max_reuse_distance_in_numel: int,
max_available_parameters_in_numel: int,
allgather_stream: get_accelerator().Stream,
prefetch_nvme: bool = False,
) -> None:
# mapping of param -> handle for each param that is currently in flight
self.__inflight_param_registry = __class__.__InflightParamRegistry()
# keeps track of the number of submodules invoked so far.
self.__step_id: int = 0
# network tracing mode
self.__trace_mode: ZeRoTraceMode = ZeRoTraceMode.RECORD
# sequence of submodules/parameters in forward pass + backward pass
self.__submodule_order: Iterable[Module] = []
self.__param_order: Iterable[__class__.__ParamInTrace] = []
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque())
# number of available params, and max number of available params
self.__n_available_params: int = 0
self.__max_n_available_params: int = max_available_parameters_in_numel
# max distance between two uses of a module beyond which the module is released
self.__max_reuse_dist_in_numel: int = max_reuse_distance_in_numel
# queue for parameters to fetch. parameters will be popped off the left
# side of the deque as they are fetched
self.__param_queue: Deque[__class__.__ParamInTrace] = None
self.__prefetch_bucket_sz: int = prefetch_bucket_sz
self.__prefetch_nvme: bool = prefetch_nvme
self.hierarchy: int = 0
# stream that will be used for allgather operations
self.__allgather_stream: get_accelerator().Stream = allgather_stream
# limit the number of fetch events that can be queued at once
# otherwise, memory is allocated by the host thread at the time of the call,
# but not used until later by the asynchronous cuda stream. allowing an
# unbounded number of these to queue up causes a lot of memory pressure
# that then becomes detrimental to performance.
# this is a much less elegant fix than something like using
# cudaMallocAsync/cudaFreeAsync. Choosing not to expose this to the user for
# now because ideally in the future it is replaced by an async allocation
# mechanism which doesn't require any configuration by the user.
self.__ongoing_fetch_events: Deque[get_accelerator().Event] = collections.deque()
# TODO. make this configurable via JSON
self.__max_ongoing_fetch_events: int = 2
"""Tracing and Tracking
TODO. consider performing trace before initializing PartitionedParameterCoordinator
and passing trace results into constructor. This way all the code in here can
just assume that the trace is complete and the results can be entirely
immutable.
Bookkeeping operations used to track where we are in the forward/backward pass
"""
def _clear_trace_structures(self) -> None:
self.__submodule_order = []
self.__param_order = []
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__param_queue = None
def is_complete_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.COMPLETE
def is_invalid_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.INVALID
def is_record_trace(self) -> bool:
return self.__trace_mode == ZeRoTraceMode.RECORD
def _invalidate_trace(self) -> None:
if self.is_invalid_trace():
raise RuntimeError("attempted to invalidate already invalid trace")
self.__trace_mode = ZeRoTraceMode.INVALID
self._clear_trace_structures()
def trace_prologue(self, sub_module: Module) -> None:
if self.is_complete_trace():
# sub_module must match expectation else invalidate trace cache
if len(self.__submodule_order) <= self.__step_id:
print_rank_0(
f"Invalidate trace cache @ step {self.__step_id} and module {sub_module.id}: "
f"cache has only {len(self.__submodule_order)} modules",
force=True)
self._invalidate_trace()
return
if sub_module != self.__submodule_order[self.__step_id]:
expected_module_id = self.__submodule_order[self.__step_id].id
print_rank_0(
f"Invalidate trace cache @ step {self.__step_id}: "
f"expected module {expected_module_id}, but got module {sub_module.id}",
force=True)
self._invalidate_trace()
def record_module(self, sub_module: Module) -> None:
"""adds sub module to trace"""
if not self.is_record_trace():
raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}")
self.__submodule_order.append(sub_module)
self.__step_id_module_fetched_for[sub_module.id].append(self.__step_id)
def record_parameters(self, sub_module: Module) -> None:
"""adds sub module to trace"""
if not self.is_record_trace():
raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}")
step_id = self.__step_id_module_fetched_for[sub_module.id].popleft()
for param in sorted(set(iter_params(sub_module)), key=lambda p: p.ds_id):
self.__param_order.append(__class__.__ParamInTrace(param=param, step_id_last_used_at=step_id))
def construct_parameter_trace_from_module_trace(self):
"""use module trace to construct parameter trace"""
self.__param_order = []
for sub_module in self.__submodule_order:
self.record_parameters(sub_module)
def reset_step(self) -> None:
"""indicate that we have completed one fwd+bwd for the model"""
if self.__inflight_param_registry:
raise RuntimeError(f"still have inflight params "
f"{[p.ds_summary for p in self.__inflight_param_registry.keys()]}")
if not self.is_complete_trace(): # not self.trace_complete:
# Make sure that recorded submodule orders are identical across ranks
assert_ints_same_as_other_ranks([m.id for m in self.__submodule_order])
if self.is_record_trace():
# Successfully recorded a trace
self.construct_parameter_trace_from_module_trace()
# Make sure that recorded parameter orders are identical across ranks
assert_ints_same_as_other_ranks([p.param.ds_id for p in self.__param_order])
assert_ints_same_as_other_ranks([p.step_id_last_used_at for p in self.__param_order])
self.__submodule_order = tuple(self.__submodule_order) # freeze
self.__param_order = tuple(self.__param_order) # freeze
self.__trace_mode = ZeRoTraceMode.COMPLETE
print_rank_0(
f"completed record trace of {len(self.__submodule_order)} sub modules: {[m.id for m in self.__submodule_order]}",
force=False)
else:
# Enable trace recording for next forward/backward pass
self.__trace_mode = ZeRoTraceMode.RECORD
self.__param_queue = collections.deque(self.__param_order) # reset fetch queue
self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10))
self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque())
self.__step_id = 0
self.__n_available_params = 0
def _dump_params(self, tag, sub_module, params, step_id=None):
if step_id is None:
step_id = self.__step_id
param_names = [debug_param2name_id(p) for p in params]
print(f'{tag} step = {step_id} mod = {debug_module2name_id(sub_module)} p_names = {param_names}')
def _dump_param_ids(self, tag, mod_id, p_ids, step_id=None):
if step_id is None:
step_id = self.__step_id
print(f'{tag} mod = {mod_id}, step = {step_id}, p_ids = {p_ids}')
"""Fetch and Release
Fetching, prefetching, and releasing parameters
"""
@instrument_w_nvtx
@torch.no_grad()
def fetch_sub_module(self, current_submodule: Module) -> None:
"""This method does the following (in order):
1. kick off fetch for parameters in immediately required sub module
2. kick off fetch for next few parameters we will need later (prefetch)
3. block on parameters in immediately required sub module
"""
debug_rank0(
f"{self.__step_id}: M{current_submodule.id}({type(current_submodule).__name__}) P{[p.ds_id for p in iter_params(current_submodule)]} "
+ str({
"avail": f"{self.__n_available_params:.1e}",
"queue_sz": f"{len(self.__param_queue or [])}",
"inflight": [p.ds_id for p in self.__inflight_param_registry],
}))
params_to_fetch = frozenset(iter_params(current_submodule))
# kick off all gather for params in the immediately required submodule
for param in params_to_fetch:
debug_rank0(f"-fetch: {param.ds_summary()}")
self.__all_gather_params(params_to_fetch)
# wait for parameters in the immediately needed submodule to become available
for param in params_to_fetch:
param.ds_active_sub_modules.add(current_submodule.id)
debug_rank0(f"-wait: {param.ds_summary()}")
if param in self.__inflight_param_registry:
with get_accelerator().stream(self.__allgather_stream):
while self.__ongoing_fetch_events and self.__ongoing_fetch_events[0].query():
self.__ongoing_fetch_events.popleft()
if len(self.__ongoing_fetch_events) > self.__max_ongoing_fetch_events:
self.__ongoing_fetch_events.popleft().synchronize()
self.__inflight_param_registry.pop(param).wait()
event = get_accelerator().Event()
event.record()
self.__ongoing_fetch_events.append(event)
assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary()
get_accelerator().current_stream().wait_stream(self.__allgather_stream)
# kick off parameter prefetches for upcoming modules
# don't prefetch if we don't have a completed model trace
if self.is_complete_trace():
# go through the parameters we need for the current module and pop them
# off the fetch queue so that they aren't prefetched later.
# if params have already been popped off the fetch queue by earlier
# prefetches we won't look for them here
discarded_from_prefetch_queue = set()
params_not_already_fetched = set(
filter(lambda p: self.__most_recent_step_id_param_fetched_for[p] < self.__step_id, params_to_fetch))
while self.__param_queue and len(discarded_from_prefetch_queue) < len(params_not_already_fetched):
param_in_trace = self.__param_queue.popleft()
self.__most_recent_step_id_param_fetched_for[
param_in_trace.param] = param_in_trace.step_id_last_used_at
discarded_from_prefetch_queue.add(param_in_trace.param)
if discarded_from_prefetch_queue != params_not_already_fetched:
raise RuntimeError(
f"tracing error at step {self.__step_id}: \n"
f"module id: {current_submodule.id}, training: {current_submodule.training}\n"
f"expected the next {len(params_not_already_fetched)} parameters in the "
f"parameter fetch queue to be {tuple(p.ds_summary(use_debug_name=True) for p in params_not_already_fetched)} \n"
f"but got \n {tuple(p.ds_summary(use_debug_name=True) for p in discarded_from_prefetch_queue)}.")
def _is_currently_on_nvme(param):
if param.nvme_swapper is None:
return False
return param.ds_tensor.final_location == OffloadDeviceEnum.nvme \
and param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE
# kick off all gather for params in the next few submodules (prefetch)
if self.__prefetch_bucket_sz > 0:
max_params_to_prefetch = min(self.__max_n_available_params - self.__n_available_params,
self.__prefetch_bucket_sz)
params_to_prefetch = set()
numel_prefetching = 0
while self.__param_queue and numel_prefetching < max_params_to_prefetch:
param_in_trace: __class__.__ParamInTrace = self.__param_queue.popleft()
if _is_currently_on_nvme(param_in_trace.param):
# nvme prefetch is handled elsewhere. Need to break here to preserve fetch order
self.__param_queue.appendleft(param_in_trace)
break
do_prefetch = param_in_trace.param.ds_status == ZeroParamStatus.NOT_AVAILABLE
if param_in_trace.param in params_to_prefetch:
# Avoid duplicates
do_prefetch = False
self.__most_recent_step_id_param_fetched_for[param_in_trace.param] = \
max(self.__most_recent_step_id_param_fetched_for[param_in_trace.param],
param_in_trace.step_id_last_used_at)
if do_prefetch:
params_to_prefetch.add(param_in_trace.param)
numel_prefetching += param_in_trace.param.ds_numel
for param in params_to_prefetch:
debug_rank0(f"-prefetch: {param.ds_summary()}")
self.__all_gather_params(params_to_prefetch)
if self.__prefetch_nvme:
self.__prefetch_nvme_param_partitions()
self.__step_id += 1
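# Illustrative driving sequence (commented out; in practice the DeepSpeedZeRoOffload
# forward/backward hooks call into the coordinator, and `coordinator`/`sub_module`
# below are hypothetical placeholders):
#
#   coordinator.trace_prologue(sub_module)
#   if coordinator.is_record_trace():
#       coordinator.record_module(sub_module)
#   coordinator.fetch_sub_module(sub_module)    # gather + prefetch + wait
#   ...                                         # run the submodule
#   coordinator.release_sub_module(sub_module)  # re-partition if eligible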
@instrument_w_nvtx
@torch.no_grad()
def release_sub_module(self, submodule: Module) -> None:
"""release the parameters of a sub module, assuming they meet conditions to
be released."""
params_to_release = (self.__params_to_release(submodule, self.__step_id) if self.is_complete_trace() else set(
p.ds_id for p in iter_params(submodule)))
for param in iter_params(submodule):
param.ds_active_sub_modules.discard(submodule.id)
if param.ds_id in params_to_release and not param.is_external_param:
self.__release_param(param)
@instrument_w_nvtx
@torch.no_grad()
def release_and_reset_all(self, module: Module) -> None:
"""release all module parameters"""
for param in iter_params(module, recurse=True):
if param in self.__inflight_param_registry:
raise RuntimeError(f"param {param.ds_summary()} still in flight")
# TODO. make this throw if there are still active submodules. currently
# there's a hook execution issue
param.ds_active_sub_modules.clear()
self.__release_param(param)
for param in iter_params(module, recurse=True):
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(f"{param.ds_summary()} expected to be released")
@instrument_w_nvtx
def __all_gather_params(self, params: Set[Parameter]) -> None:
"""for each partitioned parameter, kick off an async allgather and store
the work handle for the in flight parameters."""
partitioned_params = []
for param in params:
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
partitioned_params.append(param)
self.__n_available_params += param.ds_numel
if partitioned_params:
with get_accelerator().stream(self.__allgather_stream):
handle = partitioned_params[0].all_gather_coalesced(partitioned_params)
for param in partitioned_params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary()
self.__inflight_param_registry[param] = handle
# Release swap buffers for persisted params on nvme since they will never be partitioned or evicted from GPU
swap_persisted_params = [
p for p in partitioned_params if p.ds_persist and p.ds_tensor.final_location == OffloadDeviceEnum.nvme
]
if swap_persisted_params:
swap_persisted_params[0].nvme_swapper.remove_partition_and_release_buffers(swap_persisted_params)
@instrument_w_nvtx
def __release_param(self, param: Parameter) -> None:
if param.ds_status == ZeroParamStatus.AVAILABLE and not param.ds_active_sub_modules:
debug_rank0(f"-release: {param.ds_summary()}")
param.partition()
self.__n_available_params -= param.ds_numel
@instrument_w_nvtx
@functools.lru_cache(maxsize=None)
def __params_to_release(self, submodule_to_release: Module, step_id: int) -> Set[int]:
if not self.is_complete_trace():
raise RuntimeError("expected trace to be complete")
params_to_release = set(p.ds_id for p in iter_params(submodule_to_release) if not p.ds_persist)
# Problem: When prefetcher scans the param trace, it skips AVAILABLE params.
# This creates issues if those params are released before the skipped uses:
# 1) It hurts performance as the skipped uses are never prefetched.
# 2) For nvme params, we run out of swap buffers because the prefetch order
# diverges from the trace.
# Solution: Don't release params whose reuse was skipped by prefetch. This is
# possible because we detect such skips during prefetch and mark those params.
for param in iter_params(submodule_to_release):
if self.__most_recent_step_id_param_fetched_for[param] > step_id:
params_to_release.discard(param.ds_id)
# examine all modules within `max_reuse_dist_in_numel` of the current step,
# if we see any of the candidate parameters to be released reoccur while
# doing this, remove them from the set of parameters to release.
params_traversed = 0
for module in self.__submodule_order[step_id:]:
if params_traversed >= self.__max_reuse_dist_in_numel:
break
for param in iter_params(module):
params_to_release.discard(param.ds_id)
params_traversed += param.ds_numel
return params_to_release
@instrument_w_nvtx
def __prefetch_nvme_param_partitions(self) -> None:
"""swap in parameter partitions from nvme for those parameters that will be used
after the ones that are already being prefetched into full parameters
"""
if not self.is_complete_trace():
return
numel_in_flight = sum(param.ds_numel for param in self.__inflight_param_registry)
numel_considered = 0
swap_in_params = []
for param_in_trace in self.__param_queue:
param = param_in_trace.param
if param.nvme_swapper is None:
continue
if (numel_considered > 2 * numel_in_flight
or len(swap_in_params) >= param.nvme_swapper.available_swap_in_buffers()):
break
if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
swap_in_params.append(param)
numel_considered += param.ds_numel
if swap_in_params:
swap_in_params[0].nvme_swapper.swap_in(swap_in_params, async_op=True) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/partitioned_param_coordinator.py | partitioned_param_coordinator.py |
# DeepSpeed Team
from pydantic import Field, validator
import sys
from typing import Optional
from enum import Enum
from deepspeed.runtime.config_utils import get_scalar_param, pp_int, DeepSpeedConfigModel
from deepspeed.utils import logger
from .offload_config import DeepSpeedZeroOffloadParamConfig, DeepSpeedZeroOffloadOptimizerConfig, OffloadDeviceEnum
# ZeRO optimization. By default, this optimization is not enabled.
# Users have to configure the desired optimization (0 means disabled) in params.json as in the example below:
ZERO_FORMAT = """
ZeRO optimization should be enabled as:
"session_params": {
"zero_optimization": {
"stage": [0|1|2],
"stage3_max_live_parameters" : 1000000000,
"stage3_max_reuse_distance" : 1000000000,
"allgather_partitions": [true|false],
"allgather_bucket_size": 500000000,
"reduce_scatter": [true|false],
"contiguous_gradients" : [true|false]
"overlap_comm": [true|false],
"reduce_bucket_size": 500000000,
"load_from_fp32_weights": [true|false],
"cpu_offload": [true|false] (deprecated),
"cpu_offload_params" : [true|false] (deprecated),
"cpu_offload_use_pin_memory": [true|false] (deprecated),
"sub_group_size" : 1000000000000,
"offload_param": {...},
"offload_optimizer": {...},
"ignore_unused_parameters": [true|false],
"round_robin_gradients": [true|false],
"memory_efficient_linear": [true|false]
}
}
"""
ZERO_OPTIMIZATION = "zero_optimization"
def read_zero_config_deprecated(param_dict):
zero_config_dict = {}
zero_config_dict["stage"] = 1 if param_dict[ZERO_OPTIMIZATION] else 0
if zero_config_dict["stage"] > 0:
zero_config_dict["allgather_bucket_size"] = get_scalar_param(param_dict, "allgather_size", 5e8)
logger.warning(
"DeepSpeedConfig: this format of ZeRO optimization setup is deprecated. Please use the following format: {}".
format(ZERO_FORMAT))
return zero_config_dict
def get_zero_config(param_dict):
if ZERO_OPTIMIZATION in param_dict:
zero_config_dict = param_dict[ZERO_OPTIMIZATION]
if isinstance(zero_config_dict, bool):
zero_config_dict = read_zero_config_deprecated(param_dict)
else:
zero_config_dict = {}
return DeepSpeedZeroConfig(**zero_config_dict)
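# Illustrative sketch (commented out; the configuration values below are
# hypothetical examples, not defaults):
#
#   zero_cfg = get_zero_config({
#       "zero_optimization": {
#           "stage": 3,
#           "offload_param": {"device": "cpu", "pin_memory": True},
#       }
#   })
#   assert zero_cfg.stage == ZeroStageEnum.weights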
class ZeroStageEnum(int, Enum):
""" Enum class for possible zero stages """
disabled = 0
optimizer_states = 1
gradients = 2
weights = 3
max_stage = 3
class DeepSpeedZeroConfig(DeepSpeedConfigModel):
"""
Sets parameters for ZeRO optimizations.
"""
stage: ZeroStageEnum = 0
"""
Chooses different stages of the ZeRO Optimizer. Stages 0, 1, 2, and 3 refer
to disabled, optimizer state partitioning, optimizer+gradient state
partitioning, and optimizer+gradient+parameter partitioning, respectively.
"""
contiguous_gradients: bool = True
"""
Copies the gradients to a contiguous buffer as they are produced. Avoids
memory fragmentation during backward pass.
"""
reduce_scatter: bool = True
"""
Uses reduce or reduce scatter instead of allreduce to average gradients
"""
reduce_bucket_size: int = Field(pp_int(5e8), ge=0)
"""
Number of elements reduced/allreduced at a time. Limits the memory required
for the allreduce for large model sizes
"""
allgather_partitions: bool = True
"""
Chooses between an allgather collective and a series of broadcast collectives
to gather updated parameters from all the GPUs at the end of each step
"""
allgather_bucket_size: int = Field(pp_int(5e8), ge=0)
"""
Number of elements allgathered at a time. Limits the memory required for
the allgather for large model sizes
"""
overlap_comm: bool = None # None for dynamic default value (see validator `overlap_comm_valid` below)
"""
Attempts to overlap the reduction of the gradients with backward computation
"""
load_from_fp32_weights: bool = True
"""
Boolean indicating whether to initialize fp32 master weights from the fp32
copies in the checkpoint (no precision loss) or from the model's fp16 copies
(with precision loss). This can be used to initialize optimizer state even
when the checkpoint is missing optimizer state.
"""
elastic_checkpoint: bool = False
"""
Enable loading checkpoint that was saved by job with different GPU count.
No longer supported.
"""
offload_param: Optional[DeepSpeedZeroOffloadParamConfig] = None
"""
Enable offloading of model parameters to CPU or NVMe. This frees up GPU
memory for larger models or batch sizes. Valid only with stage 3. Expects a
dictionary containing values for :any:`DeepSpeedZeroOffloadParamConfig`.
"""
offload_optimizer: Optional[DeepSpeedZeroOffloadOptimizerConfig] = None
"""
Enable offloading of optimizer state to CPU or NVMe, and optimizer
computation to CPU. This frees up GPU memory for larger models or batch
sizes. Valid for ZeRO stage 1, 2, 3. Expects a dictionary containing values
for :any:`DeepSpeedZeroOffloadOptimizerConfig`.
"""
sub_group_size: int = Field(pp_int(1e9), ge=0)
"""
Tile size for parameter processing to fit massive models (with trillions of
parameters). Used by ZeRO3-Offload and ZeRO-Infinity
"""
cpu_offload_param: bool = Field(
None,
deprecated=True,
new_param="offload_param",
new_param_fn=(lambda val: DeepSpeedZeroOffloadParamConfig(device=OffloadDeviceEnum.cpu) if val else None),
)
""" Deprecated, please use ``offload_param`` """
cpu_offload_use_pin_memory: bool = Field(
None,
deprecated=True,
new_param="offload_param or offload_optimizer",
set_new_param=False,
)
""" Deprecated, please use ``offload_param`` or ``offload_optimizer`` """
cpu_offload: bool = Field(
None,
deprecated=True,
new_param="offload_optimizer",
new_param_fn=(lambda val: DeepSpeedZeroOffloadOptimizerConfig(device=OffloadDeviceEnum.cpu) if val else None),
)
""" Deprecated, please use ``offload_optimizer`` """
prefetch_bucket_size: int = Field(pp_int(5e7), ge=0, alias="stage3_prefetch_bucket_size")
"""
Maximum number of parameter elements to fetch ahead of use. Used by ZeRO3,
ZeRO3-Offload, ZeRO-Infinity, and ZeRO-Inference.
"""
param_persistence_threshold: int = Field(pp_int(1e5), ge=0, alias="stage3_param_persistence_threshold")
"""
Do not partition parameters smaller than this threshold. Smaller values use
less memory, but can greatly increase communication (especially
latency-bound messages).
"""
model_persistence_threshold: int = Field(pp_int(sys.maxsize, "sys.maxsize"),
ge=0,
alias="stage3_model_persistence_threshold")
"""
Maximum number of parameter elements that can be persisted in GPU and not
partitioned. This imposes an upper bound on the number of unpartitioned
parameters resulting from param_persistence_threshold setting. Used by
ZeRO3-Offload, ZeRO-Infinity and ZeRO-Inference.
"""
max_live_parameters: int = Field(pp_int(1e9), ge=0, alias="stage3_max_live_parameters")
"""
The maximum number of parameters resident per GPU before releasing. Smaller
values use less memory, but perform more communication.
"""
max_reuse_distance: int = Field(pp_int(1e9), ge=0, alias="stage3_max_reuse_distance")
"""
Do not release a parameter if it will be reused within this threshold of
parameters. Smaller values use less memory, but perform more communication.
"""
gather_16bit_weights_on_model_save: bool = Field(False, alias="stage3_gather_16bit_weights_on_model_save")
"""
Consolidate the weights before saving the model by ``save_16bit_model()``.
Since the weights are partitioned across GPUs, they aren't part of
``state_dict``, so this function automatically gathers the weights when
this option is enabled and then saves the fp16 model weights.
"""
stage3_gather_fp16_weights_on_model_save: bool = Field(False,
deprecated=True,
new_param="gather_16bit_weights_on_model_save")
""" Deprecated, please use ``gather_16bit_weights_on_model_save`` """
ignore_unused_parameters: bool = True
"""
Unused parameters in modules may be unexpected in static networks, but
could be normal in dynamic networks. This controls whether or not training
should terminate with an error message when unused parameters are detected.
This is set to ``True`` by default, which means unused parameters are
ignored and training continues. Currently this is only used in ZeRO stage 2.
"""
legacy_stage1: bool = False
"""
For backward compatibility, enable the old ZeRO stage 1 implementation. Use
at your own risk; this will be deprecated soon.
"""
round_robin_gradients: bool = False
"""
Stage 1 and 2 optimization for CPU offloading that parallelizes gradient
copying to CPU memory among ranks by fine-grained gradient partitioning.
Performance benefit grows with gradient accumulation steps (more copying
between optimizer steps) or GPU count (increased parallelism).
"""
mics_shard_size: int = Field(-1, new_param="mics_shard_size")
mics_hierarchical_params_gather: bool = False
memory_efficient_linear: bool = True
"""
Use the memory-efficient linear implementation (ZeRO stage 3 only).
"""
# Validators
@validator("overlap_comm")
def overlap_comm_valid(cls, field_value, values):
if field_value is None:
assert ("stage" in values), "DeepSpeedZeroConfig: 'stage' must be defined before 'overlap_comm'"
field_value = values["stage"] == ZeroStageEnum.weights
return field_value | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/config.py | config.py |
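# Illustrative behavior of the dynamic `overlap_comm` default (derived from the
# validator above): when the user does not set it explicitly, it resolves to True
# only for ZeRO stage 3.
#
#   get_zero_config({"zero_optimization": {"stage": 3}}).overlap_comm  # -> True
#   get_zero_config({"zero_optimization": {"stage": 2}}).overlap_comm  # -> False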
# DeepSpeed Team
import torch
from deepspeed import comm as dist
def print_rank_0(message):
if dist.get_rank() == 0:
print(message)
class ContiguousMemoryAllocator(object):
def __init__(self, size, dtype, device):
self.buffer = torch.zeros(size, dtype=dtype, device=device)
#address to contiguous size available
self.contiguous_sizes = {}
self.contiguous_sizes[0] = size
#tensor id to its address
self.tensor_addresses = {}
#tensor address to its size
self.tensor_sizes = {}
#tensor address to ids
self.tensor_ids = {}
#id to tensors
self.tensor_map = {}
#id to params. Maps each tensor buffer to list of parameters that uses it
self.id_to_params = {}
self.total_size = size
self.total_free = size
self.largest_contiguous = size
self.max_allocated = 0
self.count = 0
#create a tensor of the given size from the pre-allocated buffer
#fails if there is not enough free space
#if there is not enough contiguous space, defragments the buffer and then allocates
def allocate_tensor(self, size):
free_before = self.total_free
assert size <= self.total_free, "Not enough memory in buffer. Allocation failed"
if self.largest_contiguous < size:
print_rank_0("Needs defragmentation to allocate. Before Defragmentation:")
self.print_allocation(resolution=100)
self._defragment_memory()
#set the param data to the new tensor buffer locations
self._reset_param_data()
print_rank_0("After defragmentation:")
self.print_allocation(resolution=100)
self.total_free = self.total_free - size
allocated = self.total_size - self.total_free
if allocated > self.max_allocated:
self.max_allocated = allocated
tensor_address = self._get_new_tensor_address(size)
ret_tensor = self._get_new_tensor(tensor_address, size)
print_rank_0(
f"Free before allocation {free_before}. Allocating {size}. Free after allocation {self.total_free}. Max allocated {self.max_allocated}"
)
assert self.total_free + size == free_before, "Allocation bookkeeping error"
return ret_tensor
#assigns the tensor data to the param data and keeps track of the assignment
#any change to the underlying buffer from defragmentation will cause a
#reassignment of the param data
def assign_to_param(self, tensor, param, numel, shape):
tensor_id = id(tensor)
assert tensor_id in self.tensor_map.keys(), "No such tensor allocated by the allocator."
assert tensor.numel() >= numel, "Assigned tensor buffer is not large enough"
assert not tensor_id in self.id_to_params.keys(), "This tensor has already been assigned to a param"
self.id_to_params[tensor_id] = [param]
replicated_tensor = tensor.narrow(0, 0, numel).view(shape)
param.data = replicated_tensor.data
param.contiguous_tensor_id = tensor_id
#deletes the tensor and frees up the underlying buffer
def release_tensor(self, tensor):
free_before = self.total_free
tensor_id = id(tensor)
tensor_size = tensor.numel()
self._release_tensor(tensor_id)
self._unassign_params(tensor_id)
self.total_free += tensor_size
print_rank_0(
f"Free before release {free_before}. Released {tensor.numel()}. Total free after {self.total_free}.")
assert self.total_free - tensor_size == free_before, "Release bookkeeping error"
def release_tensor_with_id(self, tensor_id):
free_before = self.total_free
assert tensor_id in self.tensor_map.keys(), "Invalid tensor id"
tensor = self.tensor_map[tensor_id]
tensor_size = tensor.numel()
self._release_tensor(tensor_id)
self._unassign_params(tensor_id)
self.total_free += tensor_size
print_rank_0(
f"Free before release {free_before}. Released {tensor.numel()}. Total free after {self.total_free}.")
assert self.total_free - tensor_size == free_before, "Release bookkeeping error"
#shows the current memory allocation at specified resolution
def print_allocation(self, resolution=200):
total_size = self.buffer.numel() * 1.0
empty = []
for addr, size in self.contiguous_sizes.items():
start = int(addr * resolution / total_size)
end = int((addr + size) * resolution / total_size)
empty.extend(range(start, end))
s = ''
for i in range(resolution):
s += '.' if i in empty else '|'
print_rank_0(s)
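#NOTE: the instance attribute self.max_allocated assigned in __init__ shadows the
#method below, so the method is effectively unreachable; callers should read the
#attribute directly.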
def max_allocated(self):
return self.max_allocated
#to be called after defragmentation that moves the tensor buffers
#this call reassigns the data of all the parameters using the tensor buffers
def _reset_param_data(self):
for id, tensor in self.tensor_map.items():
for param in self.id_to_params[id]:
param.data = tensor.narrow(0, 0, param.numel()).view(param.data.shape).data
def _unassign_params(self, tensor_id):
if tensor_id in self.id_to_params.keys():
del self.id_to_params[tensor_id]
def _release_tensor(self, tensor_id):
assert tensor_id in self.tensor_addresses, f"Tensor id {tensor_id} not found"
address = self.tensor_addresses[tensor_id]
contiguous_size = self.tensor_map[tensor_id].numel()
del self.tensor_addresses[tensor_id]
del self.tensor_ids[address]
del self.tensor_map[tensor_id]
del self.tensor_sizes[address]
self._consolidate_address(address, contiguous_size)
self.largest_contiguous = self._largest_contiguous()
def _consolidate_address(self, address, contiguous_size):
#consolidate next buffer
end_address = address + contiguous_size
if end_address in self.contiguous_sizes:
contiguous_size += self.contiguous_sizes[end_address]
del self.contiguous_sizes[end_address]
#consolidate previous buffer
for addr, size in self.contiguous_sizes.items():
if addr + size == address:
del self.contiguous_sizes[addr]
contiguous_size += size
address = addr
break
self.contiguous_sizes[address] = contiguous_size
def _defragment_memory(self):
empty_addresses = sorted(self.contiguous_sizes.keys())
tensor_addresses = sorted(self.tensor_addresses.values())
tensor_index = 0
while tensor_index < len(tensor_addresses):
empty_addr = empty_addresses[0]
empty_size = self.contiguous_sizes[empty_addr]
tensor_addr = tensor_addresses[tensor_index]
tensor_size = self.tensor_sizes[tensor_addr]
tensor_id = self.tensor_ids[tensor_addr]
tensor = self.tensor_map[self.tensor_ids[tensor_addr]]
assert tensor_size == tensor.numel(), \
"Size mismatch. {tensor_size} is allocated at addr {tensor_addr} but tensor size is {tensor.numel()} "
assert empty_addr != tensor_addr, \
f"Cannot have same empty address {empty_addr} and tensor address {tensor_addr}"
if empty_addr < tensor_addr:
if empty_size >= tensor_size:
dest_buffer = self.buffer.narrow(0, empty_addr, tensor_size)
src_buffer = self.buffer.narrow(0, tensor_addr, tensor_size)
dest_buffer.data.copy_(src_buffer.data)
else:
#print_rank_0(f'empty addr : {empty_addr}, empty size {empty_size} tensor addr {tensor_addr} tensor size {tensor_size}')
src_addr = tensor_addr
dest_addr = empty_addr
while src_addr < (tensor_addr + tensor_size):
copy_size = min(empty_size, tensor_addr + tensor_size - src_addr)
dest_buffer = self.buffer.narrow(0, dest_addr, copy_size)
src_buffer = self.buffer.narrow(0, src_addr, copy_size)
dest_buffer.data.copy_(src_buffer.data)
src_addr += copy_size
dest_addr += copy_size
self._replace_old_address_with_new(tensor_id, empty_addr)
tensor_index += 1
else:
tensor_index += 1
empty_addresses = sorted(self.contiguous_sizes.keys())
def _replace_old_address_with_new(self, tensor_id, new_address):
tensor = self.tensor_map[tensor_id]
tensor_size = tensor.numel()
tensor.data = self.buffer.narrow(0, new_address, tensor_size).data
self._release_tensor(tensor_id)
self._mark_as_occupied(new_address, tensor_size)
self.tensor_ids[new_address] = tensor_id
self.tensor_map[tensor_id] = tensor
self.tensor_addresses[tensor_id] = new_address
self.tensor_sizes[new_address] = tensor_size
def _get_new_tensor_address(self, size):
tensor_address = None
for address, contiguous_size in self.contiguous_sizes.items():
if contiguous_size >= size and \
(tensor_address is None or \
contiguous_size < self.contiguous_sizes[tensor_address]):
tensor_address = address
assert tensor_address is not None, "address cannot be None"
return tensor_address
def _get_new_tensor(self, address, size):
available_contiguous_size = self.contiguous_sizes[address]
assert size <= available_contiguous_size, \
f"Tensor numel {size} is large than available contiguous size {available_contiguous_size}"
self.count += 1
new_tensor = self.buffer.narrow(0, address, size)
tensor_id = id(new_tensor)
self.tensor_addresses[tensor_id] = address
self.tensor_sizes[address] = size
self.tensor_ids[address] = tensor_id
self.tensor_map[tensor_id] = new_tensor
self._mark_as_occupied(address, size)
return new_tensor
def _largest_contiguous(self):
if len(self.contiguous_sizes) > 0:
return max([size for _, size in self.contiguous_sizes.items()])
else:
return 0
def _mark_as_occupied(self, address, size):
available_contiguous_size = self.contiguous_sizes[address]
del self.contiguous_sizes[address]
if available_contiguous_size != size:
self.contiguous_sizes[address + size] = available_contiguous_size - size
self.largest_contiguous = self._largest_contiguous() | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/contiguous_memory_allocator.py | contiguous_memory_allocator.py |
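#Illustrative usage sketch (commented out; the sizes, dtype and parameter below
#are hypothetical examples):
#
#   allocator = ContiguousMemoryAllocator(size=1024, dtype=torch.half, device='cpu')
#   buf = allocator.allocate_tensor(256)  #carve 256 elements out of the buffer
#   param = torch.nn.Parameter(torch.zeros(16, 16, dtype=torch.half))
#   allocator.assign_to_param(buf, param, numel=256, shape=(16, 16))
#   allocator.release_tensor(buf)  #frees the 256 elements for reuse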
# DeepSpeed Team
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from dataclasses import dataclass
from typing import List
import numpy as np
import torch
from torch import Tensor
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.utils import logger
def _log_rank0(msg):
if dist.get_rank() == 0:
logger.info(msg)
@torch.jit.script
def scale_tensors(tensors: List[Tensor], scale: int):
for t in tensors:
t.div_(scale)
@dataclass
class MiCS_CommGroups:
""""""
param_shard_group = None
param_shard_size = -1
param_shard_rank = -1
param_repli_group = None
param_repli_size = -1
param_repli_rank = -1
param_intra_node_group = None
param_inter_node_shard_group = None
def create_mics_comm_groups(
shard_size,
dp_group,
hierarchical_allgather=False,
mpu=None,
):
"""
Create the shard group and replicate group for MiCS from the given shard size
and data-parallel group.
TODO: consider broadcasting the config from rank0
Returns:
MiCS_CommGroups
"""
# env var for debugging purpose
ndevices_per_node = int(os.environ.get("NDEV_PER_NODE", get_accelerator().device_count()))
_log_rank0(f'creating MiCS communication groups with per node device size {ndevices_per_node}')
groups = MiCS_CommGroups()
if mpu is not None:
assert dp_group == mpu.get_data_parallel_group()
# full size of the world
world_size = dist.get_world_size()
# global rank
global_rank = dist.get_rank()
config = _generate_mics_config(world_size, ndevices_per_node, shard_size, 1)
ranks_of_shard_group = config['shard_groups']
ranks_of_repli_group = config['replicate_groups']
if len(ranks_of_repli_group) == 0:
assert len(ranks_of_shard_group) == 1, "replicate groups are empty only for single shard group"
for r in ranks_of_shard_group[0]:
ranks_of_repli_group.append([r])
# for simplicity
assert _sizes_all_same(ranks_of_repli_group), "replicate groups must have the same size"
assert _sizes_all_same(ranks_of_shard_group), "shard groups must have the same size"
assert sum([len(g) for g in ranks_of_shard_group]) == dist.get_world_size(), "all ranks must be covered by the shard groups"
if len(ranks_of_shard_group) > 1: # if only shard on one group then no need for replicate groups
assert len(ranks_of_shard_group) == len(
ranks_of_repli_group[0]), "number of shard groups must equal to the size of each replicate group"
global_rank = dist.get_rank()
# create shard groups
for shard_ranks in ranks_of_shard_group:
_group = dist.new_group(shard_ranks)
if global_rank in shard_ranks:
groups.param_shard_group = _group
groups.param_shard_size = len(shard_ranks)
groups.param_shard_rank = dist.get_rank(_group)
logger.info(f'rank {global_rank}, shard group'
f' {groups.param_shard_rank}/{dist.get_world_size(group=_group)}')
# create replicate groups
for repli_ranks in ranks_of_repli_group:
if len(repli_ranks) > 1:
_group = dist.new_group(repli_ranks)
if global_rank in repli_ranks:
groups.param_repli_group = _group
groups.param_repli_size = len(repli_ranks)
groups.param_repli_rank = dist.get_rank(group=_group)
logger.info(f'rank {global_rank} '
f'replicate group {groups.param_repli_rank}/{dist.get_world_size(group=_group)}')
else:
groups.param_repli_group = None
groups.param_repli_size = 1
groups.param_repli_rank = 0
logger.info(f'rank {global_rank} replicate group 0/1')
# sanity check: shard group size matches the size of each shard group
assert groups.param_shard_size == len(ranks_of_shard_group[0])
if hierarchical_allgather:
# create hierarchy inter-node, intra-node groups
# n_span_nodes = config['shard_span']
n_span_nodes = config['span_nodes']
assert n_span_nodes > 1, "sharding spans on single node, no need for hierarchy allgather"
assert len(ranks_of_shard_group[0]) % n_span_nodes == 0
n_gpu_per_node = len(ranks_of_shard_group[0]) // n_span_nodes
intra_node_ranks_group = []
inter_node_ranks_group = []
for shard_group in ranks_of_shard_group:
_intra_node_ranks = []
for i in range(0, len(shard_group), n_gpu_per_node):
_intra_node_ranks.append(shard_group[i:i + n_gpu_per_node])
_inter_node_ranks = []
for i in range(n_gpu_per_node):
_ranks = [_g[i] for _g in _intra_node_ranks]
_inter_node_ranks.append(_ranks)
intra_node_ranks_group.append(_intra_node_ranks)
inter_node_ranks_group.append(_inter_node_ranks)
_log_rank0(f"create for hierarchy all-gather groups: intra nodes {intra_node_ranks_group}")
_log_rank0(f"create for hierarchy all-gather groups: inter nodes {inter_node_ranks_group}")
# create communicators
for shard_group in intra_node_ranks_group:
for intra_node_ranks in shard_group:
_group = dist.new_group(intra_node_ranks)
if global_rank in intra_node_ranks:
groups.param_intra_node_group = _group
_log_rank0(f'create group for intra node ranks {intra_node_ranks}')
for shard_group in inter_node_ranks_group:
for inter_node_ranks in shard_group:
_group = dist.new_group(inter_node_ranks)
if global_rank in inter_node_ranks:
groups.param_inter_node_shard_group = _group
_log_rank0(f'create group for inter node ranks {inter_node_ranks}')
return groups
def _generate_mics_config(world_size, ndev_per_node, shard_size, pp_size=1):
"""Generating the configuration for sharding This shard config generation assume
that the pipeline stages are partitioned in order, i.e., first ranks
hold the stage0, etc.
Args:
shard_size (int): zero3 data-parallel shard size, FIXME:
change the name later
pp_size (int): pipeline parallel size; currently this only works with
pipeline parallelism + zero
"""
assert world_size % pp_size == 0
assert (world_size // pp_size) % shard_size == 0, \
f"dp group size is not dividable by dp_shard_size, "\
f" (world_size {world_size}, pp_size {pp_size}, dp_shard_size {shard_size})"
config = {}
shard_groups = np.arange(world_size).reshape(-1, shard_size)
replicate_groups = []
for i in range(shard_size):
same_shard_ranks = shard_groups[:, i].tolist()
n_ranks = len(same_shard_ranks)
replicate_size = n_ranks // pp_size
replicate_groups.extend([same_shard_ranks[j:j + replicate_size] for j in range(0, n_ranks, replicate_size)])
config['replicate_groups'] = replicate_groups
config['shard_groups'] = shard_groups.tolist()
config["span_nodes"] = len(shard_groups[0]) // ndev_per_node
return config
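# Worked example (derived from the logic above): _generate_mics_config(8, 4, 4)
# with pp_size=1 yields
#   shard_groups     = [[0, 1, 2, 3], [4, 5, 6, 7]]
#   replicate_groups = [[0, 4], [1, 5], [2, 6], [3, 7]]
#   span_nodes       = 1
# i.e. parameters are sharded within each 4-GPU node and replicated across the
# two nodes.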
def _sizes_all_same(groups):
"""all groups have same length"""
all_same = True
for g in groups:
if len(g) != len(groups[0]):
return False
return all_same | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/mics_utils.py | mics_utils.py |
# DeepSpeed Team
import sys
import torch
from collections import OrderedDict
from deepspeed.runtime.utils import see_memory_usage
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.partition_parameters import _init_external_params
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.zero.partitioned_param_coordinator import PartitionedParameterCoordinator, iter_params
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
FWD_MODULE_STACK = list()
def is_builtin_type(obj):
# https://stackoverflow.com/a/17795199
return obj.__class__.__module__ == '__builtin__' or obj.__class__.__module__ == "builtins"
def isinstance_namedtuple(obj: object) -> bool:
"""
Is this an instance of namedtuple/NamedTuple?
From: https://stackoverflow.com/a/62692640
Args:
obj (object): An object.
Returns:
bool: True if namedtuple/NamedTuple else False.
"""
return isinstance(obj, tuple) and hasattr(obj, '_asdict') and hasattr(obj, '_fields')
# ensure we only warn once, otherwise every iteration will trigger a warning
warned = False
def _apply_to_tensors_only(module, functional, backward_function, outputs):
"""
Apply a torch.autograd.Function that calls a `backward_function` to every Tensor in `outputs`.
Args:
module (torch.nn.Module): A torch module
functional (Type[torch.autograd.Function]): The function class to apply.
backward_function (Callable[[torch.nn.Module], None]): A backward_function to pass to
`functional.apply`.
outputs (Any): The output of `module`.
Returns:
Any: The output of `module`.
"""
if isinstance(outputs, (tuple, list)):
touched_outputs = []
for output in outputs:
touched_output = _apply_to_tensors_only(module, functional, backward_function, output)
touched_outputs.append(touched_output)
if isinstance_namedtuple(outputs):
# namedtuples require a slightly different syntax.
return outputs.__class__(*touched_outputs)
return outputs.__class__(touched_outputs)
elif isinstance(outputs, dict):
# apply inplace to avoid recreating dict inherited objects
for key in outputs.keys():
outputs[key] = _apply_to_tensors_only(module, functional, backward_function, outputs[key])
return outputs
elif isinstance(outputs, torch.Tensor):
# this also applies to torch.Tensor's subclasses like torch.nn.parameter.Parameter
touched_outputs = functional.apply(module, backward_function, outputs)
# restore zero param attributes if those get stripped by `backward_function`
if not is_zero_param(touched_outputs) and is_zero_param(outputs):
touched_outputs.ds_param_alias = outputs
return touched_outputs
else:
if not is_builtin_type(outputs):
global warned
if not warned and dist.get_rank() == 0:
logger.warning(
f"A module has unknown inputs or outputs type ({type(outputs)}) and the tensors embedded in it cannot be detected. "
"The ZeRO-3 hooks designed to trigger before or after backward pass of the module relies on knowing the input and "
"output tensors and therefore may not get triggered properly.")
warned = True
return outputs
#for each tensor in outputs, run the forward_function and register backward_function as a hook
def _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function, outputs):
if type(outputs) is tuple:
touched_outputs = []
for output in outputs:
touched_output = _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function,
output)
touched_outputs.append(touched_output)
return tuple(touched_outputs)
elif type(outputs) is torch.Tensor:
forward_function(outputs)
if outputs.requires_grad:
outputs.register_hook(backward_function)
return outputs
else:
return outputs
class ZeROOrderedDict(OrderedDict):
def __init__(self, parent_module, *args, **kwargs):
"""A replacement for ``collections.OrderedDict`` to detect external ZeRO params.
Args:
parent_module (``torch.nn.Module``): the module whose ``_parameters`` collection is being replaced
"""
super().__init__(*args, **kwargs)
self._parent_module = parent_module
self._in_forward = False
def __getitem__(self, key):
param = super().__getitem__(key)
# Params can be registered as None (e.g., bias)
if param is None:
return param
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if self._parent_module._parameters._in_forward:
register_external_parameter(FWD_MODULE_STACK[-1], param)
param.all_gather()
print_rank_0(f'Registering external parameter from getter {key} ds_id = {param.ds_id}', force=False)
return param
def _inject_parameters(module, cls):
for module in module.modules():
if cls == ZeROOrderedDict:
new_param = cls(parent_module=module)
else:
new_param = cls()
for key, param in module._parameters.items():
new_param[key] = param
module._parameters = new_param
class PreBackwardFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, module, pre_backward_function, outputs):
ctx.module = module
ctx.pre_backward_function = pre_backward_function
if not hasattr(module, "applied_pre_backward_ref_cnt"):
module.applied_pre_backward_ref_cnt = 0
module.applied_pre_backward_ref_cnt += 1
#print(f"After Forward: {ctx.module.__class__.__name__}")
outputs = outputs.detach()
return outputs
@staticmethod
def backward(ctx, *args):
#print(f"Before Backward: {ctx.module.__class__.__name__}")
ctx.pre_backward_function(ctx.module)
return (None, None) + args
class PostBackwardFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, module, pre_backward_function, output):
ctx.module = module
if output.requires_grad:
#TODO: sometimes post backward does not seem to be triggered; debug in detail
#Should only cause an increase in memory, not a correctness issue
#if output.grad_fn.__class__.__name__ == 'ViewBackward':
# ctx.view=True
# print(f"Warning view tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly")
#assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors."
#if module.ds_grads_remaining == 0:
# print(f"Before Forward: {ctx.module.__class__.__name__}")
module.ds_grads_remaining += 1
ctx.pre_backward_function = pre_backward_function
output = output.detach()
return output
@staticmethod
def backward(ctx, *args):
ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1
if ctx.module.ds_grads_remaining == 0:
ctx.pre_backward_function(ctx.module)
#print(f"After Backward: {ctx.module.__class__.__name__}")
return (None, None) + args
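# Illustrative note (derived from the hook wiring below): these autograd Functions
# are not invoked directly. They are attached to module outputs/inputs through
# _apply_to_tensors_only, e.g. the pre-backward hook effectively does
#
#   _apply_to_tensors_only(module, PreBackwardFunction,
#                          _run_before_backward_function, output)
#
# so that the registered callback runs on the module when autograd reaches it
# during the backward pass.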
class DeepSpeedZeRoOffload(object):
def __init__(self,
module,
timers,
ds_config,
overlap_comm=True,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
offload_param_config=None,
mpu=None):
see_memory_usage("DeepSpeedZeRoOffload initialize [begin]", force=True)
print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False)
self.module = module
self.dtype = list(module.parameters())[0].dtype
self.offload_device = None
self.offload_param_pin_memory = False
if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none:
self.offload_device = offload_param_config.device
self.offload_param_pin_memory = offload_param_config.pin_memory
self._convert_to_zero_parameters(ds_config, module, mpu)
for m in module.modules():
_init_external_params(m)
_inject_parameters(module, ZeROOrderedDict)
self.param_numel_persistence_threshold = int(param_persistence_threshold)
self.model_persistence_threshold = int(model_persistence_threshold)
self.persistent_parameters = self.mark_persistent_parameters(self.param_numel_persistence_threshold,
self.model_persistence_threshold)
self.param_coordinators = {}
self._prefetch_bucket_sz = int(prefetch_bucket_size)
self._max_reuse_distance_in_numel = int(max_reuse_distance)
self._max_available_parameters_in_numel = int(max_live_parameters)
self.__allgather_stream = get_accelerator().Stream() if overlap_comm else get_accelerator().default_stream()
self.forward_hooks = []
self.backward_hooks = []
self.setup_zero_stage3_hooks()
print_rank_0(
f'Created module hooks: forward = {len(self.forward_hooks)}, backward = {len(self.backward_hooks)}',
force=False)
see_memory_usage("DeepSpeedZeRoOffload initialize [end]", force=True)
@instrument_w_nvtx
def partition_all_parameters(self):
"""Partitioning Parameters that were not partitioned usually if parameters
of modules whose input parameters do not require grad computation do not
trigger post call and will therefore will remain unpartitioned"""
self.get_param_coordinator(training=self.module.training).release_and_reset_all(self.module)
for param in iter_params(self.module, recurse=True):
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(f"{param.ds_summary()} expected to be released")
def get_param_coordinator(self, training):
if not training in self.param_coordinators:
self.param_coordinators[training] = PartitionedParameterCoordinator(
prefetch_bucket_sz=self._prefetch_bucket_sz,
max_reuse_distance_in_numel=self._max_reuse_distance_in_numel,
max_available_parameters_in_numel=self._max_available_parameters_in_numel,
allgather_stream=self.__allgather_stream,
prefetch_nvme=self.offload_device == OffloadDeviceEnum.nvme,
)
return self.param_coordinators[training]
def empty_partition_cache(self):
self.partition_all_parameters()
def _convert_to_zero_parameters(self, ds_config, module, mpu):
non_zero_params = [p for p in module.parameters() if not is_zero_param(p)]
if non_zero_params:
zero_params = [p for p in module.parameters() if is_zero_param(p)]
if zero_params:
zero_params[0].convert_to_zero_parameters(param_list=non_zero_params)
else:
group = None
if mpu:
group = mpu.get_data_parallel_group()
Init(module=module,
data_parallel_group=group,
dtype=self.dtype,
config_dict_or_path=ds_config,
remote_device=self.offload_device,
pin_memory=self.offload_param_pin_memory,
mpu=mpu)
def destroy(self):
self._remove_module_hooks()
def _remove_module_hooks(self):
num_forward_hooks = len(self.forward_hooks)
num_backward_hooks = len(self.backward_hooks)
for hook in self.forward_hooks:
hook.remove()
for hook in self.backward_hooks:
hook.remove()
print_rank_0(f'Deleted module hooks: forward = {num_forward_hooks}, backward = {num_backward_hooks}',
force=False)
def setup_zero_stage3_hooks(self):
self.hierarchy = 0
#reset step if in inference mode
@instrument_w_nvtx
def _end_of_forward_hook(module, *args):
if not torch._C.is_grad_enabled():
self.get_param_coordinator(training=False).reset_step()
#likely one of them should be enough but just to be safe
self._register_hooks_recursively(self.module)
self.module.register_forward_hook(_end_of_forward_hook)
# Add top module to stack trace
global FWD_MODULE_STACK
FWD_MODULE_STACK.append(self.module)
def mark_persistent_parameters(self, param_threshold, model_threshold):
persistent_params = []
total_persistent_parameters = 0
params_count = 0
for _, param in self.module.named_parameters(recurse=True):
if param.ds_numel + total_persistent_parameters > model_threshold:
continue
if param.ds_numel <= param_threshold:
params_count += 1
param.ds_persist = True
persistent_params.append(param)
total_persistent_parameters += param.ds_numel
print_rank_0(
f"Parameter Offload: Total persistent parameters: {total_persistent_parameters} in {params_count} params",
force=True)
return persistent_params
def _register_hooks_recursively(self, module, count=[0]):
my_count = count[0]
module.id = my_count
#print(f"{module.__class__} : {module.id}")
for child in module.children():
count[0] = count[0] + 1
self._register_hooks_recursively(child, count=count)
@instrument_w_nvtx
def _pre_forward_module_hook(module, *args):
self.pre_sub_module_forward_function(module)
@instrument_w_nvtx
def _post_forward_module_hook(module, input, output):
global FWD_MODULE_STACK
FWD_MODULE_STACK.pop()
if output is None:
output = []
elif not isinstance(output, (list, tuple)):
if torch.is_tensor(output):
output = [output]
else:
#print(f'got UNKNOWN type {type(output)}')
outputs = []
output = output if isinstance(output, dict) else vars(output)
for name, val in output.items():
if not name.startswith('__') and torch.is_tensor(val):
outputs.append(val)
output = outputs
for item in filter(lambda item: is_zero_param(item) or hasattr(item, 'ds_param_alias'), output):
key = id(item) if hasattr(item, 'ds_id') else id(item.ds_param_alias)
actual_external_param = item if hasattr(item, 'ds_id') else item.ds_param_alias
if not any(key in m._external_params for m in FWD_MODULE_STACK):
actual_external_param.is_external_param = True
module_to_register = FWD_MODULE_STACK[-1]
register_external_parameter(module_to_register, actual_external_param)
print_rank_0(
f'Registering dangling parameter for module {module_to_register.__class__.__name__}, ds_id = {actual_external_param.ds_id}.',
force=False)
# It's possible that the parameter was already external to the completed module. If so, remove its
# registration as it will be covered by the outer module instead.
if key in module._external_params:
print_rank_0(
f' Unregistering nested dangling parameter from module {module.__class__.__name__}, ds_id = {actual_external_param.ds_id}',
force=False)
unregister_external_parameter(module, actual_external_param)
actual_external_param.all_gather()
self.post_sub_module_forward_function(module)
def _pre_backward_module_hook(module, inputs, output):
@instrument_w_nvtx
def _run_before_backward_function(sub_module):
# some models (e.g. Albert) may run multiple forwards on the same layer in a loop
# before doing backwards, so each backward will need a pre-fetch - using reference
# counting to support this scenario
#print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}")
if sub_module.applied_pre_backward_ref_cnt > 0:
self.pre_sub_module_backward_function(sub_module)
sub_module.applied_pre_backward_ref_cnt -= 1
#print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}")
return _apply_to_tensors_only(module, PreBackwardFunction, _run_before_backward_function, output)
#This is an alternative to _post_backward_module_hook;
#it uses tensor.register_hook instead of torch.autograd.Function
def _alternate_post_backward_module_hook(module, inputs):
module.ds_grads_remaining = 0
#print(f"Before Forward {module.__class__.__name__}")
def _run_after_backward_hook(*unused):
module.ds_grads_remaining = module.ds_grads_remaining - 1
if module.ds_grads_remaining == 0:
#print(f"After backward {module.__class__.__name__}")
self.post_sub_module_backward_function(module)
def _run_before_forward_function(input):
if input.requires_grad:
module.ds_grads_remaining += 1
return _apply_forward_and_backward_to_tensors_only(module, _run_before_forward_function,
_run_after_backward_hook, inputs)
def _post_backward_module_hook(module, inputs):
module.ds_grads_remaining = 0
@instrument_w_nvtx
def _run_after_backward_function(sub_module):
if sub_module.ds_grads_remaining == 0:
self.post_sub_module_backward_function(sub_module)
return _apply_to_tensors_only(module, PostBackwardFunction, _run_after_backward_function, inputs)
# Pre forward hook
self.forward_hooks.append(module.register_forward_pre_hook(_pre_forward_module_hook))
# Post forward hook
self.forward_hooks.append(module.register_forward_hook(_post_forward_module_hook))
# Pre backward hook
self.backward_hooks.append(module.register_forward_hook(_pre_backward_module_hook))
# post backward hook
self.backward_hooks.append(module.register_forward_pre_hook(_post_backward_module_hook))
@torch.no_grad()
def pre_sub_module_forward_function(self, sub_module):
see_memory_usage(f"Before sub module function {sub_module.__class__.__name__}", force=False)
global FWD_MODULE_STACK
FWD_MODULE_STACK.append(sub_module)
param_coordinator = self.get_param_coordinator(training=sub_module.training)
param_coordinator.trace_prologue(sub_module)
if param_coordinator.is_record_trace():
param_coordinator.record_module(sub_module)
param_coordinator.fetch_sub_module(sub_module)
see_memory_usage(f"Before sub module function {sub_module.__class__.__name__} after fetch", force=False)
@torch.no_grad()
def post_sub_module_forward_function(self, sub_module):
see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} before release",
force=False)
param_coordinator = self.get_param_coordinator(training=sub_module.training)
param_coordinator.release_sub_module(sub_module)
see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release",
force=False)
@torch.no_grad()
def pre_sub_module_backward_function(self, sub_module):
assert sub_module.training, "backward pass is invalid for module in evaluation mode"
param_coordinator = self.get_param_coordinator(training=True)
param_coordinator.trace_prologue(sub_module)
if param_coordinator.is_record_trace():
param_coordinator.record_module(sub_module)
param_coordinator.fetch_sub_module(sub_module)
@torch.no_grad()
def post_sub_module_backward_function(self, sub_module):
assert sub_module.training, "backward pass is invalid for module in evaluation mode"
see_memory_usage(
f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release",
force=False)
self.get_param_coordinator(training=True).release_sub_module(sub_module)
see_memory_usage(
f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} after release",
force=False) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/parameter_offload.py | parameter_offload.py |
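# Illustrative construction sketch (commented out; `model`, `ds_config` and the
# threshold values below are hypothetical placeholders, not recommended settings):
#
#   offload = DeepSpeedZeRoOffload(module=model,
#                                  timers=None,
#                                  ds_config=ds_config,
#                                  overlap_comm=True,
#                                  prefetch_bucket_size=int(5e7),
#                                  max_reuse_distance=int(1e9),
#                                  max_live_parameters=int(1e9),
#                                  param_persistence_threshold=int(1e5),
#                                  offload_param_config=None)
#   ...
#   offload.empty_partition_cache()  # re-partition any parameters left gathered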
# DeepSpeed Team
import math
import os
import types
from typing import Callable, Iterable
from enum import Enum
import functools
import itertools
from typing import List
import torch
from torch import Tensor
from deepspeed import comm as dist
from torch.nn import Module
from torch.nn import Parameter
from .linear import zero3_linear_wrap
import deepspeed
from ..utils import get_only_unique_item, see_memory_usage
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.config_utils import get_config_default
from deepspeed.utils import instrument_w_nvtx, logger
from deepspeed.comm.comm import init_distributed
from deepspeed.utils.debug import (debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name,
debug_param2name_id, debug_param2name_id_shape_status)
from deepspeed.accelerator import get_accelerator
from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus
param_count = 0
partitioned_param_data_shape = [0]
zero_init_context = []
all_wrapped_classes = set()
class NoGatherHandle:
def __init__(self, param: Parameter) -> None:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to be available")
param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(),
non_blocking=True).view(param.ds_shape)
self.__param = param
def wait(self) -> None:
get_accelerator().current_stream().synchronize()
self.__param.ds_status = ZeroParamStatus.AVAILABLE
class NoGatherCoalescedHandle:
def __init__(self, params: List[Parameter]) -> None:
self.__params = params
self.__complete = False
for param in self.__params:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to not be available")
param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(),
non_blocking=True).view(param.ds_shape)
@instrument_w_nvtx
def wait(self) -> None:
if self.__complete:
return
get_accelerator().current_stream().synchronize()
for param in self.__params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
param.ds_status = ZeroParamStatus.AVAILABLE
self.__complete = True
def _dist_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group=None):
return instrument_w_nvtx(dist.allgather_fn)(output_tensor, input_tensor, group=group, async_op=True)
def print_rank_0(message, debug=False, force=False):
rank = dist.get_rank()
if rank == 0 and (debug or force):
print(message)
# other variations
# - print for all ranks w/o interleaving
# printflock(f"[{rank}] {message}")
# - print to log file per rank
# log_rank_file(rank, message)
def debug_rank0(msg: str) -> None:
if dist.get_rank() == 0:
logger.debug(msg)
def is_zero_param(parameter):
if not torch.is_tensor(parameter):
return False
return hasattr(parameter, 'ds_id')
def _init_external_params(module):
if not hasattr(module, '_external_params'):
module._external_params = {}
def external_parameters(self):
return self._external_params.items()
def all_parameters(self):
return itertools.chain(self.named_parameters(self, recurse=False), external_parameters(self))
module.ds_external_parameters = types.MethodType(external_parameters, module)
module.all_parameters = types.MethodType(all_parameters, module)
def register_external_parameter(module, parameter):
"""Instruct DeepSpeed to coordinate ``parameter``'s collection and partitioning in
the forward and backward passes of ``module``.
This is used when a parameter is accessed outside of its owning module's
``forward()``. DeepSpeed must know to collect it from its partitioned
state and when to release the memory.
.. note::
This is only applicable to training with ZeRO stage 3.
Args:
module (``torch.nn.Module``): The module that requires ``parameter`` in its forward pass.
parameter (``torch.nn.Parameter``): The parameter to register.
Raises:
RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
Examples
========
#. Register a weight that is used in another module's forward pass (line 6).
Parameter ``layer1.weight`` is used by ``layer2`` (line 11).
.. code-block:: python
:linenos:
:emphasize-lines: 6,11
class ModuleZ3(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.layer1 = SomeLayer()
self.layer2 = OtherLayer()
deepspeed.zero.register_external_parameter(self, self.layer1.weight)
def forward(self, input):
x = self.layer1(input)
# self.layer1.weight is required by self.layer2.forward
y = self.layer2(x, self.layer1.weight)
return y
"""
if not isinstance(parameter, torch.nn.Parameter):
raise RuntimeError('Parameter is not a torch.nn.Parameter')
if not hasattr(module, '_external_params'):
_init_external_params(module)
key = id(parameter)
module._external_params[key] = parameter
def unregister_external_parameter(module, parameter):
"""Reverses the effects of :meth:`register_external_parameter`.
Args:
module (``torch.nn.Module``): The module to affect.
parameter (``torch.nn.Parameter``): The parameter to unregister.
Raises:
RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
RuntimeError: If ``parameter`` is not a registered external parameter of ``module``.
"""
if not isinstance(parameter, torch.nn.Parameter):
raise RuntimeError('Parameter is not a torch.nn.Parameter')
if not hasattr(module, '_external_params') or id(parameter) not in module._external_params:
raise RuntimeError('Parameter is not a registered external parameter of module.')
key = id(parameter)
del module._external_params[key]
class ZeroParamType(Enum):
# same as regular pytorch parameters
NORMAL = 1
# parameters are partitioned across the data parallel processes
PARTITIONED = 2
# the parameter is held by a unique process rank
# and is not available on the other processes
REMOTE = 3
class ZeroParamStatus(Enum):
# parameters are fully present and ready for use on all processes
AVAILABLE = 1
# parameters are either partitioned or remote on some or all processes
NOT_AVAILABLE = 2
# parameters are being gathered.
INFLIGHT = 3
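# Typical lifecycle of a ZeRO-3 parameter, as driven by the partition() and
# all_gather()/all_gather_coalesced() methods attached in
# Init._convert_to_deepspeed_param below (descriptive sketch only):
#   NOT_AVAILABLE --all_gather / all_gather_coalesced--> INFLIGHT
#   INFLIGHT      --handle.wait()----------------------> AVAILABLE
#   AVAILABLE     --partition()------------------------> NOT_AVAILABLE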
_orig_torch_empty = torch.empty
_orig_torch_zeros = torch.zeros
_orig_torch_ones = torch.ones
_orig_torch_full = torch.full
def zero_wrapper_for_fp_tensor_constructor(fn: Callable, target_fp_dtype: torch.dtype) -> Callable:
def wrapped_fn(*args, **kwargs) -> Tensor:
if kwargs.get("device", None) is None:
kwargs['device'] = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
tensor: Tensor = fn(*args, **kwargs)
if tensor.is_floating_point():
tensor = tensor.to(target_fp_dtype)
return tensor
return wrapped_fn
def get_new_tensor_fn_for_dtype(dtype: torch.dtype) -> Callable:
def new_tensor(cls, *args) -> Tensor:
device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
tensor = _orig_torch_empty(0, device=device).new_empty(*args)
if tensor.is_floating_point():
tensor = tensor.to(dtype)
return tensor
return new_tensor
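# The two factories above are installed by
# InsertPostInitMethodToModuleSubClasses.__enter__ (below): torch.empty/zeros/
# ones/full and torch.Tensor.__new__ are temporarily replaced so that tensors
# created while constructing the model land on the local accelerator and, if
# floating point, are cast to the target dtype. A rough illustration of the
# effect, assuming LOCAL_RANK points at accelerator device 0:
#
#     with deepspeed.zero.Init(dtype=torch.half):
#         w = torch.empty(1024)   # allocated on the accelerator as fp16
#
# The original constructors are restored in __exit__ / remove_wrappers().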
# https://stackoverflow.com/a/63851681/9201239
def get_all_subclasses(cls):
subclass_list = []
def recurse(cl):
for subclass in cl.__subclasses__():
subclass_list.append(subclass)
recurse(subclass)
recurse(cls)
return set(subclass_list)
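# get_all_subclasses is used below to enumerate every existing subclass of
# torch.nn.Module so that their __init__ methods can be wrapped (and later
# unwrapped) by the Init context; subclasses defined after entering the
# context are covered by patching Module.__init_subclass__ instead.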
@instrument_w_nvtx
def free_param(param: Parameter) -> None:
"""Free underlying storage of a parameter."""
assert not param.ds_active_sub_modules, param.ds_summary()
if get_accelerator().on_accelerator(param.data):
# need to make sure that we don't free the parameter while it is still
# being used for computation
param.data.record_stream(get_accelerator().current_stream())
# param.data doesn't store anything meaningful in partitioned state
param.data = torch.empty(0, dtype=param.dtype, device=param.device)
param.ds_status = ZeroParamStatus.NOT_AVAILABLE
reuse_buffers = False
temp_contiguous_tensor = None
empty_buffers = {}
# Inserts _post_init_method at the end of init method
# for all sub classes of torch.nn.Module
class InsertPostInitMethodToModuleSubClasses(object):
def __init__(self, enabled=True, mem_efficient_linear=True, ds_config=None, dtype=None):
self.mem_efficient_linear = mem_efficient_linear
self.enabled = enabled
self._set_dtype(ds_config, dtype)
assert self.dtype in [
torch.half, torch.bfloat16, torch.float
], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.bfloat16, torch.float]"
self.wrapped_cls = set()
def __enter__(self):
if not self.enabled:
return
def apply_with_gather(orig_module_apply_fn: Callable) -> Callable:
"""many models make use of child modules like Linear or Embedding which
perform their own weight initialization in their __init__ methods,
but will then have more weight initialization in a parent module's __init__
method that modifies weights of child modules, which is typically done
using the Module.apply method.
since the Init context manager partitions child modules immediately after
they are initialized, without modifying apply we would entirely skip
any initialization done by parent modules.
to get around this issue, we wrap the function passed to Module.apply
so that the applied function is applied to child modules correctly.
"""
def get_wrapped_fn_to_apply(fn_to_apply: Callable) -> Callable:
if hasattr(fn_to_apply, "wrapped"):
return fn_to_apply
@functools.wraps(fn_to_apply)
def wrapped_fn_to_apply(module_to_apply_fn_to: Module) -> None:
"""gathers parameters before calling apply function. afterwards
parameters are broadcasted to ensure consistency across all ranks
then re-partitioned.
takes the following steps:
1. allgathers parameters for the current module being worked on
2. calls the original function
3. broadcasts root rank's parameters to the other ranks
4. re-partitions the parameters
"""
if not all(is_zero_param(p) for p in module_to_apply_fn_to.parameters(recurse=False)):
raise RuntimeError(f"not all parameters for {module_to_apply_fn_to.__class__.__name__}, "
f"were zero params, is it possible that the parameters were "
f"overwritten after they were initialized? "
f"params: {[p for p in module_to_apply_fn_to.parameters(recurse=False)]} ")
params_to_apply_fn_to: Iterable[Parameter] = list(
sorted(module_to_apply_fn_to.parameters(recurse=False), key=lambda p: p.ds_id))
for param in params_to_apply_fn_to:
param.all_gather()
fn_to_apply(module_to_apply_fn_to)
for param in params_to_apply_fn_to:
dist.broadcast(param.data, 0, group=param.ds_process_group)
for param in params_to_apply_fn_to:
param.partition(has_been_updated=True)
wrapped_fn_to_apply.wrapped = True
return wrapped_fn_to_apply
@functools.wraps(orig_module_apply_fn)
def wrapped_apply(module: Module, fn_to_apply: Callable) -> None:
orig_module_apply_fn(module, get_wrapped_fn_to_apply(fn_to_apply))
return wrapped_apply
def partition_after(f):
@functools.wraps(f)
def wrapper(module, *args, **kwargs):
# important logic: We want to run post_init only after child's __init__ is
# completed, and do nothing after __init__ of any of its parents and grandparents in
# the inheritance ancestry. This way the partitioning will need to happen only once
# when the whole object is ready to be partitioned and not before. This is because
# often the child module will need to tweak the weights - for example running a
# custom weights init function. So if a parent created the weights param, the child
# won't need to gather it in order to tweak it
print_rank_0(f'Before initializing {module.__class__.__name__}', force=False)
is_child_module = False
if not hasattr(module, "_ds_child_entered"):
# child's __init__ was called, since parents all see the same object they can now skip post_init
is_child_module = True
setattr(module, "_ds_child_entered", True)
f(module, *args, **kwargs)
if is_child_module:
# child's __init__ is done, now we can run a single post_init on the child object
delattr(module, "_ds_child_entered")
print_rank_0(f'Running post_init for {module.__class__.__name__}', force=False)
self._post_init_method(module)
print_rank_0(f'After initializing followed by post init for {module.__class__.__name__}', force=False)
return wrapper
def _enable_class(cls):
cls._old_init = cls.__init__
cls.__init__ = partition_after(cls.__init__)
def _init_subclass(cls, **kwargs):
cls.__init__ = partition_after(cls.__init__)
# Replace .__init__() for all existing subclasses of torch.nn.Module recursively
global zero_init_context
self.nest_level = len(zero_init_context)
global all_wrapped_classes
for subclass in get_all_subclasses(torch.nn.modules.module.Module):
# Only wrap classes that haven't been wrapped yet
if subclass not in all_wrapped_classes:
_enable_class(subclass)
self.wrapped_cls.add(subclass)
all_wrapped_classes = all_wrapped_classes.union(self.wrapped_cls)
# Wrap some functions only at top level call of Init
if self.nest_level == 0:
# holding onto some methods so we can put them back the way they were in __exit__
torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__
torch.nn.modules.module.Module._old_apply = torch.nn.modules.module.Module.apply
torch.Tensor.__old_new__ = torch.Tensor.__new__
# Replace .__init__() for future subclasses of torch.nn.Module
torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass)
torch.nn.modules.module.Module.apply = apply_with_gather(torch.nn.modules.module.Module._old_apply)
torch.Tensor.__new__ = get_new_tensor_fn_for_dtype(self.dtype)
torch.empty = zero_wrapper_for_fp_tensor_constructor(_orig_torch_empty, self.dtype)
torch.zeros = zero_wrapper_for_fp_tensor_constructor(_orig_torch_zeros, self.dtype)
torch.ones = zero_wrapper_for_fp_tensor_constructor(_orig_torch_ones, self.dtype)
torch.full = zero_wrapper_for_fp_tensor_constructor(_orig_torch_full, self.dtype)
if self.mem_efficient_linear:
print_rank_0(
"nn.functional.linear has been overridden with a more memory efficient version. This will persist unless manually reset.",
force=False)
self.linear_bk = torch.nn.functional.linear
torch.nn.functional.linear = zero3_linear_wrap
self.torch_func_wrapped = True
zero_init_context.append(self)
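# Note on nesting: each Init context wraps only the nn.Module subclasses that
# were not already wrapped by an outer context, and only the outermost context
# (nest_level == 0) patches Module.apply, Tensor.__new__ and the torch tensor
# constructors; __exit__ restores these only when that outermost context exits.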
def __exit__(self, exc_type, exc_value, traceback):
if not self.enabled:
return
self.remove_wrappers()
# Exiting the top level context
global zero_init_context
zero_init_context.pop()
if self.nest_level == 0:
if dist.get_rank() == 0:
logger.info("finished initializing model with %.2fB parameters", param_count / 1e9)
# Now that we have cleaned up the metaclass injection, let any exception propagate.
if exc_type is not None:
return False
# To be implemented by inheriting classes
def _post_init_method(self, module):
pass
def _set_dtype(self, ds_config, dtype):
if ds_config is not None and dtype is None:
if ds_config.bfloat16_enabled and ds_config.fp16_enabled:
raise RuntimeError("bfloat16 and fp16 cannot be enabled at once")
if ds_config.bfloat16_enabled:
self.dtype = torch.bfloat16
elif ds_config.fp16_enabled:
self.dtype = torch.half
else:
self.dtype = torch.float
else:
self.dtype = dtype or torch.half
def remove_wrappers(self):
def _disable_class(cls):
cls.__init__ = cls._old_init
for subclass in self.wrapped_cls:
_disable_class(subclass)
self.wrapped_cls.clear()
# This context is the top level of nested Init
if self.nest_level == 0 and self.torch_func_wrapped:
# putting methods back the way we found them
torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass
torch.nn.modules.module.Module.apply = torch.nn.modules.module.Module._old_apply
torch.Tensor.__new__ = torch.Tensor.__old_new__
torch.empty = _orig_torch_empty
torch.zeros = _orig_torch_zeros
torch.ones = _orig_torch_ones
torch.full = _orig_torch_full
# undoing it here would also undo it during training, so the
# memory-efficient linear override is intentionally left in place:
# if self.mem_efficient_linear:
#     torch.nn.functional.linear = self.linear_bk
self.torch_func_wrapped = False
global all_wrapped_classes
for subclass in get_all_subclasses(torch.nn.modules.module.Module):
if subclass not in all_wrapped_classes:
msg = f"`{subclass}' was not properly set up for sharding by zero.Init(). A subclass of torch.nn.Module must be defined before zero.Init() where an instance of the class is created."
raise RuntimeError(msg)
all_wrapped_classes.clear()
def shutdown_init_context():
"""
Called when the DeepSpeed engine is initialized inside a ``zero.Init`` context.
We need to remove the wrappers but keep the list of active contexts.
"""
global zero_init_context
for ctx in zero_init_context:
ctx.remove_wrappers()
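# A minimal usage sketch (hypothetical driver code, not part of this module):
# shutdown_init_context is expected to be invoked when the DeepSpeed engine is
# built while one or more zero.Init contexts are still active, e.g.
#
#     with deepspeed.zero.Init(config_dict_or_path=ds_config):
#         model = build_model()
#         engine, *_ = deepspeed.initialize(model=model, config=ds_config)
#
# where deepspeed.initialize (or the surrounding runtime) removes the
# constructor wrappers via this helper so that engine construction is not
# itself intercepted by the Init patches.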
class AllGatherHandle:
def __init__(self, handle, param: Parameter) -> None:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to be available")
self.handle = handle
self.param = param
def wait(self) -> None:
instrument_w_nvtx(self.handle.wait)()
self.param.ds_status = ZeroParamStatus.AVAILABLE
class AllGatherCoalescedHandle:
def __init__(
self,
allgather_handle,
params: List[Parameter],
partitions: List[Tensor],
world_size: int,
) -> None:
# fields are named without the double-underscore (name-mangling) prefix
# to make subclassing this handle easier
self.allgather_handle = allgather_handle
self.params = params
self.partitions = partitions
self.world_size = world_size
self.complete = False
for param in self.params:
if param.ds_status != ZeroParamStatus.INFLIGHT:
raise RuntimeError(f"expected param {param.ds_summary()} to not be available")
@instrument_w_nvtx
def wait(self) -> None:
if self.complete:
return
instrument_w_nvtx(self.allgather_handle.wait)()
# split the single tensor out into individual tensors
param_offset = 0
for param in self.params:
assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
partitions: List[Tensor] = []
for rank in range(self.world_size):
param_start = rank * param.ds_tensor.ds_numel
if param_start < param.ds_numel:
part_to_copy = self.partitions[rank].narrow(
0, param_offset, min(param.ds_numel - param_start, param.ds_tensor.ds_numel))
partitions.append(part_to_copy)
param.data = instrument_w_nvtx(torch.cat)(partitions).view(param.ds_shape)
param.ds_status = ZeroParamStatus.AVAILABLE
for part_to_copy in partitions:
part_to_copy.record_stream(get_accelerator().current_stream())
param_offset += param.ds_tensor.ds_numel
self.complete = True
def _no_gather_coalesced(params: Iterable[Parameter]) -> AllGatherCoalescedHandle:
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
params = sorted(params, key=lambda p: p.ds_id)
if len(params) == 1:
param, = params
return NoGatherHandle(param)
return NoGatherCoalescedHandle(params)
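# When there is only a single partition (data-parallel world size of 1) no
# collective is needed, so all_gather_coalesced and _allgather_params_coalesced
# below fall back to these NoGather handles, which simply move the local
# ds_tensor back onto the accelerator.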
# Replaces all parameters in module with Scattered Parameters
class Init(InsertPostInitMethodToModuleSubClasses):
param_id = 0
param_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "param_persistence_threshold")
model_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "model_persistence_threshold")
num_persisted_parameters = 0
num_persisted_elements = 0
apply_param_persistence = False
def __init__(self,
module=None,
data_parallel_group=None,
mem_efficient_linear=True,
remote_device=None,
pin_memory=False,
config_dict_or_path=None,
config=None,
enabled=True,
dtype=None,
mpu=None):
"""A context to enable massive model construction for training with
ZeRO-3. Models are automatically partitioned (or, sharded) across the
system and converted to half precision.
Args:
module (``torch.nn.Module``, optional): If provided, partition the model as
if it was constructed in the context.
data_parallel_group (``deepspeed.comm`` process group, optional):
The group of processes to partition among. Defaults to all processes.
mem_efficient_linear (bool, optional): Replace
torch.nn.functional.linear with an implementation that allows
DeepSpeed to partition parameters. Defaults to ``True``.
remote_device (string, optional): The initial device to store model
weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
memory. The model may still be moved to GPU based on the
offload settings for training. Defaults to param offload device if a config is
defined, otherwise GPU.
pin_memory (bool, optional): Potentially increase performance by
using pinned memory for model weights. ``remote_device`` must be
``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
config_dict_or_path (dict or ``json file``, optional): If provided, supplies the DeepSpeed
configuration used by this context (e.g., ZeRO param offload settings and swapping of fp16 params to NVMe).
config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
enabled (bool, optional): If ``False``, this context has no
effect. Defaults to ``True``.
dtype (``dtype``, optional): Can be used to change the data type of the parameters.
Supported options are ``torch.half``, ``torch.bfloat16``, and ``torch.float``. Defaults to ``None``.
mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.
This context accelerates model initialization and enables models that
are too large to allocate in their entirety in CPU memory. It has the
following effects:
#. allocates tensors to either GPU or CPU memory or NVMe
#. converts floating point tensors to half precision
#. immediately partitions tensors among the group of data-parallel devices
#. (*optional*) replaces ``torch.nn.functional.linear`` with a more
memory-efficient implementation
These modifications allow for models that exceed the size of local CPU/GPU
memory/NVMe, but fit within the total system capacity (*i.e.*, aggregate CPU
or GPU memory or NVMe) across all nodes. Consider initializing a model with one
trillion parameters, whose weights occupy two terabytes (TB) in half
precision. The initial CPU allocation in full precision requires 4TB of
memory *per process*, and so a system with 8 GPUs per node would need 32TB of
CPU memory due to data-parallel redundancies. Instead, by immediately
partitioning tensors we remove the redundancies. The result is that
regardless of the number of GPUs, we still only require the original 4TB. This
allows for a linear increase in model size with the aggregate system memory.
For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion
parameter model with 4 nodes and 32 GPUs.
Important: If the fp16 weights of the model cannot fit into the memory of a single
GPU, this feature must be used.
.. note::
Initializes ``deepspeed.comm`` if it has not already been done so.
See :meth:`deepspeed.init_distributed` for more information.
.. note::
Can also be used as a decorator:
.. code-block:: python
@deepspeed.zero.Init()
def get_model():
return MyLargeModel()
.. note::
Only applicable to training with ZeRO-3.
Examples
--------
#. Allocate a model and partition it among all processes:
.. code-block:: python
with deepspeed.zero.Init():
model = MyLargeModel()
#. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:
.. code-block:: python
with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(),
remote_device="cpu",
pin_memory=True):
model = MyLargeModel()
#. Partition an already-allocated model in CPU memory:
.. code-block:: python
model = deepspeed.zero.Init(module=model)
"""
if config is not None:
config_dict_or_path = config
logger.warning(
f'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.')
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,
mpu) if config_dict_or_path is not None else None
if _ds_config is not None:
mem_efficient_linear = _ds_config.zero_config.memory_efficient_linear
super().__init__(enabled=enabled, mem_efficient_linear=mem_efficient_linear, ds_config=_ds_config, dtype=dtype)
if not dist.is_initialized():
init_distributed()
assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm"
if data_parallel_group is None:
self.ds_process_group = dist.get_world_group()
else:
self.ds_process_group = data_parallel_group
self.rank = dist.get_rank(group=self.ds_process_group)
self.dp_world_size = dist.get_world_size(group=self.ds_process_group)
# Local device is the device where the parameters are consumed, must be default device.
# It is the device where parameters are fully instantiated using allgather
self.local_device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"]))
get_accelerator().set_device(self.local_device)
if _ds_config is not None:
self._update_persist_config(_ds_config)
if _ds_config.zero_config.offload_param is not None:
remote_device = _ds_config.zero_config.offload_param.device
pin_memory = _ds_config.zero_config.offload_param.pin_memory
self._validate_remote_device(remote_device, _ds_config)
# Remote device is the device where parameter partitions are stored
# It can be same as local_device or it could be CPU or NVMe.
self.remote_device = self.local_device if remote_device in [None, OffloadDeviceEnum.none] else remote_device
self.pin_memory = pin_memory if (self.remote_device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme
]) else False
# Enable fp16 param swapping to NVMe
if self.remote_device == OffloadDeviceEnum.nvme:
self.param_swapper = AsyncPartitionedParameterSwapper(_ds_config, self.dtype)
else:
self.param_swapper = None
# If we are provided an already-allocated module to prepare.
if module is not None:
assert isinstance(module, torch.nn.Module)
self._convert_to_zero_parameters(module.parameters(recurse=True))
self.use_all_gather_into_tensor = dist.has_all_gather_into_tensor()
if not self.use_all_gather_into_tensor:
logger.info(f"all_gather_into_tensor API is not available in torch {torch.__version__}")
def _update_persist_config(self, ds_config):
Init.apply_param_persistence = True
Init.param_persistence_threshold = ds_config.zero_config.param_persistence_threshold
Init.model_persistence_threshold = ds_config.zero_config.model_persistence_threshold // self.num_partitions
def _convert_to_zero_parameters(self, param_list):
for param in param_list:
if is_zero_param(param):
continue
self._convert_to_deepspeed_param(param)
param.partition()
def _validate_remote_device(self, remote_device, ds_config):
if ds_config is not None:
if remote_device in [None, OffloadDeviceEnum.cpu]:
if ds_config.zero_config.offload_param is not None:
offload_param_device = ds_config.zero_config.offload_param.device
assert offload_param_device != OffloadDeviceEnum.nvme, \
f"'device' in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}."
if remote_device == OffloadDeviceEnum.nvme:
assert ds_config.zero_config.offload_param is not None, \
f'"offload_param" must be defined in DeepSpeed Config if remote device is {OffloadDeviceEnum.nvme}.'
assert ds_config.zero_config.offload_param.nvme_path is not None, \
f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}'
def _post_init_method(self, module):
#see_memory_usage(f"Before converting parmas in {module.__class__.__name__}", force=False)
print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False)
see_memory_usage(f"Before converting and partitioning parmas in {module.__class__.__name__}", force=False)
global param_count
for name, param in module.named_parameters(recurse=False):
param_count += param.numel()
if not is_zero_param(param):
self._convert_to_deepspeed_param(param)
print_rank_0(
f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}")
if get_accelerator().on_accelerator(param):
dist.broadcast(param, 0, self.get_dp_process_group())
else:
if dist.get_rank() == 0:
logger.warn(f"param `{name}` in {module.__class__.__name__} "
f"not on GPU so was not broadcasted from rank 0")
param.partition()
see_memory_usage(
f"Param count {param_count}. After converting and partitioning parmas in {module.__class__.__name__}",
force=False)
def _convert_to_deepspeed_param(self, param):
# Partitioned, Normal, Remote
param.ds_param_type = ZeroParamType.PARTITIONED
# Replicated vs Partitioned vs Inflight
param.ds_status = ZeroParamStatus.AVAILABLE
# Stores the shape of the original tensor
param.ds_shape = param.shape
# Stores the number of elements in the original parameter without padding
param.ds_numel = param.numel()
# Stores the partitioned copy of the tensor
param.ds_tensor = None
# Keeps track of how many active sub-modules need this param at any given point in time
param.ds_active_sub_modules = set()
# If this flag is true, then the parameters are replicated throughout training
# and only partitioned before the optimizer step
if Init.apply_param_persistence and param.ds_numel <= Init.param_persistence_threshold and Init.num_persisted_elements + param.ds_numel <= Init.model_persistence_threshold:
param.ds_persist = True
Init.num_persisted_parameters += 1
Init.num_persisted_elements += param.ds_numel
else:
param.ds_persist = False
param.is_external_param = False
# The group that the parameter is scattered across.
param.ds_process_group = self.ds_process_group
# This is set to the Async Param swapper if remote device is nvme
# else this is set to None
param.nvme_swapper = self.param_swapper
# DeepSpeed Param ID
param.ds_id = Init.param_id
Init.param_id += 1
def all_gather(param_list=None, async_op=False, hierarchy=0):
cls = param
if param_list is None:
param_list = [cls]
return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy)
@instrument_w_nvtx
def all_gather_coalesced(params: Iterable[Parameter], safe_mode: bool = False) -> AllGatherCoalescedHandle:
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(params)
if self.num_partitions == 1:
return _no_gather_coalesced(params)
for param in params:
if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
raise RuntimeError(param.ds_summary())
param.ds_status = ZeroParamStatus.INFLIGHT
# ensure that each rank has params in same order. the allgather
# is done by flattening the parameter list into a single tensor that
# can be allgathered in a single call - this means that if each rank
# gives a list of the same parameters in a different order we will
# silently get incorrect parameter values, and have very difficult
# to debug correctness issues.
params = sorted(params, key=lambda p: p.ds_id)
debug_rank0(f"-allgather_coalesced: {[p.ds_id for p in params]}")
if safe_mode:
# ensure that same list (with same ordering) of parameters are
# being allgathered across all ranks, otherwise could mix
# data between tensors.
assert_ints_same_as_other_ranks([p.ds_id for p in params])
# ensure that tensors from each rank agree on the same ds_numel
# otherwise could mix data between tensors.
assert_ints_same_as_other_ranks([p.ds_tensor.ds_numel for p in params])
if len(params) == 1:
# have an opportunity to avoid some intermediate memory allocations
param, = params
param_buffer = torch.empty(
math.ceil(param.ds_numel / self.num_partitions) * self.num_partitions,
dtype=param.dtype,
device=get_accelerator().current_device_name(),
requires_grad=False,
)
handle = _dist_allgather_fn(param.ds_tensor.to(get_accelerator().current_device_name()), param_buffer,
self.get_partition_dp_group(param))
param.data = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to(param.device)
return AllGatherHandle(handle, param)
else:
partition_sz = sum(p.ds_tensor.ds_numel for p in params)
flat_tensor = torch.empty(partition_sz * self.num_partitions,
dtype=get_only_unique_item(p.dtype for p in params),
device=get_accelerator().current_device_name(),
requires_grad=False)
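# Layout of flat_tensor after the allgather (descriptive sketch): with two
# params A and B whose partitions hold a and b elements respectively,
# partition_sz = a + b and the gathered buffer is
#   [ A_rank0 | B_rank0 | A_rank1 | B_rank1 | ... ]
# i.e. one contiguous (A, B) slab per rank. AllGatherCoalescedHandle.wait()
# later walks these slabs to stitch each param back together.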
partitions: List[Parameter] = []
for i in range(self.num_partitions):
partitions.append(flat_tensor.narrow(0, partition_sz * i, partition_sz))
instrument_w_nvtx(torch.cat)([p.ds_tensor.to(get_accelerator().current_device_name()) for p in params],
out=partitions[self.get_partition_rank()])
handle = _dist_allgather_fn(partitions[self.get_partition_rank()], flat_tensor,
self.get_partition_dp_group(params[0]))
return AllGatherCoalescedHandle(
allgather_handle=handle,
params=params,
partitions=partitions,
world_size=self.num_partitions,
)
def partition(param_list=None, hierarchy=0, has_been_updated=False):
cls = param
print_rank_0(f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}")
if param_list is None:
param_list = [cls]
self._partition(param_list, has_been_updated=has_been_updated)
def reduce_gradients_at_owner(param_list=None, hierarchy=0):
cls = param
if param_list is None:
param_list = [cls]
print_rank_0(
f"{'--'*hierarchy}----Reducing Gradients for param with ids {[param.ds_id for param in param_list]} to owner"
)
self._reduce_scatter_gradients(param_list)
def partition_gradients(param_list=None, partition_buffers=None, hierarchy=0, accumulate=False):
cls = param
print_rank_0(
f"{'--'*hierarchy}----Partitioning param gradient with id {debug_param2name_id_shape_device(cls)}")
if param_list is None:
param_list = [cls]
if isinstance(partition_buffers, torch.Tensor):
partition_buffers = [partition_buffers]
self._partition_gradients(param_list, partition_buffers=partition_buffers, accumulate=accumulate)
def aligned_size():
return self._aligned_size(param)
def padding_size():
return self._padding_size(param)
def partition_numel():
return self._partition_numel(param)
def item_override():
param.all_gather()
return param._orig_item()
def ds_summary(slf: torch.Tensor, use_debug_name: bool = False) -> dict:
return {
"id": debug_param2name_id(slf) if use_debug_name else slf.ds_id,
"status": slf.ds_status.name,
"numel": slf.numel(),
"ds_numel": slf.ds_numel,
"shape": tuple(slf.shape),
"ds_shape": tuple(slf.ds_shape),
"requires_grad": slf.requires_grad,
"grad_shape": tuple(slf.grad.shape) if slf.grad is not None else None,
"persist": slf.ds_persist,
"active_sub_modules": slf.ds_active_sub_modules,
"ds_tensor.shape": slf.ds_tensor.shape if slf.ds_tensor is not None else None
}
def convert_to_zero_parameters(param_list):
self._convert_to_zero_parameters(param_list)
def allgather_before(func: Callable) -> Callable:
def wrapped(*args, **kwargs):
param.all_gather()
return func(*args, **kwargs)
return wrapped
# Collectives for gathering and partitioning parameters
param.all_gather = all_gather
param.all_gather_coalesced = all_gather_coalesced
param.partition = partition
# Collective for averaging gradients
param.reduce_gradients_at_owner = reduce_gradients_at_owner
param.partition_gradients = partition_gradients
# Partitioning size utilities
param.aligned_size = aligned_size
param.padding_size = padding_size
param.partition_numel = partition_numel
param.ds_summary = types.MethodType(ds_summary, param)
param.item = allgather_before(param.item)
param.convert_to_zero_parameters = convert_to_zero_parameters
def _aligned_size(self, param):
return param.ds_numel + self._padding_size(param)
def _padding_size(self, param):
remainder = param.ds_numel % self.num_partitions
return (self.num_partitions - remainder) if remainder else 0
def _partition_numel(self, param):
return param.ds_tensor.ds_numel
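# Worked example for the three helpers above (illustrative numbers only):
# with ds_numel = 10 and num_partitions = 4, the remainder is 2, so
# _padding_size returns 2, _aligned_size returns 12, and each rank's
# ds_tensor (hence _partition_numel) holds 12 // 4 = 3 elements.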
def _ensure_availability_of_partitioned_params(self, params):
swap_in_list = []
swap_in_flight = []
for param in params:
if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
swap_in_list.append(param)
if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT:
assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
swap_in_flight.append(param)
if len(swap_in_list) > 0:
swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False)
elif len(swap_in_flight) > 0:
swap_in_flight[0].nvme_swapper.synchronize_reads()
@instrument_w_nvtx
def _all_gather(self, param_list, async_op=False, hierarchy=None):
# fetches from nvme if the partition is not available and in nvme
self._ensure_availability_of_partitioned_params(param_list)
handles = []
all_gather_list = []
for param in param_list:
if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
if async_op:
handle = self._allgather_param(param, async_op=async_op, hierarchy=hierarchy)
param.ds_status = ZeroParamStatus.INFLIGHT # if async_op else ZeroParamStatus.AVAILABLE
handles.append(handle)
else:
all_gather_list.append(param)
if not async_op:
if len(param_list) == 1:
ret_value = self._allgather_params(all_gather_list, hierarchy=hierarchy)
else:
ret_value = self._allgather_params_coalesced(all_gather_list, hierarchy)
for param in all_gather_list:
param.ds_status = ZeroParamStatus.AVAILABLE
return ret_value
return handles
def _partition(self, param_list, force=False, has_been_updated=False):
for param in param_list:
#print_rank_0(f"Before Partitioning Param {param.ds_id}")
# self._param_status(param)
self._partition_param(param, has_been_updated=has_been_updated)
param.ds_status = ZeroParamStatus.NOT_AVAILABLE
# if param.ds_tensor is not None:
# assert id(param.data) == id(param.ds_tensor.data), \
# "After the parameters are initially partitioned, make sure we are not recreating the partition."
#print_rank_0(f"After Partitioning Param {param.ds_id}")
# self._param_status(param)
@instrument_w_nvtx
def _partition_param(self, param, buffer=None, has_been_updated=False):
assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight"
global reuse_buffers
#print_rank_0(f"Param id {param.ds_id} status is {param.ds_status}")
if param.ds_status is ZeroParamStatus.AVAILABLE:
print_rank_0(f"Partitioning param id {param.ds_id} reuse buffers {reuse_buffers}", force=False)
# if reuse_buffers and False:
# numel = buffer.numel()
# buffer = param.data.view(-1)
# print_rank_0(
# "Returning buffer for param {param.ds_id} with numel {param.ds_numel} to empty buffers",
# force=False)
# if numel in empty_buffers:
# empty_buffers[numel].append(buffer)
# if deepspeed.comm.get_rank():
# print(f"Releasing {param.data.numel()}")
if param.ds_tensor is not None and not has_been_updated:
#param.data = param.ds_tensor.data
see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False)
# param.data does not store anything meaningful in partitioned state
free_param(param)
see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False)
if param.ds_tensor.final_location == OffloadDeviceEnum.nvme:
print_rank_0(f"Param {param.ds_id} partition released since it exists in nvme", force=False)
param.nvme_swapper.remove_partition_and_release_buffers([param])
return
tensor_size = self._aligned_size(param)
partition_size = tensor_size // self.num_partitions
if param.ds_tensor is None:
final_location = None
if self.remote_device == OffloadDeviceEnum.nvme and self.param_swapper.swappable_tensor(
numel=partition_size):
final_location = OffloadDeviceEnum.nvme
buffer = self.param_swapper.get_buffer(param, partition_size)
partitioned_tensor = torch.empty(0, dtype=param.dtype, device=buffer.device)
partitioned_tensor.data = buffer.data
print_rank_0(f"ID {param.ds_id} Initializing partition for the first time for nvme offload.")
else:
if param.ds_persist:
device = self.local_device
elif self.remote_device == OffloadDeviceEnum.nvme:
device = OffloadDeviceEnum.cpu
else:
device = self.remote_device
partitioned_tensor = torch.empty(partition_size, dtype=param.dtype, device=device)
if device == OffloadDeviceEnum.cpu and self.pin_memory:
partitioned_tensor = get_accelerator().pin_memory(partitioned_tensor)
partitioned_tensor.requires_grad = False
param.ds_tensor = partitioned_tensor
param.ds_tensor.ds_numel = partition_size
param.ds_tensor.status = PartitionedParamStatus.AVAILABLE
param.ds_tensor.final_location = final_location
start = partition_size * self.get_partition_rank()
end = start + partition_size
one_dim_param = param.contiguous().view(-1)
if start < param.ds_numel and end <= param.ds_numel:
src_tensor = one_dim_param.narrow(0, start, partition_size)
param.ds_tensor.copy_(src_tensor)
#partitioned_tensor = src_tensor.clone().detach().to(self.remote_device)
else:
# partitioned_tensor = torch.zeros(partition_size,
# dtype=param.dtype,
# device=self.remote_device )
if start < param.ds_numel:
elements_to_copy = param.ds_numel - start
param.ds_tensor.narrow(0, 0,
elements_to_copy).copy_(one_dim_param.narrow(0, start, elements_to_copy))
#print(f"Remote device {self.remote_device}")
#param.ds_tensor = partitioned_tensor
#param.data = param.ds_tensor.data
# param.data does not store anything meaningful in partitioned state
see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False)
free_param(param)
see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False)
if param.ds_tensor.final_location == OffloadDeviceEnum.nvme:
self.param_swapper.swap_out_and_release([param])
print_rank_0(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.")
see_memory_usage(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.", force=False)
print_rank_0(f"ID {param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}")
def _param_status(self, param):
if param.ds_tensor is not None:
print_rank_0(
f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned numel {param.ds_tensor.numel()}, data numel {param.data.numel()}"
)
else:
print_rank_0(
f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned ds_tensor {param.ds_tensor}, data numel {param.data.numel()}"
)
def _allgather_param(self, param, async_op=False, hierarchy=0):
partition_size = param.ds_tensor.ds_numel
tensor_size = partition_size * self.num_partitions
aligned_param_size = self._aligned_size(param)
assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}'
print_rank_0(
f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}"
)
see_memory_usage(
f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ',
force=False)
flat_tensor = torch.zeros(aligned_param_size, dtype=param.dtype, device=param.device).view(-1)
see_memory_usage(
f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ',
force=False)
get_accelerator().synchronize()
print_rank_0(
f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}"
)
# if not flat_tensor.numel() > 100000:
# replicated_tensor = flat_tensor.narrow(0,
# 0,
# param.ds_numel).view(param.ds_shape)
# param.data = replicated_tensor.data
# return None
if self.use_all_gather_into_tensor:
handle = dist.all_gather_into_tensor(flat_tensor,
param.ds_tensor.to(get_accelerator().device_name()),
group=self.get_partition_dp_group(param),
async_op=async_op)
else:
partitions = []
for i in range(self.num_partitions):
partitions.append(flat_tensor.narrow(0, partition_size * i, partition_size))
if i == dist.get_rank(group=self.get_partition_dp_group(param)):
partitions[i].data.copy_(param.ds_tensor.data, non_blocking=True)
handle = dist.all_gather(partitions,
partitions[self.get_partition_rank()],
group=self.get_partition_dp_group(param),
async_op=async_op)
replicated_tensor = flat_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape)
param.data = replicated_tensor.data
return handle
def _allgather_params_coalesced(self, param_list, hierarchy=0):
""" blocking call
avoid explicit memory copy in _allgather_params
"""
if len(param_list) == 0:
return
if self.num_partitions == 1:
handle = _no_gather_coalesced(param_list)
handle.wait()
return None
# collect local tensors and partition sizes
partition_sizes = []
local_tensors = []
for param in param_list:
partition_sizes.append(param.ds_tensor.ds_numel)
local_tensors.append(param.ds_tensor.to(get_accelerator().device_name()))
# allocate memory for allgather params
allgather_params = []
for psize in partition_sizes:
tensor_size = psize * self.num_partitions
flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device).view(-1)
flat_tensor.requires_grad = False
allgather_params.append(flat_tensor)
# launch
launch_handles = []
for param_idx, param in enumerate(param_list):
input_tensor = local_tensors[param_idx].view(-1)
if self.use_all_gather_into_tensor:
# try the _all_gather_base / all_gather_into_tensor path from PyTorch
h = dist.all_gather_into_tensor(allgather_params[param_idx],
input_tensor,
group=self.get_partition_dp_group(param),
async_op=True)
else:
output_list = []
for i in range(self.num_partitions):
psize = partition_sizes[param_idx]
partition = allgather_params[param_idx].narrow(0, i * psize, psize)
output_list.append(partition)
if not get_accelerator().on_accelerator(partition):
logger.warning(
f'param {param_idx}, partition {i} is not on the accelerator, partition shape {partition.size()}')
# back to old all_gather function
h = dist.all_gather(output_list, input_tensor, group=self.get_partition_dp_group(param), async_op=True)
launch_handles.append(h)
# Wait ensures the operation is enqueued, but not necessarily complete.
launch_handles[-1].wait()
# assign to param.data (not copy)
for i, param in enumerate(param_list):
gathered_tensor = allgather_params[i]
param.data = gathered_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape).data
# guarantee the communication to be completed
get_accelerator().synchronize()
return None
def _allgather_params(self, param_list, hierarchy=0):
if len(param_list) == 0:
return
partition_size = sum([param.ds_tensor.ds_numel for param in param_list])
tensor_size = partition_size * self.num_partitions
flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device)
flat_tensor.requires_grad = False
partitions = []
for i in range(self.num_partitions):
start = partition_size * i
partitions.append(flat_tensor.narrow(0, start, partition_size))
if i == self.get_partition_rank():
offset = 0
for param in param_list:
param_numel = param.ds_tensor.ds_numel
partitions[i].narrow(0, offset, param_numel).copy_(param.ds_tensor.data)
offset += param_numel
dist.all_gather(partitions,
partitions[self.get_partition_rank()],
group=self.get_partition_dp_group(param),
async_op=False)
param_offset = 0
for param in param_list:
param_partition_size = param.ds_tensor.ds_numel
param_size = param.ds_numel
replicated_tensor = torch.empty(param.ds_shape, dtype=param.dtype, device=self.local_device)
for i in range(self.num_partitions):
start = i * partition_size
param_start = i * param_partition_size
if param_start < param_size:
numel_to_copy = min(param_size - param_start, param_partition_size)
part_to_copy = partitions[i].narrow(0, param_offset, numel_to_copy)
replicated_tensor.view(-1).narrow(0, param_start, numel_to_copy).copy_(part_to_copy)
#param_offset += param.data.numel()
param_offset += param.ds_tensor.ds_numel
param.data = replicated_tensor.data
return None
def _reduce_scatter_gradients(self, param_list):
#print_rank_0([param.grad for param in param_list])
#assert any([param.grad is None for param in param_list]), "None gradients cannot be reduce scattered"
handles_and_reduced_partitions = []
for param in param_list:
assert param.grad.numel(
) == param.ds_numel, f"{param.grad.numel()} != {param.ds_numel}: cannot reduce-scatter gradients whose size is not the same as the params"
handles_and_reduced_partitions.append(self._reduce_scatter_gradient(param))
for param, (handle, reduced_partition) in zip(param_list, handles_and_reduced_partitions):
if handle is not None:
handle.wait()
# some ranks may have partitions that are padded to go beyond the grad size.
# For these ranks the output of reduce scatter is a separate buffer and needs
# to be copied in
partition_size = param.ds_tensor.ds_numel
start = self.get_partition_rank() * partition_size
end = start + partition_size
#print_rank_0("REduce scatter was executed for praam {param.ds_id}")
if start < param.ds_numel and end > param.ds_numel:
elements = param.ds_numel - start
param.grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements))
def _reduce_scatter_gradient(self, param):
partition_size = param.ds_tensor.ds_numel
#output = torch.empty(partition_size, dtype=param.dtype, device=param.device)
total_size = partition_size * self.num_partitions
input_list = []
for i in range(self.num_partitions):
start = i * partition_size
end = start + partition_size
#print("before reduce scatter gradients")
if start < param.ds_numel and end <= param.ds_numel:
input = param.grad.view(-1).narrow(0, start, partition_size)
else:
input = torch.zeros(partition_size, dtype=param.dtype, device=param.device)
if start < param.ds_numel:
elements = param.ds_numel - start
input.narrow(0, 0, elements).copy_(param.grad.view(-1).narrow(0, start, elements))
#print("after reduce scatter gradients")
input_list.append(input)
rank = dist.get_rank(group=self.get_partition_dp_group(param))
handle = dist.reduce_scatter(input_list[rank],
input_list,
group=self.get_partition_dp_group(param),
async_op=True)
return handle, input_list[rank]
def _partition_gradients(self, param_list, partition_buffers=None, accumulate=False):
if partition_buffers is None:
partition_buffers = [None] * len(param_list)
for param, partition_buffer in zip(param_list, partition_buffers):
self._partition_gradient(param, partition_buffer=partition_buffer, accumulate=accumulate)
def _partition_gradient(self, param, partition_buffer=None, accumulate=False):
#import pdb;pdb.set_trace()
# param.grad=None
# param.grad.test()
print_rank_0(
f"Partitioning param {param.ds_id} gradient of size {param.grad.numel()} type {param.grad.dtype} part_size {param.ds_tensor.ds_numel}"
)
see_memory_usage("Before partitioning gradients", force=False)
partition_size = param.ds_tensor.ds_numel
if partition_buffer is None:
assert not accumulate, "No buffer to accumulate to"
partition_buffer = torch.zeros(partition_size, dtype=param.dtype, device=param.device)
else:
assert partition_buffer.numel(
) >= partition_size, f"The partition buffer size {partition_buffer.numel()} should be at least the size of param.ds_tensor {partition_size}"
rank = dist.get_rank(group=self.get_partition_dp_group(param))
start = partition_size * rank
end = start + partition_size
dest_tensor_full_buffer = partition_buffer.view(-1).narrow(0, 0, partition_size)
#print("before partition gradients")
if start < param.ds_numel:
elements = min(param.ds_numel - start, partition_size)
dest_tensor = dest_tensor_full_buffer.narrow(0, 0, elements)
src_tensor = param.grad.view(-1).narrow(0, start, elements)
# just copy the grad partition to the buffer
if not accumulate:
dest_tensor.copy_(src_tensor)
# if source and destination are on same device,
# add to the provided buffer
elif src_tensor.device == dest_tensor.device:
dest_tensor.add_(src_tensor)
# if source and destination are on different devices, copy the destination
# into a temporary on the source device, add there, then copy back to the
# destination. This seems to run faster when src is gpu and dest is cpu;
# adding directly on cpu is very slow
else:
acc_tensor = torch.empty(src_tensor.numel(), dtype=param.dtype, device=param.device)
acc_tensor.copy_(dest_tensor)
acc_tensor.add_(src_tensor)
dest_tensor.copy_(acc_tensor)
# partition_buffer.view(-1).narrow(
# 0,
# 0,
# elements).copy_(param.grad.view(-1).narrow(0,
# start,
# elements))
#print("after partition gradients")
param.grad.data = dest_tensor_full_buffer.data
see_memory_usage("After partitioning gradients", force=False)
def get_partition_dp_group(self, param):
return param.ds_process_group
def get_partition_rank(self):
"""subclass can overload to specify different relative rank in
parameter partition group"""
return self.rank
@property
def num_partitions(self):
return self.dp_world_size
def get_dp_process_group(self):
""" Return the communication group with all data-parallel ranks """
return self.ds_process_group
class GatheredParameters:
def __init__(self, params, modifier_rank=None, fwd_module=None, enabled=True):
"""A context that collects parameters that were partitioned via a
:class:`deepspeed.zero.Init` context. The parameters are partitioned
again upon exit.
Args:
params (``torch.nn.Parameter``): A single parameter, or an iterable of parameters (list, tuple, generator) of parameters to collect.
It's assumed that all parameters are zero params.
modifier_rank (int, optional): If specified, this rank's parameter will be
broadcast on exit from the context. This argument is required if ``params`` are
modified, so that all processes have a consistent view of the data. Defaults
to ``None``.
fwd_module (``torch.nn.Module``, optional): If specified, ``params`` will be
registered as external parameters of ``fwd_module``. See :meth:`deepspeed.zero.register_external_parameter`.
enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``.
Important: Make sure to use a ``modifier_rank`` that is not ``None`` (e.g., ``modifier_rank=0``)
if you need the GPU memory allocated by the gather to be released upon exit from the context manager.
Important: if ``params`` isn't an iterable of parameters or a single parameter it'll be silently ignored!
Examples
========
#. Allocate a partitioned module, initialize its weight on rank 0, and update all
processes.
.. code-block:: python
with deepspeed.zero.Init():
linear = torch.nn.Linear(1000,1000)
with deepspeed.zero.GatheredParameters(linear.weight,
modifier_rank=0):
if deepspeed.comm.get_rank() == 0:
linear.weight.zero_()
#. Collect a partitioned weight to pass to another module during
training. The parameter will be registered as an external parameter
and made available during the backward pass.
.. code-block:: python
:emphasize-lines: 6
def forward(self, input):
x = self.layer1(input)
# self.layer1.weight is required by self.layer2.forward
with deepspeed.zero.GatheredParameters(self.layer1.weight,
fwd_module=self):
y = self.layer2(x, self.layer1.weight)
return y
#. Pretrained model loading
.. code-block:: python
with deepspeed.zero.Init():
model = MyModel()
state_dict = torch.load(model_path, map_location="cpu")
def load(module: nn.Module, prefix=""):
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
if deepspeed.comm.get_rank() == 0:
module._load_from_state_dict(state_dict, prefix)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(model, prefix="")
If this approach is not used, then the full model will first be copied to each GPU. For models
bigger than the memory of a single GPU, this method is required.
"""
self.enabled = enabled
if not enabled:
return
if isinstance(params, Iterable) and not isinstance(params, torch.Tensor):
# deal with generators like model.parameters()
# must convert to list to be able to iterate more than once if we get a generator
params = list(params)
else:
# single param
params = [params]
# enable if at least one is zero-param, otherwise a noop
if not any(is_zero_param(p) for p in params):
self.enabled = False
return
self.params = [p for p in params if hasattr(p, "ds_id")]
self.src_rank = None
if modifier_rank is not None:
if self.params[0].ds_process_group == dist.get_world_group():
self.src_rank = modifier_rank
else:
# A group was specified; convert DP rank to global rank
self.src_rank = dist.get_global_rank(self.params[0].ds_process_group, modifier_rank)
self.fwd_module = fwd_module
if self.fwd_module is not None:
# is a no-op if already registered
for p in self.params:
register_external_parameter(self.fwd_module, p)
def __enter__(self):
if not self.enabled:
return
self.params[0].all_gather(param_list=self.params)
def __exit__(self, *exc):
if not self.enabled:
return
if self.src_rank is None:
self.params[0].partition(param_list=self.params, has_been_updated=False)
return
handles = [dist.broadcast(p, self.src_rank, group=p.ds_process_group, async_op=True) for p in self.params]
for h in handles:
h.wait()
self.params[0].partition(param_list=self.params, has_been_updated=True)
# === end of file: deepspeed/runtime/zero/partition_parameters.py ===
# DeepSpeed Team
import os
from typing import List
import torch
from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.ops.adam import FusedAdam
from deepspeed.utils.nvtx import instrument_w_nvtx
from deepspeed.accelerator import get_accelerator
def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
data_parallel_size = int(dist.get_world_size())
parameter_parallel_size = parameter_parallel_size or data_parallel_size
logger.info("data_parallel_size: %s, parameter_parallel_size: %s", data_parallel_size, parameter_parallel_size)
assert data_parallel_size % parameter_parallel_size == 0, \
'world size should be divisible by parameter parallel size'
rank = dist.get_rank()
my_group = None
for i in range(data_parallel_size // parameter_parallel_size):
ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
group = dist.new_group(ranks)
if rank in ranks:
my_group = group
return my_group
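# Illustration (hypothetical sizes, not executed here): with a world size of 8 and
# parameter_parallel_size=4 the loop above creates two groups, ranks {0,1,2,3} and {4,5,6,7};
# dist.new_group is collective, so every rank helps create both groups but returns only the
# group containing its own rank, e.g.
#
#   my_group = _initialize_parameter_parallel_groups(parameter_parallel_size=4)
#   # on rank 5, my_group spans ranks {4, 5, 6, 7}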
class ZeRORuntimeException(Exception):
pass
ZERO_SUPPORTED_OPTIMIZERS = [
torch.optim.Adam, torch.optim.AdamW, FusedAdam, DeepSpeedCPUAdam, torch.optim.Adagrad, DeepSpeedCPUAdagrad
]
# Add apex FusedAdam to supported list if apex is installed
try:
import apex
if hasattr(apex, 'optimizers') and hasattr(apex.optimizers, 'FusedAdam'):
ZERO_SUPPORTED_OPTIMIZERS.append(apex.optimizers.FusedAdam)
except ImportError:
pass
def is_zero_supported_optimizer(optimizer):
if dist.get_rank() == 0:
logger.info(f'Checking ZeRO support for optimizer={optimizer.__class__.__name__} type={type(optimizer)}')
return type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS
def get_lst_from_rank0(lst: List[int]) -> List[int]:
    """
    Broadcast ``lst`` from rank 0 and return rank 0's values on every rank.
    NOTE: creates both communication and synchronization overhead so should be used
    sparingly
    """
lst_tensor = torch.tensor(
lst if dist.get_rank() == 0 else [-1] * len(lst),
dtype=int,
# device=get_accelerator().current_device_name(),
device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])),
requires_grad=False,
)
dist.broadcast(lst_tensor, src=0, async_op=False)
return list(lst_tensor.cpu().numpy())
@instrument_w_nvtx
def assert_ints_same_as_other_ranks(ints: List[int]) -> None:
"""
NOTE: creates both communication and synchronization overhead so should be
used sparingly
takes a list of ints from each rank and ensures that they are the same
across ranks, throwing an exception if they are not.
"""
rank0_ints = get_lst_from_rank0(ints)
if ints != rank0_ints:
raise RuntimeError(f"disagreement between rank0 and rank{dist.get_rank()}: "
f"rank0: {rank0_ints}, rank{dist.get_rank()}: {ints}") | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/utils.py | utils.py |
# DeepSpeed Team
import sys
import gc
import collections
from typing import Deque, Dict, Tuple
from deepspeed.runtime import ZeROOptimizer
from deepspeed.utils import logger
from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler
from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced
from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter
from deepspeed.runtime.zero.partition_parameters import *
from deepspeed.runtime.zero.config import ZeroStageEnum
from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum
from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus
from deepspeed.runtime.swap_tensor.partitioned_optimizer_swapper import PartitionedOptimizerSwapper
from deepspeed.runtime.swap_tensor.pipelined_optimizer_swapper import PipelinedOptimizerSwapper
from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FP32_FLAT_GROUPS, PARTITION_COUNT, ZERO_STAGE
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import UtilsBuilder
# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False
def print_rank_0(message, debug=False, force=False):
rank = dist.get_rank()
if rank == 0 and (debug or force):
logger.info(message)
# other variations
# - print for all ranks w/o interleaving
# printflock(f"[{rank}] {message}")
# - print to log file per rank
# log_rank_file(rank, message)
def input(msg):
return
def isclose(a, b, rtol=1e-09, atol=0.0):
return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)
def lcm(x, y):
    from math import gcd  # fractions.gcd was removed in newer Python versions; use math.gcd
return x * y // gcd(x, y)
def move_to_cpu(tensor_list):
for tensor in tensor_list:
tensor.data = tensor.data.cpu()
INITIAL_MICRO_STEP_ID = -1
class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer):
"""
DeepSpeedZeroOptimizer designed to reduce the memory footprint
required for training large deep learning models.
    For more details please see ZeRO: Memory Optimizations Toward Training Trillion Parameter Models
https://arxiv.org/abs/1910.02054
For usage examples, refer to TODO: DeepSpeed Tutorial
"""
def __init__(self,
module,
init_optimizer,
timers,
ds_config,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True,
contiguous_gradients=True,
reduce_bucket_size=500000000,
prefetch_bucket_size=50000000,
max_reuse_distance=1000000000,
max_live_parameters=1000000000,
param_persistence_threshold=100000,
model_persistence_threshold=sys.maxsize,
dp_process_group=None,
reduce_scatter=True,
overlap_comm=False,
offload_optimizer_config=None,
offload_param_config=None,
sub_group_size=1000000000000,
mpu=None,
clip_grad=0.0,
communication_data_type=torch.float16,
postscale_gradients=True,
gradient_predivide_factor=1.0,
gradient_accumulation_steps=1,
elastic_checkpoint=False,
aio_config=None):
see_memory_usage("Stage 3 initialize beginning", force=True)
print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False)
if dist.get_rank() == 0:
logger.info(f"Reduce bucket size {reduce_bucket_size}")
logger.info(f"Prefetch bucket size {prefetch_bucket_size}")
# The fused optimizer does all the work. We need this layer for two reason:
# 1. maintain same user API from apex.fp16_utils
        # 2. keep common stuff here in case we need to add new fused optimizer later
# differences from apex.fp16_utils:
# - assume all model params in fp16
# - assume all params requires grad
# - flat by groups, not keeping state. TODO: remove state explicitly?
# - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
if not get_accelerator().is_available():
raise SystemError("Cannot use fp16 without accelerator.")
self.optimizer = init_optimizer
# Load pre-built or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
self.dtype = self.optimizer.param_groups[0]['params'][0].dtype
self._global_grad_norm = 0.
self.custom_loss_scaler = False
self.external_loss_scale = None
self.optimizer_swapper = None
self.swap_optimizer = False
self.offload_optimizer = False
self.offload_optimizer_pin_memory = False
self.offload_optimizer_fast_init = False
self.offload_param = False
self.offload_param_pin_memory = False
self.params_in_nvme_and_cpu = False
self.max_params_in_cpu = 0
self.parameter_offload = self.initialize_ds_offload(module=module,
timers=timers,
ds_config=ds_config,
overlap_comm=overlap_comm,
prefetch_bucket_size=prefetch_bucket_size,
max_reuse_distance=max_reuse_distance,
max_live_parameters=max_live_parameters,
param_persistence_threshold=param_persistence_threshold,
model_persistence_threshold=model_persistence_threshold,
offload_optimizer_config=offload_optimizer_config,
mpu=mpu)
self.persistent_parameters = self.parameter_offload.persistent_parameters
self._configure_offloading(offload_optimizer_config, offload_param_config)
self.module = module
self.elastic_checkpoint = elastic_checkpoint
self.inf_or_nan_tracker: Tensor = torch.zeros(1,
dtype=torch.bool,
device=get_accelerator().current_device_name(),
requires_grad=False)
self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam)
self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu
### streams used for overlapping computation with communication
self.reduce_and_partition_stream = get_accelerator().Stream() if overlap_comm else get_accelerator(
).default_stream()
############################################################################
self.n_caching_allocator_flushes = 0
#-------------Stage 3 Setup-------------------#
self.timers = timers
self.reduce_scatter = reduce_scatter
self.dp_process_group = dp_process_group
self.partition_count = dist.get_world_size(group=self.dp_process_group)
if mpu is None:
self.model_parallel_group = None
self.model_parallel_rank = 0
else:
self.model_parallel_group = mpu.get_model_parallel_group()
self.model_parallel_rank = mpu.get_model_parallel_rank()
self.overflow = False
self.clip_grad = clip_grad
self.communication_data_type = communication_data_type
self.gradient_predivide_factor = gradient_predivide_factor
self.postscale_gradients = postscale_gradients
self.gradient_accumulation_steps = gradient_accumulation_steps
self.micro_step_id = 0
self.reduce_bucket_size = int(reduce_bucket_size)
if self.reduce_scatter:
valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32)
assert self.communication_data_type in valid_reduce_scatter_dtypes, f"ZeRO-3 supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'"
assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-3 with reduce scatter enabled"
assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-3 with reduce scatter enabled"
        # Holds the model parameters
        # The param.data may not hold any meaningful data
        # when param's status is NOT_AVAILABLE or IN_FLIGHT
self.fp16_groups = []
# Hold partitioned parameters
self.fp16_partitioned_groups = []
# Holds a fused and flattened copy of the parameters
self.fp16_partitioned_groups_flat = []
self.fp16_partitioned_groups_flat_numel = []
#defragmented pinned memory
self.param_groups_fp16_flat_cpu_memory = []
#a single 32-bit partition of the parallel partitioned parameters
#that this process will update
self.fp32_partitioned_groups_flat = []
self.next_swappable_fp32_partitioned_groups = []
# number of elements per partition in each group
self.partition_size = []
self.all_reduce_print = False
self.prefetch_elements = int(prefetch_bucket_size)
self.contiguous_gradients = contiguous_gradients
# padding on each partition for alignment purposes
self.groups_padding = []
self.sub_group_size = sub_group_size
self.sub_group_to_group_id = {}
# Trainable parameters
self.trainable_param_groups = self._get_trainable_parameter_groups()
see_memory_usage("Before creating fp16 partitions", force=True)
self._create_fp16_partitions_with_defragmentation(self.trainable_param_groups)
num_fp16_subgroups = len(self.fp16_partitioned_groups_flat)
see_memory_usage(f"After creating fp16 partitions: {num_fp16_subgroups}", force=True)
# Optimizer tensor swapping
if self.swap_optimizer:
self._configure_tensor_swapping(offload_optimizer_config, aio_config)
self.params_in_ipg_bucket = []
self.is_gradient_accumulation_boundary: bool = True
self.param_reduce_events: Deque[get_accelerator().Event] = collections.deque()
# TODO. make this configurable via JSON
self.max_param_reduce_events: int = 2
self.param_dict = {}
# map between param_id and bool to specify if a param is in this partition
self.is_param_in_current_partition = {}
self.extra_large_param_to_reduce = None
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.params_already_reduced = []
self.is_gradient_accumulation_boundary = True
self._release_ipg_buffers()
self.previous_reduced_grads = None
# simplified param id
self.param_id = {}
count = 0
for i, params_group in enumerate(self.fp16_groups):
for param in params_group:
unique_id = id(param)
self.param_id[unique_id] = count
self.param_dict[count] = param
self.params_already_reduced.append(False)
count = count + 1
#Largest partitioned param
largest_partitioned_param_numel = max([
max([max(tensor.numel(), tensor.ds_numel) for tensor in fp16_partitioned_group])
for fp16_partitioned_group in self.fp16_partitioned_groups
])
print_rank_0(f'Largest partitioned param numel = {largest_partitioned_param_numel}', force=False)
self._setup_for_real_optimizer()
self.grad_position = {}
self.set_grad_positions()
if self.offload_optimizer:
self.norm_for_param_grads = {}
self.local_overflow = False
# stores if a partition has been reduced in this step
self.is_partition_reduced = {}
# stores if a grad in a partition has been computed or not
self.is_grad_computed = {}
# will store the averaged gradients required by this partition
self.averaged_gradients = {}
#creates backward hooks for gradient partitioning
self.create_reduce_and_remove_grad_hooks()
#exit(0)
# we may have a way of fusing dynamic scale. Do not support for now
self.loss_scaler = CreateLossScaler(dtype=self.dtype,
static_loss_scale=static_loss_scale,
dynamic_scaling=dynamic_loss_scale,
dynamic_loss_args=dynamic_loss_args)
self.dynamic_loss_scale = self.loss_scaler.dynamic
self.debug_fp16_grads = [{} for _ in self.fp16_groups]
self._link_all_hp_params()
if dist.get_rank(group=self.dp_process_group) == 0:
see_memory_usage(f"After initializing ZeRO optimizer", force=True)
def destroy(self):
self.parameter_offload.destroy()
def initialize_ds_offload(
self,
module,
timers,
ds_config,
overlap_comm,
prefetch_bucket_size,
max_reuse_distance,
max_live_parameters,
param_persistence_threshold,
model_persistence_threshold,
offload_optimizer_config,
mpu,
):
return DeepSpeedZeRoOffload(module=module,
timers=timers,
ds_config=ds_config,
overlap_comm=overlap_comm,
prefetch_bucket_size=prefetch_bucket_size,
max_reuse_distance=max_reuse_distance,
max_live_parameters=max_live_parameters,
param_persistence_threshold=param_persistence_threshold,
model_persistence_threshold=model_persistence_threshold,
offload_param_config=offload_optimizer_config,
mpu=mpu)
def _get_trainable_parameter_groups(self):
param_groups = []
for param_group in self.optimizer.param_groups:
trainable_params = {"params": [p for p in param_group["params"] if p.requires_grad]}
param_groups.append(trainable_params)
return param_groups
def _setup_for_real_optimizer(self):
see_memory_usage("Before creating fp32 partitions", force=True)
self._create_fp32_partitions()
see_memory_usage("After creating fp32 partitions", force=True)
dist.barrier()
# To support pipelined optimizer swapping
self._create_next_swappable_fp32_groups()
see_memory_usage("Before initializing optimizer states", force=True)
self.initialize_optimizer_states()
see_memory_usage("After initializing optimizer states", force=True)
dist.barrier()
if dist.get_rank() == 0:
logger.info(f"optimizer state initialized")
# IPG
if self.contiguous_gradients:
self.__ipg_bucket_flat_buffer: Tensor = torch.empty(self.reduce_bucket_size,
dtype=self.dtype,
device=get_accelerator().current_device_name())
grad_partitions_flat_buffer = None
self.__param_id_to_grad_partition: Dict[int, Tensor] = {}
all_params = list(itertools.chain.from_iterable(self.fp16_groups))
grad_partitions_flat_buffer: Tensor = torch.zeros(sum(p.partition_numel() for p in all_params),
dtype=self.dtype,
device=self.device)
if self.offload_optimizer_pin_memory:
grad_partitions_flat_buffer = get_accelerator().pin_memory(grad_partitions_flat_buffer)
offset = 0
for param in all_params:
self.__param_id_to_grad_partition[param.ds_id] = grad_partitions_flat_buffer.narrow(
0, offset, param.partition_numel())
offset += param.partition_numel()
def _link_all_hp_params(self):
for p in self.module.parameters():
p._z3_optimizer = self
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]["lr"]
# TODO. factor out to a utility outside of stage3
@staticmethod
def defragment(tensors: List[Tensor]) -> Tensor:
"""move provided tensors into a contiguous flat buffer, with some additional
measures taken to reduce memory fragmentation"""
assert len(set(t.dtype for t in tensors)) == 1
assert len(set(t.device for t in tensors)) == 1
cpu_buffer = torch.empty(sum(p.numel() for p in tensors),
dtype=get_only_unique_item(t.dtype for t in tensors),
device="cpu")
tensor_infos: List[Tuple[Tensor, int, int]] = []
orig_device = get_only_unique_item(t.device for t in tensors)
offset = 0
for tensor in tensors:
tensor_numel = tensor.numel()
# move the tensor from device memory to host memory
cpu_buffer.narrow(0, offset, tensor_numel).copy_(tensor)
tensor.data = torch.empty(0, dtype=tensor.dtype, device=tensor.device)
# record some data so we can restore the device tensor later
tensor_infos.append((tensor, offset, tensor_numel))
offset += tensor_numel
gc.collect()
get_accelerator().empty_cache()
# copy tensors (now flattened and contiguous) back to GPU
device_buffer = cpu_buffer.to(orig_device)
# restore device tensors
for tensor, offset, tensor_numel in tensor_infos:
tensor.data = device_buffer.narrow(0, offset, tensor_numel)
return device_buffer
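    # Usage sketch (hypothetical tensors, illustration only): given same-dtype tensors on one device,
    # defragment() re-points each tensor's .data into a single contiguous device buffer and returns it:
    #
    #   a = torch.ones(4, device=get_accelerator().current_device_name())
    #   b = torch.ones(6, device=a.device)
    #   flat = DeepSpeedZeroOptimizer_Stage3.defragment([a, b])
    #   # flat.numel() == 10, and a.data / b.data are now narrowed views into flat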
def _get_param_coordinator(self, training):
return self.parameter_offload.get_param_coordinator(training)
def _configure_offloading(self, offload_optimizer_config, offload_param_config):
###################### offload optimizer setup ##################################
if offload_optimizer_config is not None and offload_optimizer_config.device != OffloadDeviceEnum.none:
self.offload_optimizer = True
self.offload_optimizer_pin_memory = offload_optimizer_config.pin_memory
self.swap_optimizer = offload_optimizer_config.device == OffloadDeviceEnum.nvme
self.offload_optimizer_fast_init = offload_optimizer_config.fast_init
###################### offload param setup ##################################
if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none:
self.offload_param = True
self.offload_param_pin_memory = offload_param_config.pin_memory
self.params_in_nvme_and_cpu = offload_param_config.device == OffloadDeviceEnum.nvme
self.max_params_in_cpu = offload_param_config.max_in_cpu
print_rank_0(
f"FP16 params swapping is {self.params_in_nvme_and_cpu}, Max params in CPU is {self.max_params_in_cpu}",
force=False)
def _configure_tensor_swapping(self, offload_optimizer_config, aio_config):
nvme_swap_folder = os.path.join(offload_optimizer_config.nvme_path, 'zero_stage_3')
os.makedirs(nvme_swap_folder, exist_ok=True)
if dist.get_rank() == 0:
logger.info(f'Tensor Swapping: Adding optimizer tensors')
swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config.pipeline else PartitionedOptimizerSwapper
self.optimizer_swapper = swapper_type(swap_config=offload_optimizer_config,
aio_config=aio_config,
base_folder=nvme_swap_folder,
optimizer=self.optimizer,
largest_numel=max(self.fp16_partitioned_groups_flat_numel),
device=self.device,
dtype=torch.float32,
timers=self.timers)
@property
def elements_in_ipg_bucket(self):
return sum(p.ds_numel for p in self.params_in_ipg_bucket)
def _move_to_flat_buffer(self, param_list, flat_buffer, avoid_copy=False):
        '''If flat_buffer is None then the parameters in param_list are not copied into it
        because they exceed max_params_in_cpu. Some of these parameters may already be in CPU
        in unflattened buffers, or they may be in GPU, or they may be in NVMe. If they are in
        NVMe, they will be marked as NOT_AVAILABLE and moved to CPU when they are needed
        during training.'''
if flat_buffer is None:
# this dst buffer is on NVMe, so skip this
return
start = 0
for param in param_list:
src = param.ds_tensor
dest = flat_buffer.narrow(0, start, src.ds_numel)
start = start + src.ds_numel
'''if the parameter was initialized in nvme then bring it to the destination buffer directly'''
if src.status == PartitionedParamStatus.NOT_AVAILABLE:
print_rank_0(
f"Swapping in {param.ds_id} with partition size {param.partition_numel()} permanently to CPU")
param.nvme_swapper.swap_into_buffer(param, dest)
src.data = dest.data
src.status = PartitionedParamStatus.AVAILABLE
else:
assert src.status == PartitionedParamStatus.AVAILABLE, "Partitioned Param must be available here"
if not avoid_copy:
dest.data.copy_(src.data)
src.data = dest.data
# Final location must be gpu/cpu in this case
param.ds_tensor.final_location = 'not-nvme'
def _create_param_groups_fp16_flat_cpu_memory(self):
aggregate_params_count = 0
for j, param_group in enumerate(self.trainable_param_groups):
params_in_group = sum([p.partition_numel() for p in param_group['params']])
flat_buffer_size = params_in_group
if self.params_in_nvme_and_cpu and \
aggregate_params_count + params_in_group > self.max_params_in_cpu:
flat_buffer_size = max(0, self.max_params_in_cpu - aggregate_params_count)
aggregate_params_count += params_in_group
if flat_buffer_size > 0:
print_rank_0(f"group {j} flat buffer size {flat_buffer_size}", force=False)
self.param_groups_fp16_flat_cpu_memory.append(get_accelerator().pin_memory(
torch.empty(int(flat_buffer_size), dtype=self.dtype)))
else:
print_rank_0(f"No flat buffer size. Param group size was {params_in_group}", force=False)
self.param_groups_fp16_flat_cpu_memory.append(torch.empty(1, dtype=self.dtype))
def _create_fp16_partitions_with_defragmentation(self, fp16_param_groups):
dist.barrier()
param_groups: List[List[Parameter]] = tuple(
self._create_fp16_sub_groups(param_group["params"]) for param_group in fp16_param_groups)
# bookkeeping related to param groups
for param_group_idx, param_group in enumerate(param_groups):
for sub_group in param_group:
sub_group_idx = len(self.fp16_groups)
# record sub group and partitions
self.fp16_groups.append(sub_group)
self.fp16_partitioned_groups.append([param.ds_tensor for param in sub_group])
# record sub group -> group mapping
self.sub_group_to_group_id[sub_group_idx] = param_group_idx
# record total elements of parameter partitions in sub group
self.fp16_partitioned_groups_flat_numel.append(sum(p.partition_numel() for p in sub_group))
# record padding required to align group to world size (only applies to last rank)
rank_requires_padding = dist.get_rank(
self.dp_process_group) == dist.get_world_size(self.dp_process_group) - 1
self.groups_padding.append([p.padding_size() if rank_requires_padding else 0 for p in sub_group])
# move parameters to flattened buffer
if not self.offload_param: # partitioned params remain in GPU during training
# move parameter partitions into a single contiguous flat buffer
parameter_partitions: List[Tensor] = []
for sub_group in self.fp16_groups:
for param in sub_group:
parameter_partitions.append(param.ds_tensor)
device_buffer = __class__.defragment(parameter_partitions)
# setup flat buffers per subgroup, these are each just sections of the
# contiguous flat buffer for all parameters that we created earlier
offset = 0
for sub_group in self.fp16_groups:
sub_group_numel = sum(param.partition_numel() for param in sub_group)
self.fp16_partitioned_groups_flat.append(device_buffer.narrow(0, offset, sub_group_numel))
offset += sub_group_numel
else: # partitioned params offloaded to CPU when not in use
# create a flat CPU memory allocation for each param group
self._create_param_groups_fp16_flat_cpu_memory()
for param_group_idx, param_group in enumerate(param_groups):
flat_offset = 0
for i, sub_group in enumerate(param_group):
total_elements = sum(p.partition_numel() for p in sub_group)
print_rank_0(f"Params in nvme and cpu {self.params_in_nvme_and_cpu}")
#Flat buffer may not be available for parameters that reside in NVME
if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= self.param_groups_fp16_flat_cpu_memory[
param_group_idx].numel():
fp16_partitioned_group_flat = self.param_groups_fp16_flat_cpu_memory[param_group_idx].narrow(
0, flat_offset, total_elements)
print_rank_0(
f"Creating a flat buffer for subgroup {i} requiring {total_elements} elements, and cumulative CPU elements {flat_offset + total_elements}",
force=False)
elif self.params_in_nvme_and_cpu:
fp16_partitioned_group_flat = None
print_rank_0(f"No flat buffer for sub group {i} of {total_elements} elements", force=False)
else:
assert False, "Either params are in nvme, or they are in CPU memory. This code path should not be triggered. Please see you max_params_in_cpu and params_in_nvme configs"
self.fp16_partitioned_groups_flat.append(fp16_partitioned_group_flat)
flat_offset += total_elements
self._move_to_flat_buffer(sub_group,
fp16_partitioned_group_flat,
avoid_copy=not self.offload_param)
# if necessary, create a pinned memory buffer to be used for swapping out
# params to NVME after optimizer step
should_create_fp16_flat_reuse_buffer = any(flattened_partition_group is None
for flattened_partition_group in self.fp16_partitioned_groups_flat)
if should_create_fp16_flat_reuse_buffer:
max_partition_numel, largest_partition_numel = 0, None
for sub_group in self.fp16_groups:
total_elements = sum(t.partition_numel() for t in sub_group)
if total_elements > max_partition_numel:
largest_partition_numel = [t.ds_numel for t in sub_group]
max_partition_numel = total_elements
assert len(largest_partition_numel) > 0, f'Unexpected that largest partition is empty'
self.fp16_groups[0][0].nvme_swapper.reserve_partitioned_swap_space(largest_partition_numel)
def _swap_in_sub_group_to_flat_buffer(self, flat_buffer, sub_group_id):
offset = 0
elements_in_sub_group = sum([t.ds_numel for t in self.fp16_partitioned_groups[sub_group_id]])
assert (flat_buffer.numel() == elements_in_sub_group)
for param, partitioned_param in zip(self.fp16_groups[sub_group_id],
self.fp16_partitioned_groups[sub_group_id]):
dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel)
if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE:
print_rank_0(
f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.partition_numel()}"
)
param.nvme_swapper.swap_in([param], async_op=False)
dest.data.copy_(partitioned_param.data)
param.nvme_swapper.remove_partition_and_release_buffers([param])
print_rank_0(f"Swapping in {param.ds_id} done")
else:
dest.data.copy_(partitioned_param.data)
offset += partitioned_param.ds_numel
def _create_next_swappable_fp32_groups(self):
reverse_order_indices = [i for i in range(len(self.fp32_partitioned_groups_flat))]
reverse_order_indices.reverse()
next_group = None
for i in reverse_order_indices:
self.next_swappable_fp32_partitioned_groups.append(next_group)
if self._swappable_optimizer_subgroup(i):
next_group = self.fp32_partitioned_groups_flat[i]
self.next_swappable_fp32_partitioned_groups.reverse()
def _get_sub_group_partitions(self, sub_group_id):
sub_group_partitions = []
for param, partitioned_param in zip(self.fp16_groups[sub_group_id],
self.fp16_partitioned_groups[sub_group_id]):
if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE:
swap_path = param.nvme_swapper.get_path(param, True)
sub_group_partitions.append((partitioned_param, param.partition_numel(), swap_path))
else:
sub_group_partitions.append((partitioned_param, partitioned_param.ds_numel, None))
return sub_group_partitions
def _create_fp32_partitions(self):
cpu_memory_usage = 0
cpu_memory_sub_groups = 0
nvme_memory_usage = 0
num_swappable_partitions = 0
num_swap_from_nvme_partitions = 0
num_swap_from_cpu_partitions = 0
swap_from_nvme_memory_usage = 0
swap_from_cpu_memory_usage = 0
GIGA_BYTES = (1024**3)
swappable_fp32_tensors = []
swappable_fp16_src_tensors = []
nvme_fp16_partitions_info = []
nvme_fp16_num_elems = []
nvme_fp32_dest_tensors = []
fp32_element_size = torch.tensor([], dtype=torch.float32).element_size()
for i, tensor in enumerate(self.fp16_partitioned_groups_flat):
num_elements = self.fp16_partitioned_groups_flat_numel[i]
# a partition of the fp32 master weights that will be updated by this process
if self._swappable_optimizer_subgroup(i):
self.fp32_partitioned_groups_flat.append(torch.Tensor())
nvme_memory_usage += (fp32_element_size * num_elements)
num_swappable_partitions += 1
if self.params_in_nvme_and_cpu and tensor is None:
num_swap_from_nvme_partitions += 1
swap_from_nvme_memory_usage += (fp32_element_size * num_elements)
if self.offload_optimizer_fast_init:
sub_group_partitions = self._get_sub_group_partitions(i)
nvme_fp16_partitions_info.append(sub_group_partitions)
nvme_fp16_num_elems.append(num_elements)
nvme_fp32_dest_tensors.append(self.fp32_partitioned_groups_flat[i])
else:
unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float)
self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i)
self.optimizer_swapper.initialize_parameters(parameters=[self.fp32_partitioned_groups_flat[i]],
src_tensors=[unpinned_fp32_buffer])
else:
num_swap_from_cpu_partitions += 1
swap_from_cpu_memory_usage += (fp32_element_size * num_elements)
swappable_fp32_tensors.append(self.fp32_partitioned_groups_flat[i])
swappable_fp16_src_tensors.append(self.fp16_partitioned_groups_flat[i])
else:
cpu_memory_usage += (fp32_element_size * num_elements)
cpu_memory_sub_groups += 1
if self.params_in_nvme_and_cpu and tensor is None:
unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float)
self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i)
self.fp32_partitioned_groups_flat.append(unpinned_fp32_buffer)
else:
self.fp32_partitioned_groups_flat.append(self.fp16_partitioned_groups_flat[i].to(
self.device).clone().float().detach())
self.fp32_partitioned_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it
if len(swappable_fp32_tensors) > 0:
self.optimizer_swapper.initialize_parameters(parameters=swappable_fp32_tensors,
src_tensors=swappable_fp16_src_tensors)
if len(nvme_fp32_dest_tensors) > 0:
fp16_pinned_buffers = self.fp16_groups[0][0].nvme_swapper.reserve_available_buffers()
assert len(fp16_pinned_buffers) > 0
self.optimizer_swapper.initialize_from_swapped_fp16_params(fp16_partitions_info=nvme_fp16_partitions_info,
fp16_num_elems=nvme_fp16_num_elems,
fp16_pinned_buffers=fp16_pinned_buffers,
fp32_parameters=nvme_fp32_dest_tensors)
self.fp16_groups[0][0].nvme_swapper.release_reserved_buffers()
nvme_gigabytes = nvme_memory_usage / GIGA_BYTES
print_rank_0(f'Swappable FP32 Partitions: count={num_swappable_partitions} size={nvme_gigabytes:5.2f} GB',
force=False)
if self.params_in_nvme_and_cpu:
print_rank_0(
f'Swap from NVMe Partitions: count = {num_swap_from_nvme_partitions}, size = {swap_from_nvme_memory_usage/GIGA_BYTES:5.2f}GB',
force=False)
print_rank_0(
f'Swap from CPU Partitions: count = {num_swap_from_cpu_partitions}, size = {swap_from_cpu_memory_usage/GIGA_BYTES:5.2f}GB',
force=False)
cpu_memory_gigabytes = cpu_memory_usage / GIGA_BYTES
print_rank_0(f'In-Memory FP32 Partitions: count={cpu_memory_sub_groups} size={cpu_memory_gigabytes:5.2f} GB',
force=False)
# Clear for on-the-fly population before the optimizer step
for param_group in self.optimizer.param_groups:
param_group['params'] = []
def _create_fp16_sub_groups(self, params_group):
params_group_numel = sum([param.partition_numel() for param in params_group])
sub_group_size = self.sub_group_size
if sub_group_size is None or sub_group_size >= params_group_numel:
return [params_group]
sub_groups = []
sub_group = []
local_sub_group_size = 0
for param in params_group:
sub_group.append(param)
local_sub_group_size += param.partition_numel()
if local_sub_group_size >= sub_group_size or id(param) == id(params_group[-1]):
sub_groups.append(sub_group)
sub_group = []
local_sub_group_size = 0
return sub_groups
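    # Worked example (hypothetical sizes): with sub_group_size=100 and three params whose
    # partition_numel() values are 60, 60 and 30, the greedy split above returns two sub groups,
    # [[p0, p1], [p2]]: 60 + 60 >= 100 closes the first group, and the final param closes the last.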
def _release_ipg_buffers(self):
if self.contiguous_gradients:
self.ipg_buffer = None
def _optimizer_step(self, sub_group_id):
param_group_id = self.sub_group_to_group_id[sub_group_id]
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
self.optimizer.param_groups[param_group_id]['params'] = [fp32_param]
self.optimizer.step()
self.optimizer.param_groups[param_group_id]['params'] = []
def _swappable_optimizer_subgroup(self, sub_group_id):
if not self.swap_optimizer:
return False
return self.optimizer_swapper.swappable_tensor(None,
numel=self.fp16_partitioned_groups_flat_numel[sub_group_id])
def _partitioned_params_swap_out(self, i):
offset = 0
fp32_param = self.fp32_partitioned_groups_flat[i]
assert fp32_param is not None, \
f'fp32 parameters of sub_group {i} is None'
swap_fp16_params = []
swap_fp32_params = []
for param, partitioned_param in zip(self.fp16_groups[i], self.fp16_partitioned_groups[i]):
src = fp32_param.narrow(0, offset, partitioned_param.ds_numel)
if partitioned_param.status == PartitionedParamStatus.AVAILABLE:
partitioned_param.data.copy_(src.data)
else:
swap_fp32_params.append(src)
swap_fp16_params.append(param)
offset += partitioned_param.ds_numel
if len(swap_fp16_params):
swap_fp16_params[0].nvme_swapper.swap_out_partitioned_params(dst_fp16_params=swap_fp16_params,
src_fp32_params=swap_fp32_params)
def initialize_optimizer_states(self):
num_subgroups = len(self.fp16_groups)
largest_numel = max([sum([p.ds_numel for p in psg]) for psg in self.fp16_partitioned_groups])
gradient_dtype = self.fp32_partitioned_groups_flat[0].dtype
gradient_buffer = torch.zeros(int(largest_numel), dtype=gradient_dtype, device=self.device)
timer_names = set()
# State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
# which do lazy initialization of the state at the first call to step.
is_adagrad = isinstance(self.optimizer, torch.optim.Adagrad)
if self.swap_optimizer:
self.optimizer_swapper.init_timers()
INIT_OPTIMIZER_TIMER = 'init_optimizer_state'
timer_names.add(INIT_OPTIMIZER_TIMER)
self.start_timers([INIT_OPTIMIZER_TIMER])
for i, group in enumerate(self.fp16_groups):
swappable_optimizer_subgroup = self._swappable_optimizer_subgroup(i)
swappable_param_subgroup = self.fp16_partitioned_groups_flat[i] is None
num_elements = int(self.fp16_partitioned_groups_flat_numel[i])
see_memory_usage(
f'[Begin] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}',
force=False)
if swappable_optimizer_subgroup:
self._optimizer_states_and_gradient_swap_in(i, timer_names)
if self.offload_optimizer and not swappable_optimizer_subgroup:
subgroup_gradient_buffer = torch.zeros(num_elements, dtype=gradient_dtype, device=self.device)
if self.offload_optimizer_pin_memory:
subgroup_gradient_buffer = get_accelerator().pin_memory(subgroup_gradient_buffer)
self.fp32_partitioned_groups_flat[i].grad = subgroup_gradient_buffer
else:
self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow(0, 0, num_elements)
            # Initialize the optimizer states with the flattened fp32 partition.
if not is_adagrad:
self._optimizer_step(i)
if swappable_param_subgroup:
self._partitioned_params_swap_out(i)
if swappable_optimizer_subgroup:
self._optimizer_states_and_gradient_swap_out(i, timer_names)
see_memory_usage(
f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}',
force=False)
        # Initialize the optimizer states with the flattened fp32 partition.
if is_adagrad:
self.optimizer = torch.optim.Adagrad(self.fp32_partitioned_groups_flat, **self.optimizer.defaults)
self.stop_timers([INIT_OPTIMIZER_TIMER])
self.log_timers(timer_names)
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
if not self.offload_optimizer:
for group in self.fp32_partitioned_groups_flat:
group.grad = None
# Reset steps
return
#########################################################################
#########################ZeRO Partition Gradients########################
#########################################################################
def get_first_param_index(self, group_id, param_group, partition_id):
for index, param in enumerate(param_group):
param_id = self.get_param_id(param)
if partition_id in self.param_to_partition_ids[group_id][param_id]:
return index
return None
def initialize_gradient_partitioning_data_structures(self):
total_partitions = dist.get_world_size(group=self.dp_process_group)
for i, param_group in enumerate(self.fp16_groups):
self.param_to_partition_ids[i] = {}
self.is_partition_reduced[i] = {}
self.total_grads_in_partition[i] = {}
self.remaining_grads_in_partition[i] = {}
self.is_grad_computed[i] = {}
self.grad_partition_insertion_offset[i] = {}
self.grad_start_offset[i] = {}
self.first_param_index_in_partition[i] = {}
for partition_id in range(total_partitions):
self.is_grad_computed[i][partition_id] = {}
self.grad_partition_insertion_offset[i][partition_id] = {}
self.grad_start_offset[i][partition_id] = {}
self.initialize_gradient_partition(i, param_group, partition_id)
self.is_partition_reduced[i][partition_id] = False
self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index(
i, param_group, partition_id)
@instrument_w_nvtx
def independent_gradient_partition_epilogue(self):
self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0)
self.__reduce_and_partition_ipg_grads()
self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0)
self.reduce_and_partition_stream.synchronize()
# if dist.get_rank() == 0:
# logger.info("Params already reduced %s", self.params_already_reduced)
for i in range(len(self.params_already_reduced)):
self.params_already_reduced[i] = False
#in case of cpu offload, averaged gradients are already in fp32_partitioned_groups_flat.grad
#TODO: use a similar code path for both cpu_offload and non-cpu offload
if not self.offload_optimizer:
for i, sub_group in enumerate(self.fp16_groups):
self.averaged_gradients[i] = [
self.__param_id_to_grad_partition[param.ds_id]
if param.requires_grad else torch.zeros_like(param.ds_tensor) for param in sub_group
]
# self.averaged_gradients[i] = self.get_flat_partition(
# self.fp16_groups[i],
# 0,
# self.fp32_partitioned_groups_flat[i].numel(),
# return_tensor_list=True)
# this method gets called after every backward. need to increment
# here because if it gets incremented in backward() the micro step
        # id will be off by one when we do the reduce and partition at the
# start of this method.
# TODO. make this less error prone
self.micro_step_id += 1
def overlapping_partition_gradients_reduce_epilogue(self):
self.independent_gradient_partition_epilogue()
def create_reduce_and_remove_grad_hooks(self):
print_rank_0(f'[Begin] Create gradient reduction hooks')
self.grad_accs = []
for i, param_group in enumerate(self.fp16_groups):
for param in param_group:
if param.requires_grad:
#print_rank_0(f" Before all gather {param.device}, {param.shape}")
# The hook must be created in un-partitioned parameter
param.all_gather()
#print(f"After all gather {param.device}, {param.shape}")
def wrapper(param, i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
@instrument_w_nvtx
def reduce_partition_and_remove_grads(*notneeded):
self.reduce_ready_partitions_and_remove_grads(param, i)
grad_acc.register_hook(reduce_partition_and_remove_grads)
self.grad_accs.append(grad_acc)
#print(f"param grad fn {param.expand_as(param).grad_fn}")
wrapper(param, i)
# Partition the parameter after creating the hook
param.partition()
print_rank_0(f'[End] Create gradient reduction hooks')
def get_param_id(self, param):
unique_id = id(param)
return self.param_id[unique_id]
def report_ipg_memory_usage(self, tag, param_elems):
elem_count = self.elements_in_ipg_bucket + param_elems
percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size
see_memory_usage(
f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}",
force=False)
    ###############Independent Partition Gradient ########################
def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
#print_rank_0(f"Inside reduce ipg buckets. {debug_param2name_id_shape(param)}, ipg elements {self.elements_in_ipg_bucket}, reduce bucket size {self.reduce_bucket_size}", force=True)
        # Because the ipg bucket is initialized with a random placeholder tensor, we must
        # explicitly check that the bucket has any real data in it (self.elements_in_ipg_bucket >
        # 0). Otherwise, if the incoming param.ds_numel is large, this branch may get triggered on
        # garbage data and `self.average_tensor()` will crash because its params_to_reduce will be
        # empty, while reduction_list will have that garbage data.
if self.elements_in_ipg_bucket > 0 and self.elements_in_ipg_bucket + param.ds_numel > self.reduce_bucket_size:
self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.ds_numel)
self.__reduce_and_partition_ipg_grads()
param_id = self.get_param_id(param)
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
self.__add_grad_to_ipg_bucket(param)
@instrument_w_nvtx
@torch.no_grad()
def __add_grad_to_ipg_bucket(self, param: Parameter) -> None:
self.reduce_and_partition_stream.wait_stream(get_accelerator().default_stream())
if self.contiguous_gradients and self.elements_in_ipg_bucket + param.grad.numel() < self.reduce_bucket_size:
# move the gradient to a contiguous buffer
with get_accelerator().stream(self.reduce_and_partition_stream):
# move the parameter's gradient to the contiguous flat buffer
new_grad_tensor = self.__ipg_bucket_flat_buffer.narrow(0, self.elements_in_ipg_bucket,
param.grad.numel()).view_as(param.grad)
new_grad_tensor.copy_(param.grad, non_blocking=True)
param.grad.record_stream(get_accelerator().current_stream())
param.grad.data = new_grad_tensor
self.params_in_ipg_bucket.append(param)
@instrument_w_nvtx
@torch.no_grad()
def __reduce_and_partition_ipg_grads(self, safe_mode: bool = False) -> None:
if not self.params_in_ipg_bucket:
return
for param in self.params_in_ipg_bucket:
if param.grad.numel() != param.ds_numel:
raise RuntimeError(f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter "
f"gradients whose size is not same as the params")
self.params_in_ipg_bucket.sort(key=lambda p: p.ds_id)
assert len(set(p.ds_id for p in self.params_in_ipg_bucket)) == len(self.params_in_ipg_bucket)
while self.param_reduce_events and self.param_reduce_events[0].query():
self.param_reduce_events.popleft()
if len(self.param_reduce_events) > self.max_param_reduce_events:
self.param_reduce_events.popleft().synchronize()
with get_accelerator().stream(self.reduce_and_partition_stream):
if safe_mode:
assert_ints_same_as_other_ranks([p.ds_id for p in self.params_in_ipg_bucket])
grad_partitions = self.__avg_scatter_grads(self.params_in_ipg_bucket)
self.partition_grads(self.params_in_ipg_bucket, grad_partitions)
self.params_in_ipg_bucket.clear()
event = get_accelerator().Event()
event.record()
self.param_reduce_events.append(event)
@instrument_w_nvtx
def __avg_scatter_grads(self, params_to_reduce: List[Parameter]) -> List[Tensor]:
"""average gradients and scatter partitions across ranks"""
full_grads_for_rank = [p.grad for p in params_to_reduce]
if self.communication_data_type != self.dtype:
full_grads_for_rank = [g.to(self.communication_data_type) for g in full_grads_for_rank]
if self.postscale_gradients and self.gradient_predivide_factor != 1.0:
full_grads_for_rank = [g.div(self.gradient_predivide_factor) for g in full_grads_for_rank]
grad_partitions_for_rank = reduce_scatter_coalesced(full_grads_for_rank, self.dp_process_group)
if self.postscale_gradients and self.gradient_predivide_factor != dist.get_world_size(self.dp_process_group):
grad_partitions_for_rank = [g.mul(self.gradient_predivide_factor) for g in grad_partitions_for_rank]
if self.communication_data_type != self.dtype:
grad_partitions_for_rank = [g.to(self.dtype) for g in grad_partitions_for_rank]
return grad_partitions_for_rank
def set_grad_positions(self):
for i, group in enumerate(self.fp16_groups):
current_offset = 0
for param in group:
param_id = self.get_param_id(param)
num_elements = param.partition_numel()
self.grad_position[param_id] = [int(i), int(current_offset), int(num_elements)]
#print(f"param id {param_id} i:{i}, ds_tensor {num_elements} numel {param.numel()}")
current_offset += num_elements
see_memory_usage(f"After Set Grad positions", force=False)
def _constant_buffered_norm2(self, input, buffer_size=250000000):
norm = None
for part in input.view(-1).split(buffer_size):
if norm is None:
norm = part.data.double().norm(2)**2.0
else:
norm += part.data.double().norm(2)**2.0
return norm**0.5
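    # Note (explanatory, behavior unchanged): summing squared chunk norms in double precision and then
    # taking a square root is mathematically equal to input.double().norm(2); the chunked form only
    # bounds the temporary double buffer to `buffer_size` elements at a time, e.g. a 1B-element
    # gradient is processed in four chunks with the default buffer of 250M elements.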
def set_norm_for_param_grad_in_gpu(self, param):
param_id = self.get_param_id(param)
#self.norm_for_param_grads[param_id] = param.grad.data.double().norm(2)
#Using a more memory efficient version
self.norm_for_param_grads[param_id] = self._constant_buffered_norm2(param.grad)
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param, fp32_grad_tensor):
with get_accelerator().stream(self.copy_grad_stream):
param_id = self.get_param_id(param)
src_tensor = param.grad.view(-1).float()
#print(f"src_tensor {src_tensor.size()} and fp32 grad {fp32_grad_tensor.size()}")
fp32_grad_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None
def complete_grad_norm_calculation_for_cpu_offload(self, params):
total_norm = 0.0
norm_type = 2.0
for p in params:
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_id = self.get_param_id(p)
if param_id in self.norm_for_param_grads.keys():
param_norm = self.norm_for_param_grads[param_id]
total_norm += param_norm.item()**2
# Sum across all model parallel GPUs.
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
@instrument_w_nvtx
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None:
offload_fp32_gradients = {}
offload_fp32_offsets = {}
buffers = []
for param, grad_partition in zip(params_to_release, grad_partitions):
contains_real_data = param.partition_numel() * dist.get_rank(self.dp_process_group) < param.ds_numel
if not contains_real_data:
# this grad partition is empty - don't need to do anything
param.grad = None
continue
# move or accumulate gradient partition to target buffer
grad_buffer = self.__param_id_to_grad_partition[param.ds_id].narrow(0, 0, grad_partition.numel())
buffers.append(grad_buffer)
if self.micro_step_id == 0: # don't accumulate
grad_buffer.copy_(grad_partition, non_blocking=True)
# ensure grad buffer is a CUDA buffer to speed up the next few
# operations and so it can be used asynchronously
grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True)
elif get_accelerator().on_accelerator(grad_buffer):
grad_buffer.add_(grad_partition)
else:
# if dst is CPU, copy first to src device, do the addition
# there, then move back to dst. adding directly to cpu is very slow
cuda_grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True)
cuda_grad_buffer.add_(grad_partition)
grad_buffer.copy_(cuda_grad_buffer, non_blocking=True)
# ensure grad buffer is a CUDA buffer to speed up the next few
# operations and so it can be used asynchronously
grad_buffer = cuda_grad_buffer
if hasattr(self.inf_or_nan_tracker, "logical_or_"):
self.inf_or_nan_tracker.logical_or_(torch.isinf(grad_buffer).any())
self.inf_or_nan_tracker.logical_or_(torch.isnan(grad_buffer).any())
else:
# logical_or_ not available in older versions of pytorch
self.inf_or_nan_tracker += torch.isinf(grad_buffer).any()
self.inf_or_nan_tracker += torch.isnan(grad_buffer).any()
self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0
# offload the gradient partition if applicable
if self.offload_optimizer:
i, dest_offset, _ = self.grad_position[self.get_param_id(param)]
if self.is_gradient_accumulation_boundary:
self.norm_for_param_grads[self.get_param_id(param)] = self._constant_buffered_norm2(grad_buffer)
if self._swappable_optimizer_subgroup(i):
if not i in offload_fp32_gradients.keys():
offload_fp32_gradients[i] = []
offload_fp32_offsets[i] = []
offload_fp32_gradients[i].append(grad_buffer.float())
offload_fp32_offsets[i].append(dest_offset)
else:
fp32_grad_tensor = self.fp32_partitioned_groups_flat[i].grad.narrow(
0, dest_offset, grad_buffer.numel())
fp32_grad_tensor.copy_(grad_buffer)
# free the gradient
param.grad.record_stream(get_accelerator().current_stream())
param.grad = None
if self.offload_optimizer and self.swap_optimizer:
for i in offload_fp32_gradients.keys():
self.optimizer_swapper.swap_out_gradients(parameter=self.fp32_partitioned_groups_flat[i],
gradient_offsets=offload_fp32_offsets[i],
gradient_tensors=offload_fp32_gradients[i])
return buffers
def reduce_ready_partitions_and_remove_grads(self, param, i):
#print_rank_0(f"Backward {debug_param2name_id_shape(param)}", force=True)
self.reduce_independent_p_g_buckets_and_remove_grads(param, i)
def zero_reduced_gradients(self, partition_id, i):
def are_all_related_partitions_reduced(params_id):
for partition_id in self.param_to_partition_ids[i][params_id]:
if not self.is_partition_reduced[i][partition_id]:
return False
return True
for params_id in self.is_grad_computed[i][partition_id]:
if are_all_related_partitions_reduced(params_id):
self.param_dict[params_id].grad = None
def flatten_and_print(self, message, tensors, start=0, n=5):
flatten_tensor = self.flatten(tensors)
def print_func():
logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n))
self.sequential_execution(print_func, message)
def get_grads_to_reduce(self, i, partition_id):
def get_reducible_portion(key):
grad = self.param_dict[key].grad
total_elements = grad.numel()
start = self.grad_start_offset[i][partition_id][key]
num_elements = min(total_elements - start,
self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key])
if not pg_correctness_test:
if num_elements == total_elements:
return grad
else:
return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements))
else:
if num_elements == total_elements:
return grad.clone()
else:
return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements))
grads_to_reduce = []
for key in self.is_grad_computed[i][partition_id]:
grad = get_reducible_portion(key)
grads_to_reduce.append(grad)
return grads_to_reduce
def sequential_execution(self, function, message, group=None):
if group is None:
group = self.dp_process_group
if dist.get_rank(group=group) == 0:
logger.info(message)
for id in range(dist.get_world_size(group=group)):
if id == dist.get_rank(group=group):
function()
dist.barrier(group=group)
def set_none_gradients_to_zero(self, i, partition_id):
for param_id in self.is_grad_computed[i][partition_id]:
param = self.param_dict[param_id]
if param.grad is None:
                param.grad = torch.zeros_like(param)
######################Reduction Related Methods##############################
def allreduce_bucket(self, bucket, rank=None, log=None):
rank = None
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if pg_correctness_test:
communication_data_type = torch.float32
else:
communication_data_type = self.communication_data_type
if communication_data_type != tensor.dtype:
tensor_to_allreduce = tensor.to(communication_data_type)
tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group))
if rank is None:
# "All Reducing"
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
else:
global_rank = dist.get_global_rank(self.dp_process_group, rank)
dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group)
if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
tensor.copy_(tensor_to_allreduce)
return tensor
# if rank is specified do a reduction instead of an allreduce
def allreduce_and_copy(self, small_bucket, rank=None, log=None):
with get_accelerator().stream(self.reduction_stream):
allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log)
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, rank=rank, log=None)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, rank=rank, log=log)
#############################################################################
#############################################################################
#############################################################################
# views the tensor as multiple partitions and returns
# those partitions
def get_data_parallel_partitions(self, tensor):
partitions = []
dp = dist.get_world_size(group=self.dp_process_group)
dp_id = dist.get_rank(group=self.dp_process_group)
total_num_elements = tensor.numel()
base_size = total_num_elements // dp
remaining = total_num_elements % dp
start = 0
for id in range(dp):
partition_size = base_size
if id < remaining:
partition_size = partition_size + 1
partitions.append(tensor.narrow(0, start, partition_size))
start = start + partition_size
return partitions
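    # Worked example (hypothetical sizes): partitioning a tensor of 10 elements across a data-parallel
    # world of 3 yields narrowed views of sizes [4, 3, 3]; the first `remaining` ranks each absorb one
    # leftover element so the partitions tile the tensor exactly.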
def get_partition_info(self, tensor_list, partition_size, partition_id):
params_in_partition = []
params_not_in_partition = []
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for tensor in tensor_list:
tensor_size = tensor.numel()
if (current_index >= start_index and current_index < end_index):
params_in_partition.append(tensor)
elif start_index > current_index and start_index < (current_index + tensor_size):
params_in_partition.append(tensor)
assert (first_offset == 0
), "This can happen either zero or only once as this must be the first tensor in the partition"
first_offset = start_index - current_index
else:
params_not_in_partition.append(tensor)
current_index = current_index + tensor_size
return params_in_partition, params_not_in_partition, first_offset
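    # Worked example (hypothetical sizes): for two tensors of 4 elements each and partition_size=6,
    # partition_id=1 (flat elements 6..11) returns ([t1], [t0], first_offset=2) because only the
    # second tensor overlaps that range, starting 2 elements into it; partition_id=0 returns
    # ([t0, t1], [], first_offset=0) since both tensors begin inside elements 0..5.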
@instrument_w_nvtx
def zero_grad(self, set_to_none=False):
"""
Zero FP16 parameter grads.
"""
self.micro_step_id = 0
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_to_none:
if p.grad is not None and get_accelerator().on_accelerator(p.grad):
p.grad.record_stream(get_accelerator().current_stream())
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def _model_parallel_all_reduce(self, tensor, op):
""" Perform all reduce within model parallel group, if any.
"""
if self.model_parallel_group is None:
pass
else:
dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group)
@instrument_w_nvtx
def get_grad_norm_direct(self, gradients, params, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(g.data.abs().max() for g in gradients)
total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)])
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group)
# Take max across all GPUs.
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX)
total_norm = total_norm_cuda[0].item()
else:
# if dist.get_rank() == 0:
# logger.info(f"Total Norm beginning {total_norm}")
grad_norms = []
for g, p in zip(gradients, params):
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
grad_norms.append(g.to(get_accelerator().device_name(), non_blocking=True).double().norm(2))
# Sum across all model parallel GPUs.
total_norm_cuda = torch.sum(torch.pow(torch.stack(grad_norms), 2))
dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM)
total_norm = total_norm_cuda.item()**(1. / norm_type)
if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
# creates a flat fused tensor from the tensor list starting at the first_offset
# in the first tensor of the list. If there are not enough elements in the tensor
# list then the flat tensor will be padded with zeros
def get_flat_partition(self, tensor_list, first_offset, partition_size, return_tensor_list=False):
flat_tensor_list = []
current_size = 0
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros_like(tensor)
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
# we don't need all elements of the tensor
if num_elements > (partition_size - current_size):
num_elements = partition_size - current_size
# we need a narrow view of the tensor based on the tensor offset and number of elements that
# we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements)))
else:
flat_tensor_list.append(tensor)
current_size = current_size + num_elements
# this means it's the last partition and does not align with the dp boundary. We need to pad before flattening
if current_size < partition_size:
flat_tensor_list.append(
torch.zeros(int(partition_size - current_size),
dtype=tensor_list[0].dtype,
device=tensor_list[0].device))
if return_tensor_list:
return flat_tensor_list
return self.flatten(flat_tensor_list)
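# Illustrative example (not part of the original source): with two parameters of
# 8 and 5 elements, first_offset=6 and partition_size=10, the flat partition is
# built from the last 2 elements of the first gradient, all 5 elements of the
# second gradient, and 3 trailing zeros of padding, giving exactly 10 elements.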
def free_grad_in_param_list(self, param_list):
for p in param_list:
p.grad = None
def reset_cpu_buffers(self):
self.norm_for_param_grads = {}
self.local_overflow = False
def log_timers(self, timer_names):
if self.timers is None:
return
self.timers.log(names=list(timer_names))
def start_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).start()
def stop_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).stop()
def _pre_step(self):
self.micro_step_id = 0
print_rank_0(f"Inside Step function")
see_memory_usage(f"In step before checking overflow", force=False)
print_rank_0("Finished Tracing at Beginning of Step")
self._get_param_coordinator(training=True).hierarchy = 0
print_rank_0("Finished Tracing at Beginning of Step")
@instrument_w_nvtx
def _get_norm_groups(self):
norm_groups = []
for i, group in enumerate(self.fp16_groups):
if self.offload_optimizer:
norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.fp16_groups[i]))
else:
norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.fp16_groups[i]))
return norm_groups
@instrument_w_nvtx
def _prepare_fp32_grad_for_sub_group(self, sub_group_id):
partition_id = dist.get_rank(group=self.dp_process_group)
single_grad_partition = self.flatten(self.averaged_gradients[sub_group_id]).to(
self.fp32_partitioned_groups_flat[sub_group_id].dtype)
assert single_grad_partition.numel() == self.fp32_partitioned_groups_flat[sub_group_id].numel(), \
"averaged gradients have different number of elements that partition size {} {} {} {}".format(
single_grad_partition.numel(), self.fp32_partitioned_groups_flat[sub_group_id].numel(), sub_group_id, partition_id)
self.fp32_partitioned_groups_flat[sub_group_id].grad = single_grad_partition
# release all the gradients since we have already created the necessary copy in dp_grad_partition
self.zero_grad(set_to_none=True)
for grad in filter(lambda g: get_accelerator().on_accelerator(g), self.averaged_gradients[sub_group_id]):
grad.record_stream(get_accelerator().current_stream())
self.averaged_gradients[sub_group_id] = None
@instrument_w_nvtx
def _prepare_sub_group(self, sub_group_id, timer_names=set()):
see_memory_usage(f'Before prepare optimizer sub group {sub_group_id}', force=False)
if self._swappable_optimizer_subgroup(sub_group_id):
self._optimizer_states_and_gradient_swap_in(sub_group_id, timer_names)
elif not self.offload_optimizer:
self._prepare_fp32_grad_for_sub_group(sub_group_id)
see_memory_usage(f'After prepare optimizer sub group {sub_group_id}', force=False)
def _optimizer_states_and_gradient_swap_in(self, sub_group_id, timer_names=set()):
param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id]
fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id])
assert self._swappable_optimizer_subgroup(sub_group_id), \
f'Parameter {fp32_param_id} of numel={param_length} is not swappable'
OPTIMIZER_SWAP_IN_STATE = 'optimizer_swap_in_state'
see_memory_usage(f'pre-step Before swapping in optimizer tensors {sub_group_id}', force=False)
self.start_timers([OPTIMIZER_SWAP_IN_STATE])
self.optimizer_swapper.swap_in_optimizer_state(
parameter=self.fp32_partitioned_groups_flat[sub_group_id],
async_parameter=self.next_swappable_fp32_partitioned_groups[sub_group_id])
self.stop_timers([OPTIMIZER_SWAP_IN_STATE])
timer_names.add(OPTIMIZER_SWAP_IN_STATE)
see_memory_usage(f'pre-step After swapping in optimizer tensors {sub_group_id}', force=False)
@instrument_w_nvtx
def _release_sub_group(self, sub_group_id, timer_names=set()):
see_memory_usage(f'Before release optimizer sub group {sub_group_id}', force=False)
# get rid of the fp32 gradients. Not needed anymore
if not self.offload_optimizer:
self.fp32_partitioned_groups_flat[sub_group_id].grad = None
if self._swappable_optimizer_subgroup(sub_group_id):
self._optimizer_states_and_gradient_swap_out(sub_group_id, timer_names)
see_memory_usage(f'After release optimizer sub group {sub_group_id}', force=False)
# create a flat tensor aligned at the alignment boundary
@instrument_w_nvtx
def flatten_dense_tensors_aligned(self, tensor_list, alignment):
num_elements = 0
for tens in tensor_list:
num_elements = num_elements + tens.numel()
remaining = num_elements % alignment
if remaining:
elements_to_add = alignment - remaining
pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype)
padded_tensor_list = tensor_list + [pad_tensor]
num_elements = num_elements + elements_to_add
else:
padded_tensor_list = tensor_list
return self.flatten(padded_tensor_list)
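# Illustrative example (not part of the original source): tensors of sizes [3, 6]
# hold 9 elements; with alignment=4 a 3-element zero pad is appended so the
# flattened tensor has 12 elements, a multiple of the alignment.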
def _optimizer_states_and_gradient_swap_out(self, sub_group_id, timer_names=set()):
param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id]
fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id])
assert self._swappable_optimizer_subgroup(sub_group_id), \
f'Parameter {fp32_param_id} of numel={param_length} is not swappable'
OPTIMIZER_SWAP_OUT_STATE = 'optimizer_swap_out_state'
see_memory_usage(f'post-step Before swapping out optimizer tensors {sub_group_id}', force=False)
self.start_timers([OPTIMIZER_SWAP_OUT_STATE])
self.optimizer_swapper.swap_out_optimizer_state(
parameter=self.fp32_partitioned_groups_flat[sub_group_id],
async_swap=self.next_swappable_fp32_partitioned_groups[sub_group_id] is not None)
self.stop_timers([OPTIMIZER_SWAP_OUT_STATE])
see_memory_usage(f'post-step After swapping out optimizer tensors {sub_group_id}', force=False)
timer_names.add(OPTIMIZER_SWAP_OUT_STATE)
# get rid of the fp32 gradients. Not needed anymore
self.fp32_partitioned_groups_flat[sub_group_id].grad = None
def _unflatten_partitioned_parameters(self, sub_group_id):
updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id],
self.fp16_partitioned_groups[sub_group_id])
for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params):
partitioned_param.data = q.data
def _overflow_clean_up(self, prev_scale):
see_memory_usage('After overflow before clearing gradients', force=False)
self.zero_grad(set_to_none=True)
if self.offload_optimizer:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
see_memory_usage('After overflow after clearing gradients', force=False)
@instrument_w_nvtx
def _overflow_check_and_loss_scale_update(self):
# First check all groups so we know if there is an overflow
self.check_overflow()
#loss scaling related computation
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
self._overflow_clean_up(prev_scale)
return self.overflow
@instrument_w_nvtx
def _post_step(self, timer_names=set()):
if self.offload_optimizer:
self.reset_cpu_buffers()
#Gathering persisting parameters
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].all_gather(self.persistent_parameters)
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
self.log_timers(timer_names)
see_memory_usage('After zero_optimizer step', force=False)
print_rank_0(f"------------------Finishing Step-----------------------")
@instrument_w_nvtx
def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id):
if self.fp16_partitioned_groups_flat[sub_group_id] is not None:
self.fp16_partitioned_groups_flat[sub_group_id].data.copy_(
self.fp32_partitioned_groups_flat[sub_group_id].data)
#unflatten fp16 parameter subgroup
self._unflatten_partitioned_parameters(sub_group_id)
else:
self._partitioned_params_swap_out(sub_group_id)
def override_loss_scale(self, loss_scale):
if loss_scale != self.external_loss_scale:
logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}')
self.custom_loss_scaler = True
self.external_loss_scale = loss_scale
@instrument_w_nvtx
def step(self, closure=None):
"""
Not supporting closure.
"""
self._pre_step()
self._partition_all_parameters()
#checks for overflow, adjust the loss scale accordingly
if self._overflow_check_and_loss_scale_update():
if self.swap_optimizer:
self.optimizer_swapper.log_timers()
return
norm_groups = self._get_norm_groups()
scaled_global_grad_norm = get_global_norm(norm_list=norm_groups)
# Stash unscaled gradient norm
self._global_grad_norm = scaled_global_grad_norm / self.loss_scale
timer_names = set()
timer_names.add('optimizer_step')
self.start_timers(['optimizer_step'])
#update parameters one sub group at a time
for sub_group_id, group in enumerate(self.fp16_groups):
#prepare optimizer states, gradients and fp32 parameters for update
self._prepare_sub_group(sub_group_id, timer_names)
#scale the fp32 gradients
self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm)
#apply the optimizer step on the sub group and copy fp32 parameters to fp16
self._optimizer_step(sub_group_id)
#put fp16 parameters in appropriate location
self._reassign_or_swap_out_partitioned_parameters(sub_group_id)
#release memory or swap out optimizer states of fp32 parameters
self._release_sub_group(sub_group_id, timer_names)
self.stop_timers(['optimizer_step'])
self._post_step(timer_names)
# warn user about caching allocator flushes
memory_stats = get_accelerator().memory_stats()
alloc_retries = memory_stats["num_alloc_retries"] if memory_stats is not None else 0
if alloc_retries > self.n_caching_allocator_flushes:
if dist.get_rank() == 0:
logger.warning(
"%d pytorch allocator cache flushes since last step. this happens "
"when there is high memory pressure and is detrimental to "
"performance. if this is happening frequently consider adjusting "
"settings to reduce memory consumption. If you are unable to "
"make the cache flushes go away consider adding "
"get_accelerator().empty_cache() calls in your training loop to ensure "
"that all ranks flush their caches at the same time",
alloc_retries - self.n_caching_allocator_flushes)
self.n_caching_allocator_flushes = alloc_retries
def dump_pre_step_gradients(self, debug_fp32_grads):
# Dump gradient norms for debugging
for i, _ in enumerate(self.fp16_groups):
print(f'Pre-Step Dump Norms for Group {i} FP16P, FP16G, FP32G, FP32GUC')
for fp16_param, fp32_grad in zip(self.fp16_groups[i], debug_fp32_grads[i]):
param_id = self.get_param_id(fp16_param)
fp16_grad_norm = self.debug_fp16_grads[i][param_id]
fp32_grad_norm = [float(t.data.float().norm(2)) for t in fp32_grad]
norm_list = [fp16_grad_norm, fp32_grad_norm]
print(f'Pre-Step Norms {i} {param_id} = {norm_list}')
def dump_post_step_gradients(self):
# Dump gradient norms for debugging
for i, group in enumerate(self.fp16_groups):
print(f'Post-Step Dump Norms for Group {i} FP16P, FP16DS, FP16FLAT, FP32FLAT')
unflat_fp16 = self.unflatten(self.fp16_groups_flat[i], self.fp16_groups[i])
unflat_fp32 = self.unflatten(self.fp32_partitioned_groups_flat[i], self.fp16_groups[i])
for j, p in enumerate(self.fp16_groups[i]):
param_id = self.get_param_id(p)
param_norm = float(p.data.float().norm(2))
ds_norm = float(p.ds_tensor.data.float().norm(2))
unflat_norm = [float(t.data.float().norm(2)) for t in [unflat_fp16[j], unflat_fp32[j]]]
norm_list = [param_norm, ds_norm] + unflat_norm
print(f'Post-Step Norms {i} {param_id} = {norm_list}')
@instrument_w_nvtx
def unscale_and_clip_grads(self, sub_group_id, total_norm):
# compute combined scale factor for this group
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. / combined_scale)
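# Worked example (not part of the original source): with loss_scale=1024,
# clip_grad=1.0 and a scaled total_norm of 2048, the unscaled norm is 2.0, so
# clip ~= 2.0 and combined_scale = 2.0 * 1024 = 2048; multiplying the fp32
# gradients by 1/2048 both removes the loss scale and clips the norm to ~1.0.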
def _check_overflow(self, partition_gradients=True):
self.overflow = self.has_overflow(partition_gradients)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params, is_grad_list=False):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow_partitioned_grads_serial(self):
for i in range(len(self.fp16_groups)):
for j, grad in enumerate(self.averaged_gradients[i]):
if grad is not None and self._has_inf_or_nan(grad.data, j):
return True
return False
@instrument_w_nvtx
def has_overflow(self, partition_gradients=True):
if partition_gradients:
with get_accelerator().stream(self.reduce_and_partition_stream):
self.local_overflow = bool(self.inf_or_nan_tracker.item())
self.inf_or_nan_tracker.zero_()
overflow = self.local_overflow
#overflow = self.has_overflow_partitioned_grads_serial()
overflow_gpu = get_accelerator().ByteTensor([overflow])
dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group)
else:
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients)
overflow_gpu = get_accelerator().ByteTensor([overflow])
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX)
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, j=None):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# PyTorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent versions of PyTorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if instance is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
@instrument_w_nvtx
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
if self.swap_optimizer:
self.optimizer_swapper.pre_backward()
see_memory_usage(f"Before backward", force=False)
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward()
else:
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
self._get_param_coordinator(training=True).reset_step()
if self.swap_optimizer:
self.optimizer_swapper.post_backward()
def get_fp32_grad_partitions(self) -> Dict[int, Dict[int, Tensor]]:
"""get fp32 gradient partition dictionary
accessed as grad_dict[parameter_group_index][parameter_index]
"""
self.reduce_and_partition_stream.synchronize()
grad_dict = collections.defaultdict(dict)
if self.offload_optimizer:
for group in self.fp16_groups:
for param_idx, param in enumerate(group):
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements)
grad_dict[group_idx][param_idx] = fp32_grad
else:
for group_idx, group in self.averaged_gradients.items():
for param_idx, gradient in enumerate(group):
grad_dict[group_idx][param_idx] = gradient.float()
return grad_dict
def _fp32_state_allgather(self, param, fp32_state):
reduce_buffer = torch.zeros(self.partition_count * fp32_state.numel(),
dtype=torch.float32,
device=param.device).flatten()
my_rank = dist.get_rank(group=self.dp_process_group)
partitions = [
reduce_buffer.narrow(0,
fp32_state.numel() * i, fp32_state.numel()) for i in range(self.partition_count)
]
partitions[my_rank].data.copy_(fp32_state.data, non_blocking=False)
dist.all_gather(partitions, partitions[my_rank], group=self.dp_process_group)
return reduce_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape)
def get_fp32_grad_for_param(self, param) -> Tensor:
if not param.requires_grad:
return None
self.reduce_and_partition_stream.synchronize()
if self.offload_optimizer:
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset,
num_elements).to(device=param.device)
else:
fp32_grad = self.__param_id_to_grad_partition[param.ds_id].float()
return self._fp32_state_allgather(param, fp32_grad)
def get_full_hp_param(self, param, optim_state_key=None) -> Tensor:
if not param.requires_grad:
return None
self.reduce_and_partition_stream.synchronize()
group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)]
if self._swappable_optimizer_subgroup(group_idx):
self._optimizer_states_and_gradient_swap_in(group_idx)
fp32_param = self.fp32_partitioned_groups_flat[group_idx]
if optim_state_key is None:
fp32_opt_state = fp32_param.narrow(0, dest_offset, num_elements).to(device=param.device)
else:
fp32_opt_state = self.optimizer.state[fp32_param][optim_state_key].narrow(
0, dest_offset, num_elements).to(device=param.device)
hp_param = self._fp32_state_allgather(param, fp32_opt_state)
if self._swappable_optimizer_subgroup(group_idx):
self._optimizer_states_and_gradient_swap_out(group_idx)
return hp_param
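# Illustrative usage sketch (not part of the original source); the state keys below
# assume an Adam-style base optimizer and will differ for other optimizers:
#   full_fp32_weight = optimizer.get_full_hp_param(param)
#   full_exp_avg = optimizer.get_full_hp_param(param, optim_state_key='exp_avg')
# Both calls return the full, unpartitioned tensor reshaped to param.ds_shape.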
@instrument_w_nvtx
def _partition_all_parameters(self):
self.parameter_offload.partition_all_parameters()
def check_overflow(self, partition_gradients=True):
self._check_overflow(partition_gradients)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
self.trainable_param_groups = self._get_trainable_parameter_groups()
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
if self.custom_loss_scaler:
return self.external_loss_scale
else:
return self.loss_scaler.cur_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
def _get_lean_tensors(self, padded_flattened_tensor, group_tensors, paddings):
# Remove paddings from flattened tensor
individual_tensors = self.unflatten(padded_flattened_tensor, group_tensors)
lean_lengths = [t.numel() - pad for t, pad in zip(group_tensors, paddings)]
lean_tensors = [t[:length] for t, length in zip(individual_tensors, lean_lengths)]
#logger.info(f'rank {dist.get_rank()}: lean_tensors = {[t.numel() for t in lean_tensors]}')
return lean_tensors
#TODO REVISIT this for stage 3
def get_lean_optimizer_state(self):
# Return optimizer states after removing paddings.
# This method assumes that each param group contains a single flattened tensor.
optimizer_groups_state = []
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
lean_state = {}
for key, value in self.optimizer.state[p].items():
if torch.is_tensor(value):
padded_lens = [t.numel() for t in self.fp16_partitioned_groups[i]]
lean_state[key] = self._get_lean_tensors(value, self.fp16_partitioned_groups[i],
self.groups_padding[i])
lean_flat_len = sum([t.numel() for t in lean_state[key]])
else:
lean_state[key] = value
optimizer_groups_state.append(lean_state)
return optimizer_groups_state
def get_groups_without_padding(self, groups_with_padding):
# Return group tensor after removing paddings added for alignment to DP world size.
groups_without_padding = []
for i, group in enumerate(groups_with_padding):
lean_group = self._get_lean_tensors(group, self.fp16_partitioned_groups[i], self.groups_padding[i])
groups_without_padding.append(lean_group)
return groups_without_padding
def _set_fp32_optimizer_param_groups(self):
for sub_group_id, _ in enumerate(self.fp16_groups):
param_group_id = self.sub_group_to_group_id[sub_group_id]
self.optimizer.param_groups[param_group_id]['params'].append(
self.fp32_partitioned_groups_flat[sub_group_id])
def _clear_fp32_optimizer_param_groups(self):
for param_group in self.optimizer.param_groups:
param_group['params'] = []
def _rigid_state_dict(self):
state_dict = {}
state_dict[ZERO_STAGE] = ZeroStageEnum.weights
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict[PARTITION_COUNT] = self.partition_count
self._set_fp32_optimizer_param_groups()
state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict()
state_dict[FP32_FLAT_GROUPS] = self.fp32_partitioned_groups_flat
self._clear_fp32_optimizer_param_groups()
return state_dict
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
if self.elastic_checkpoint:
raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.")
if self.swap_optimizer or self.params_in_nvme_and_cpu:
raise NotImplementedError(
"ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.")
return self._rigid_state_dict()
# Restore base optimizer fp32 weights from checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_fp32_weights(self, all_state_dict):
flat_local_partition = []
for i in range(len(self.fp32_partitioned_groups_flat)):
merged_partitions = [sd['fp32_groups'][i] for sd in all_state_dict]
flat_local_partition.append(self._get_flattened_partition(merged_partitions))
for current, saved in zip(self.fp32_partitioned_groups_flat, flat_local_partition):
current.data.copy_(saved.data)
# Restore base optimizer fp32 weights from ZeRO fp16 weights
def _restore_from_bit16_weights(self):
for fp16_partitions, fp32_partition in zip(self.fp16_partitioned_groups_flat,
self.fp32_partitioned_groups_flat):
fp32_partition.data.copy_(fp16_partitions.data)
# Refresh the fp32 master params from the fp16 copies.
def refresh_fp32_params(self):
self._restore_from_bit16_weights()
# Extract flattened partition for current rank from all partitions
def _get_flattened_partition(self, all_partition_states):
partition_id = dist.get_rank(group=self.dp_process_group)
alignment = dist.get_world_size(group=self.dp_process_group)
param_partitions = [[] for _ in range(len(all_partition_states[0]))]
for i, partition in enumerate(all_partition_states):
for j, param in enumerate(partition):
param_partitions[j].append(param)
local_state_partitions = []
for param_index, param_slices in enumerate(param_partitions):
flattened_merged_tensor = self.flatten_dense_tensors_aligned(param_slices, alignment)
new_partitions = self.get_data_parallel_partitions(flattened_merged_tensor)
local_state_partitions.append(new_partitions[partition_id])
if torch.is_tensor(local_state_partitions[0]):
return self.flatten_dense_tensors_aligned(local_state_partitions, alignment)
# Assume non-tensor states are not partitioned and equal across ranks, so return first one
return local_state_partitions[0]
# Restore base optimizer state from checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_base_optimizer_state(self, all_state_dict):
base_optimizer_group_states = []
for i in range(len(self.optimizer.param_groups)):
partition_states = {}
all_partition_group_states = [sd['base_optimizer_state'][i] for sd in all_state_dict]
for key in all_partition_group_states[0].keys():
all_partition_states = [all_states[key] for all_states in all_partition_group_states]
partition_states[key] = self._get_flattened_partition(all_partition_states)
base_optimizer_group_states.append(partition_states)
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
for key, saved in base_optimizer_group_states[i].items():
if torch.is_tensor(self.optimizer.state[p][key]):
self.optimizer.state[p][key].data.copy_(saved.data)
else:
self.optimizer.state[p][key] = saved
def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True):
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
if load_optimizer_states:
self._set_fp32_optimizer_param_groups()
self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT])
self._clear_fp32_optimizer_param_groups()
# restore fp32 partitions
for curr_param, saved_param in zip(self.fp32_partitioned_groups_flat, state_dict[FP32_FLAT_GROUPS]):
curr_param.data.copy_(saved_param.data)
# restore fp16 partitions from fp32
for sub_group_id in range(len(self.fp32_partitioned_groups_flat)):
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
fp16_param = self.fp16_partitioned_groups_flat[sub_group_id]
fp16_param.data.copy_(fp32_param.data)
# update fp16 unflattened params
for sub_group_id in range(len(self.fp16_partitioned_groups_flat)):
updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id],
self.fp16_partitioned_groups[sub_group_id])
for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params):
partitioned_param.data = q.data
# TODO: Support different/changing load/save DP degree.
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False,
checkpoint_folder=None):
r"""Loading a ZeRO checkpoint
Arguments:
state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
Note that the number of saved partitions may differ from number of loading partitions to support
changing GPU count, specifically DP world size, between saving and loading checkpoints.
load_optimizer_states: Boolean indicating whether or not to load base optimizer states
load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32
copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss).
"""
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
if self.elastic_checkpoint:
raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.")
if self.swap_optimizer or self.params_in_nvme_and_cpu:
raise NotImplementedError(
"ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.")
self._rigid_load_state_dict(state_dict_list[dist.get_rank(group=self.dp_process_group)],
load_optimizer_states=load_optimizer_states)
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].partition(self.persistent_parameters)
self.persistent_parameters[0].all_gather(self.persistent_parameters)
def checkpoint_event_prologue(self):
self._partition_all_parameters()
def checkpoint_event_epilogue(self):
if len(self.persistent_parameters) > 0:
self.persistent_parameters[0].all_gather(self.persistent_parameters)
def empty_partition_cache(self):
self.parameter_offload.empty_partition_cache()
def _handle_overflow(cpu_sum, x, i):
import math
rank = dist.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}")
def estimate_zero3_model_states_mem_needs(total_params,
largest_layer_params,
num_gpus_per_node=1,
num_nodes=1,
cpu_offload=True,
cpu_offload_params=True,
zero_init=True,
additional_buffer_factor=1.5):
total_gpus = num_nodes * num_gpus_per_node
gpus_factor = 1 / num_nodes
largest_layer_memory = (4 * largest_layer_params)
if cpu_offload:
if cpu_offload_params:
gpu_mem = largest_layer_memory
if zero_init:
cpu_mem = total_params * 18 * gpus_factor * additional_buffer_factor
else:
cpu_mem = total_params * max(4 * num_gpus_per_node, 18 * gpus_factor) * additional_buffer_factor
else:
gpu_mem = largest_layer_memory + int(2 * total_params / total_gpus)
if zero_init:
cpu_mem = total_params * 16 * gpus_factor * additional_buffer_factor
else:
cpu_mem = total_params * max(4 * num_gpus_per_node, 16 * gpus_factor) * additional_buffer_factor
else:
gpu_mem = largest_layer_memory + int(18 * total_params / total_gpus)
if zero_init:
cpu_mem = largest_layer_params * 4 * num_gpus_per_node * additional_buffer_factor
else:
cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor
return int(cpu_mem), int(gpu_mem), largest_layer_memory
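# Worked example (not part of the original source): total_params=1e9,
# largest_layer_params=1e8, one node with 8 GPUs, cpu_offload=True,
# cpu_offload_params=True, zero_init=True and the default buffer factor 1.5:
#   gpu_mem = 4 * 1e8 = 0.4e9 bytes (~0.37 GiB, just the largest layer)
#   cpu_mem = 1e9 * 18 * (1/1) * 1.5 = 27e9 bytes (~25.1 GiB)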
def model_to_params(model):
# shared params calculated only once
total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
largest_layer_params = 0
for m in model.modules():
# assuming no shared params within a single layer
layer_params = sum(p.numel() for p in m.parameters(recurse=False))
largest_layer_params = max(largest_layer_params, layer_params)
return total_params, largest_layer_params
def estimate_zero3_model_states_mem_needs_all_live(model,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients
for a given ``model`` and hardware setup.
If you have an actual model object, use this function and everything will be derived
automatically.
If it's a hypothetical model, use ``estimate_zero3_model_states_mem_needs_all_cold`` where you have to pass
the ``total_params`` and ``largest_layer_params`` explicitly.
Args:
- ``model``: ``nn.Module`` object
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
total_params, largest_layer_params = model_to_params(model)
estimate_zero3_model_states_mem_needs_all_cold(total_params=total_params,
largest_layer_params=largest_layer_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
additional_buffer_factor=additional_buffer_factor)
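# Example usage (illustrative; ``MyModel`` is a placeholder for any ``nn.Module``):
#   from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live
#   model = MyModel()
#   estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=8, num_nodes=1)
# This prints a per-CPU / per-GPU estimate for every offload_param / offload_optimizer /
# zero_init combination.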
def estimate_zero3_model_states_mem_needs_all_cold(total_params,
largest_layer_params,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients
for a given ``model`` and hardware setup.
If it's a hypothetical model, use this function where you have to pass
the ``total_params`` and ``largest_layer_params`` explicitly.
If you have an actual model object, use ``estimate_zero3_model_states_mem_needs_all_live`` and everything
will be derived automatically.
Args:
- ``total_params``: total model params
- ``largest_layer_params``: largest layer's params
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
def format_options(cpu_offload, cpu_offload_params, zero_init):
enabled = []
padded_cpu_str = f'{OffloadDeviceEnum.cpu:4}'
param_device = padded_cpu_str if cpu_offload_params else "none"
enabled.append(f"offload_param={param_device}")
optimizer_device = padded_cpu_str if cpu_offload else "none"
enabled.append(f"offload_optimizer={optimizer_device}")
enabled.append(f"zero_init={1 if zero_init else 0}")
return ", ".join(enabled)
nodes_str = "nodes" if num_nodes > 1 else "node"
gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
print(
"Estimated memory needed for params, optim states and gradients for a:\n"
f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
f"SW: Model with {int(total_params/1e6)}M total params, {int(largest_layer_params/1e6)}M largest layer params."
)
print(" per CPU | per GPU | Options")
for cpu_offload in [True, False]:
for cpu_offload_params in [True, False]:
if not cpu_offload and cpu_offload_params:
continue
for zero_init in [True, False]:
cpu_mem, gpu_mem, largest_layer_memory = estimate_zero3_model_states_mem_needs(
total_params=total_params,
largest_layer_params=largest_layer_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
cpu_offload=cpu_offload,
cpu_offload_params=cpu_offload_params,
zero_init=zero_init,
additional_buffer_factor=additional_buffer_factor)
options_str = format_options(cpu_offload=cpu_offload,
cpu_offload_params=cpu_offload_params,
zero_init=zero_init)
print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}") | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/stage3.py | stage3.py |
# DeepSpeed Team
# Linear module to use with ZeRO Stage 3, allowing parameter memory to be released
# after the module executes during the forward pass.
# Instead of saving variables using save_for_backward, we save variable ids,
# allowing us to retrieve the variable without creating a pointer to it,
# which allows the underlying tensor to be garbage collected
# when it is partitioned as needed by the ZeRO Stage 3 optimizer.
# TODO: instead of patching the Linear module, we could patch ctx.save_for_backward /
# ctx.saved_tensors so that this approach works for all nn modules that are built upon
# torch.nn.functional. However, the issue is that many modules use C++ implementations
# that do not have a PyTorch implementation, e.g. torch.addmm, which acts as a functional
# when implemented outside of torch.autograd.Function.
import math
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn.modules.module import Module
from deepspeed.runtime.utils import noop_decorator
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
def print_rank_0(message, debug=False, force=False):
if dist.get_rank() == 0 and (debug or force):
print(message)
try:
autocast_custom_fwd = get_accelerator().amp().custom_fwd
autocast_custom_bwd = get_accelerator().amp().custom_bwd
except (ImportError, AttributeError) as exp:
autocast_custom_fwd = noop_decorator
autocast_custom_bwd = noop_decorator
class LinearFunctionForZeroStage3(torch.autograd.Function):
# Note that both forward and backward are @staticmethods
@staticmethod
@autocast_custom_fwd
# bias is an optional argument
def forward(ctx, input, weight, bias=None):
ctx.save_for_backward(input, weight, bias)
if input.dim() == 2 and bias is not None:
# fused op is marginally faster
ret = torch.addmm(bias, input, weight.t())
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
ret = output
return ret
# This function has only a single output, so it gets only one gradient
@staticmethod
@autocast_custom_bwd
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
input, weight, bias = ctx.saved_tensors
grad_input = grad_weight = grad_bias = None
#print(f"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}")
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
if ctx.needs_input_grad[0]:
#print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}")
grad_input = grad_output.matmul(weight)
#print(f"Computed grad input {grad_input.shape}")
if ctx.needs_input_grad[1]:
#print("Computing grad weight")
dim = grad_output.dim()
if dim > 2:
grad_weight = grad_output.reshape(-1,
grad_output.shape[-1]).t().matmul(input.reshape(-1, input.shape[-1]))
else:
grad_weight = grad_output.t().matmul(input)
#print(f"Computed grad weight grad_weight {grad_weight.shape}")
if bias is not None and ctx.needs_input_grad[2]:
#print("Computing grad bias")
grad_bias = grad_output.sum(0)
#print("Done computing grad bias")
#print("needs bias")
#print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}")
return grad_input, grad_weight, grad_bias
def zero3_linear_wrap(input, weight, bias=None):
if bias is None:
return LinearFunctionForZeroStage3.apply(input, weight)
else:
return LinearFunctionForZeroStage3.apply(input, weight, bias)
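# Usage note (descriptive, not part of the original source): this wrapper has the same
# call signature as torch.nn.functional.linear, so DeepSpeed can substitute it for the
# functional linear when the ZeRO-3 memory-efficient linear path is enabled. Called
# directly it behaves the same way, e.g. zero3_linear_wrap(torch.randn(2, 4),
# torch.randn(3, 4)) returns a (2, 3) output.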
class LinearModuleForZeroStage3(Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
The weights are pre-transposed and stored as A^T instead of transposing during each
forward. Memory savings proportional to the parameter size.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \text{in\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
Examples::
>>> m = LinearModuleForZeroStage3(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super(LinearModuleForZeroStage3, self).__init__()
print("Building ZeRO module")
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias
is not None) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/zero/linear.py | linear.py |
# DeepSpeed Team
"""
batched collective operations for overhead amortization and better
bandwidth utilization
"""
import math
from typing import List
import torch
from torch import Tensor
from deepspeed import comm as dist
# NOTE: Use torch.distributed's ProcessGroup class until we have our own.
from torch.distributed import ProcessGroup
import torch.nn.functional
from deepspeed.utils import instrument_w_nvtx
def _torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group=None, async_op=False, prof=False):
return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor, input_tensor, group=group, async_op=async_op)
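# Illustrative walk-through of the interleaving below (not part of the original source).
# Assume world_size=2 and two tensors with 5 and 3 elements:
#   - per-rank chunk sizes are ceil(5/2)=3 and ceil(3/2)=2
#   - rank 0 owns t0[0:3] and t1[0:2]; rank 1 owns t0[3:5] (padded to 3) and t1[2:3] (padded to 2)
#   - the flat buffer is laid out as [t0_r0 | t1_r0 | t0_r1 + pad | t1_r1 + pad] (10 elements),
#     pre-divided by the world size, chunked into 2 pieces of 5, and reduce-scattered so that
#     each rank receives exactly the partitions it owns, which are then narrowed back out per tensor.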
@instrument_w_nvtx
@torch.no_grad()
def reduce_scatter_coalesced(
tensors: List[Tensor],
group: ProcessGroup = None,
) -> List[Tensor]:
"""simultaneously reduce-scatter a list of tensors - this can be done more
efficiently than individual reduce scatter calls
TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL
"""
this_rank = dist.get_rank(group)
world_sz = dist.get_world_size(group)
partition_lst_for_each_tensor = [None] * len(tensors)
for tensor_idx, tensor in enumerate(tensors):
flattened_tensor = tensor.view(-1)
chunk_sz = math.ceil(tensor.numel() / world_sz)
partition_lst_for_each_tensor[tensor_idx] = [
flattened_tensor[rank * chunk_sz:rank * chunk_sz + chunk_sz] for rank in range(0, world_sz)
]
padded_partition_sz_for_each_tensor = tuple(math.ceil(t.numel() / world_sz) for t in tensors)
if len(tensors) == 1 and tensors[0].numel() % world_sz == 0:
# if there's only one tensor being reduced and we don't need to pad
# we have an opportunity to avoid a memory allocation
tensor_partition_flat_buffer = tensors[0].view(-1)
else:
# interleave tensor partitions such that the correct reduced partitions of each tensor
# end up at each rank
tensor_partitions_lst_with_padding = []
for rank in range(world_sz):
for tensor_idx in range(len(tensors)):
# add tensor content
tensor_chunk = partition_lst_for_each_tensor[tensor_idx][rank]
tensor_partitions_lst_with_padding.append(tensor_chunk)
# add padding if necessary
padding_sz = padded_partition_sz_for_each_tensor[tensor_idx] - tensor_chunk.numel()
if padding_sz > 0:
tensor_partitions_lst_with_padding.append(
torch.empty(padding_sz, dtype=tensor_chunk.dtype, device=tensor_chunk.device))
tensor_partition_flat_buffer = instrument_w_nvtx(torch.cat)(tensor_partitions_lst_with_padding)
tensor_partition_flat_buffer.div_(world_sz) # pre-divide
tensor_partition_buffer_for_each_rank: List[Tensor] = torch.chunk(tensor_partition_flat_buffer, world_sz)
# batched reduce-scatter call
_torch_reduce_scatter_fn(tensor_partition_flat_buffer,
tensor_partition_buffer_for_each_rank[this_rank],
group=group)
# reverse procedure of the interleaving done previously, done on the
# result of the batched reduce-scatter
output_lst: List[Tensor] = [None] * len(tensors)
offset = 0
for tensor_idx in range(len(tensors)):
output_lst[tensor_idx] = tensor_partition_buffer_for_each_rank[this_rank].narrow(
0, offset, partition_lst_for_each_tensor[tensor_idx][this_rank].numel())
offset += padded_partition_sz_for_each_tensor[tensor_idx]
return output_lst | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/comm/coalesced_collectives.py | coalesced_collectives.py |
# DeepSpeed Team
import torch
from deepspeed import comm as dist
import cupy
import numpy as np
from deepspeed.runtime.compression.cupy import CupyBackend
from deepspeed.accelerator import get_accelerator
class NcclBackend(object):
def __init__(self, mpu=None):
if mpu is None:
self.world_group = dist.new_group(ranks=range(dist.get_world_size()))
else:
self.mpu = mpu
self.world_group = self.mpu.get_data_parallel_group()
self.rank = dist.get_rank(group=self.world_group)
self.size = dist.get_world_size(group=self.world_group)
self.compression_backend = CupyBackend()
self.bool_not_supported = False
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if (TORCH_MAJOR == 1 and TORCH_MINOR >= 10) or TORCH_MAJOR == 2:
self.bool_not_supported = True
def my_igather(self, rank, size, group, sendbuf, recvbuf, root):
req = []
if rank == root:
for idx in range(size):
if idx != rank:
req.append(dist.irecv(recvbuf[idx], src=idx, group=group))
else:
recvbuf[rank] = sendbuf
else:
req.append(dist.isend(sendbuf, group=group, dst=root))
return req
def my_gather(self, rank, size, group, sendbuf, recvbuf, root):
if rank == root:
for idx in range(size):
if idx != rank:
dist.recv(recvbuf[idx], src=idx, group=group)
else:
recvbuf[rank] = sendbuf
else:
dist.send(sendbuf, group=group, dst=root)
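# Descriptive summary (not part of the original upstream comments): this implements the
# compressed allreduce used by 1-bit Adam/LAMB. Each worker adds its local error-feedback
# buffer to the gradient, transmits only the sign bits (0 is treated as +1) plus a single
# scale = ||v|| / sqrt(numel(v)), and keeps worker_error = v - scale * sign(v) for the next
# step. The averaged result is compressed the same way with server_error before being
# all-gathered back to every rank.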
def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
# all_start_time = time.time()
original_shape = buffer_m.size()
if len(original_shape) > 1:
buffer_m = torch.flatten(buffer_m)
original_size = buffer_m.numel()
worker_error_size = worker_error.numel()
cupy.cuda.Device(local_rank).use()
if original_size != worker_error_size:
empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
buffer_m = torch.cat([buffer_m, empty_tensor])
buffer_m.add_(worker_error)
worker_scale = torch.norm(buffer_m) / np.sqrt(buffer_m.numel())
worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
if self.bool_not_supported:
cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool().to(dtype=torch.uint8)), self.size)
else:
cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
dtype=cupy_sign_list_packed[0].dtype)
# cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
sign_list_packed = [
self.compression_backend.cupy2torch(cupy_sign_list_packed[idx]) for idx in range(self.size)
]
# worker_scale = self.compression_backend.cupy2torch(cupy_worker_scale)
recvbuf_sign = self.compression_backend.cupy2torch(cupy_recvbuf_sign)
#recvbuf_scale = self.compression_backend.cupy2torch(cupy_recvbuf_scale)
recvbuf_scale = [
torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(get_accelerator().device_name(local_rank)))
for i in range(self.size)
]
# communication phase 1
# gather_start = time.time()
# Alltoall for sign
dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group)
# Allgather for scale
dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group)
# gather_end = time.time()
# cupy_sign_list_packed, sign_list_packed, cupy_worker_scale, worker_scale = None, None, None, None
cupy_sign_list_packed = None
cupy_recvbuf_sign = self.compression_backend.torch2cupy(recvbuf_sign)
#cupy_recvbuf_scale = self.compression_backend.torch2cupy(torch.stack(recvbuf_scale))
compensated_server_m = self.compression_backend.cupy2torch(
(cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0)
compensated_server_m.add_(server_error)
server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
server_error.set_(compensated_server_m -
server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
# cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
if self.bool_not_supported:
cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool().to(dtype=torch.uint8)),
1)
else:
cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
compensated_server_m = None
cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
dtype=cupy_recvbuf_sign.dtype)
# cupy_recvbuf_sign, recvbuf_sign = None, None
cupy_recvbuf_sign = None
server_sign_packed = [self.compression_backend.cupy2torch(cupy_server_sign_packed[0])]
recvbuf_sign_server = [
self.compression_backend.cupy2torch(cupy_recvbuf_sign_server[idx]) for idx in range(self.size)
]
# server_scale = self.compression_backend.cupy2torch(cupy_server_scale)
cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
# cupy_recvbuf_scale, recvbuf_scale = None, None
recvbuf_scale_server = [
self.compression_backend.cupy2torch(cupy_recvbuf_scale_server[idx]) for idx in range(self.size)
]
# Communication Phase 2
dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group)
dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group)
cupy_server_sign_packed = None
# need to convert from a tensor list to a single tensor
# dist.all_gather only provides a tensor list as the recv/output buffer
recvbuf_sign_server = torch.stack(recvbuf_sign_server)
cupy_recvbuf_sign_server = self.compression_backend.torch2cupy(recvbuf_sign_server)
buffer_m.data.copy_(
self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
if original_size != worker_error_size:
buffer_m = buffer_m[0:original_size]
if len(original_shape) > 1:
buffer_m = buffer_m.reshape(original_shape)
return buffer_m | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/comm/nccl.py | nccl.py |
# DeepSpeed Team
import torch
import cupy
import time
import numpy as np
from mpi4py import MPI
from deepspeed.runtime.compression.cupy import CupyBackend
class MpiBackend(object):
def __init__(self, cuda_aware):
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
self.cuda_aware = cuda_aware
self.compression_backend = CupyBackend()
def my_igather(self, rank, size, comm, sendbuf, recbuf, root):
req = []
if rank == root:
for idx in range(size):
if idx != rank:
req.append(comm.Irecv(recbuf[idx], source=idx))
else:
recbuf[rank] = sendbuf
else:
req.append(comm.Isend(sendbuf, dest=root))
return req
def gather_cuda(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
cupy_recvbuf_scale):
# We do in-place operations on cupy buffers so we do not return any buffers
requests = []
for idx in range(world_size):
req_sign = self.my_igather(rank, world_size, comm, cupy_sign_list_packed[idx], cupy_recvbuf_sign, root=idx)
requests += req_sign
for idx in range(world_size):
req_scale = self.my_igather(rank, world_size, comm, cupy_worker_scale, cupy_recvbuf_scale, root=idx)
requests += req_scale
MPI.Request.Waitall(requests)
def gather_host(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale,
cupy_recvbuf_scale):
# In-place operations are not possible for newly created cupy arrays
# so we need to return the new buffers
numpy_recvbuf_sign = np.zeros([world_size, cupy_sign_list_packed[rank].size],
dtype=cupy_sign_list_packed[0].dtype)
numpy_recvbuf_scale = np.zeros([world_size, 1], dtype=cupy_worker_scale.dtype)
# 1. convert from cupy to numpy
numpy_sign_list_packed = cupy_sign_list_packed
for idx in range(world_size):
numpy_sign_list_packed[idx] = cupy.asnumpy(cupy_sign_list_packed[idx])
numpy_worker_scale = cupy.asnumpy(cupy_worker_scale)
numpy_recvbuf_scale = cupy.asnumpy(cupy_recvbuf_scale)
cupy.cuda.get_current_stream().synchronize()
# 2. use numpy buffers for communication
requests = []
for idx in range(world_size):
req_sign = self.my_igather(rank,
world_size,
comm,
numpy_sign_list_packed[idx],
numpy_recvbuf_sign,
root=idx)
requests += req_sign
for idx in range(world_size):
req_scale = self.my_igather(rank, world_size, comm, numpy_worker_scale, numpy_recvbuf_scale, root=idx)
requests += req_scale
MPI.Request.Waitall(requests)
# 3. Convert back from numpy to cupy
cupy_recvbuf_sign = cupy.asarray(numpy_recvbuf_sign)
for idx in range(world_size):
cupy_sign_list_packed[idx] = cupy.asarray(numpy_sign_list_packed[idx])
cupy_worker_scale = cupy.asarray(numpy_worker_scale)
cupy_recvbuf_scale = cupy.asarray(numpy_recvbuf_scale)
cupy.cuda.get_current_stream().synchronize()
return cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale
def allgather_cuda(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server):
comm.Allgather(cupy_server_sign_packed, cupy_recvbuf_sign_server)
comm.Allgather(cupy_server_scale, cupy_recvbuf_scale_server)
def allgather_host(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server):
# 1. Convert cupy to numpy
numpy_recvbuf_sign_server = np.zeros([comm.Get_size(), cupy_server_sign_packed.size],
dtype=cupy_server_sign_packed.dtype)
numpy_recvbuf_scale_server = np.zeros([comm.Get_size(), 1], dtype=cupy_server_scale.dtype)
numpy_server_sign_packed = cupy.asnumpy(cupy_server_sign_packed)
numpy_recvbuf_sign_server = cupy.asnumpy(cupy_recvbuf_sign_server)
numpy_server_scale = cupy.asnumpy(cupy_server_scale)
numpy_recvbuf_scale_server = cupy.asnumpy(cupy_recvbuf_scale_server)
cupy.cuda.get_current_stream().synchronize()
# 2. Communicate numpy buffers
comm.Allgather(numpy_server_sign_packed, numpy_recvbuf_sign_server)
comm.Allgather(numpy_server_scale, numpy_recvbuf_scale_server)
comm.Barrier()
# 3. Convert numpy back to cupy
cupy_server_sign_packed = cupy.asarray(numpy_server_sign_packed)
cupy_recvbuf_sign_server = cupy.asarray(numpy_recvbuf_sign_server)
cupy_server_scale = cupy.asarray(numpy_server_scale)
cupy_recvbuf_scale_server = cupy.asarray(numpy_recvbuf_scale_server)
cupy.cuda.get_current_stream().synchronize()
return cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server
def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank):
all_start_time = time.time()
original_shape = buffer_m.size()
if len(original_shape) > 1:
buffer_m = torch.flatten(buffer_m)
original_size = buffer_m.numel()
worker_error_size = worker_error.numel()
cupy.cuda.Device(local_rank).use()
if original_size != worker_error_size:
empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device)
buffer_m = torch.cat([buffer_m, empty_tensor])
buffer_m.add_(worker_error)
worker_scale = torch.norm(buffer_m) / np.sqrt(torch.numel(buffer_m))
worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
cupy_sign_list_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size)
cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale)
cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size],
dtype=cupy_sign_list_packed[0].dtype)
cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype)
# Communication Phase 1
gather_start = time.time()
if self.cuda_aware:
self.gather_cuda(self.rank, self.size, self.comm, cupy_sign_list_packed, cupy_recvbuf_sign,
cupy_worker_scale, cupy_recvbuf_scale)
else:
_, cupy_recvbuf_sign, _, cupy_recvbuf_scale = self.gather_host(self.rank, self.size, self.comm,
cupy_sign_list_packed, cupy_recvbuf_sign,
cupy_worker_scale, cupy_recvbuf_scale)
gather_end = time.time()
# cupy_sign_list_packed, cupy_worker_scale, worker_scale = None, None, None
cupy_sign_list_packed = None
compensated_server_m = self.compression_backend.cupy2torch(
(cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
self.compression_backend.cupy2torch(cupy_recvbuf_scale).mul_(1 / self.size)).sum(0)
compensated_server_m.add_(server_error)
server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel())
server_error.set_(compensated_server_m -
server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0))
cupy_server_scale = self.compression_backend.torch2cupy(server_scale)
cupy_server_sign_packed = self.compression_backend.compress_by_chunk(
self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1)
compensated_server_m = None
cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size],
dtype=cupy_recvbuf_sign.dtype)
cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_recvbuf_scale.dtype)
# cupy_recvbuf_sign, cupy_recvbuf_scale = None, None
cupy_recvbuf_sign = None
# Communication Phase 2
if self.cuda_aware:
self.allgather_cuda(self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server)
else:
_, cupy_recvbuf_sign_server, _, cupy_recvbuf_scale_server = self.allgather_host(
self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale,
cupy_recvbuf_scale_server)
# cupy_server_sign_packed, cupy_server_scale, server_scale = None, None, None
cupy_server_sign_packed = None
buffer_m.data.copy_(
self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape(
self.size, -1)).float().add_(-0.5).mul_(2.0).mul_(
self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data)
if original_size != worker_error_size:
buffer_m = buffer_m[0:original_size]
if len(original_shape) > 1:
buffer_m = buffer_m.reshape(original_shape)
# cupy_recvbuf_sign_server, cupy_recvbuf_scale_server = None, None
return buffer_m | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/comm/mpi.py | mpi.py |
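# Illustrative sketch (not part of the library): the error-feedback 1-bit
# compression performed above, reduced to its local math on a plain tensor.
# The MPI gather/allgather phases and the cupy bit-packing are omitted.
#
#   import torch
#   x = torch.randn(1024)
#   error = torch.zeros_like(x)            # persistent worker/server error buffer
#   compensated = x + error                # add the residual from the previous step
#   scale = compensated.norm() / compensated.numel() ** 0.5
#   signs = compensated.sign()             # this is what gets packed to 1 bit per element
#   error = compensated - scale * signs    # residual fed back into the next step
#   decompressed = scale * signs           # what the receiving side reconstructs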
# DeepSpeed Team
"""
DeepSpeed Communication Package: deepspeed.comm
deepspeed.comm
-- import and use deepspeed.ops.comm
-- use torch.distributed directly if both this package and torch.distributed use the same NCCL version
-- use custom collectives
-- can either use torch.dist or ds.ops.comm?
Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well.
deepspeed.comm API
-- must be kept fully compatible (same signatures) with the torch.dist API to ensure backward/cross-framework compatibility.
-- e.g. if a client code used
from deepspeed import comm as dist
instead of
import torch.distributed as dist
The code should work without breaking any of the public torch.distributed functionality
Future:
-- deepspeed groups API should be brought into ds.comm
"""
from enum import Enum
import torch
import os
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout
from .constants import *
from deepspeed.accelerator import get_accelerator
class ReduceOp(Enum):
SUM = 0
PRODUCT = 1
MIN = 2
MAX = 3
BAND = 4
BOR = 5
BXOR = 6
AVG = 7
UNUSED = 8
from deepspeed.utils.comms_logging import CommsLogger
from deepspeed.utils import timer, get_caller_func
from deepspeed.comm.torch import TorchBackend
from deepspeed import utils
from datetime import timedelta
# Current deepspeed.comm backend (cdb) global object for simple access by client code
use_ds_backend = False
cdb = None
# Create global timer for ops
timers = timer.SynchronizedWallClockTimer()
timer_summary = {}
comms_logger = CommsLogger()
# Maintain objects of all initialized ds backends and assign them using the API functions in this file
nccl_backend = None
mpi_backend = None
# This should be set here so all rank/size information from the launcher can be propagated
from deepspeed.comm.utils import *
def _configure_using_config_file(config):
if config.comms_logger_enabled:
comms_logger.configure(config)
def configure(
deepspeed_config=None,
enabled=None,
prof_all=None,
prof_ops=None,
verbose=None,
debug=None,
):
if deepspeed_config is not None:
_configure_using_config_file(deepspeed_config.comms_config)
if enabled is not None:
comms_logger.enabled = enabled
if prof_all is not None:
comms_logger.prof_all = prof_all
if prof_ops is not None:
comms_logger.prof_ops = prof_ops
if verbose is not None:
comms_logger.verbose = verbose
if debug is not None:
comms_logger.debug = debug
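# Illustrative sketch (not part of this module): enabling the comms logger at
# runtime instead of through a DeepSpeed config file, then printing the summary.
#
#   from deepspeed import comm as dist
#   dist.configure(enabled=True, prof_all=True, verbose=False)
#   ...                  # run some collectives (all_reduce, all_gather, ...)
#   dist.log_summary()   # rank 0 prints the aggregated per-op timings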
# Logging wrapper for timing ops
def timed_op(func):
def log_wrapper(*args, **kwargs):
# Check the enabled flag first so that the overhead added to each comm op is at most two if-conditions
if comms_logger.enabled:
if ('prof' in kwargs
and kwargs['prof']) or comms_logger.prof_all or ('log_name' in kwargs
and kwargs['log_name'] in comms_logger.prof_ops):
# Need func args for their defaults
func_args = get_default_args(func)
func_args.update(kwargs)
msg_size = get_msg_size_from_args(func, *args, **kwargs)
log_name = get_debug_log_name(func_args, comms_logger.debug)
timers(log_name).start()
# Return the op, then stop the op's timer
try:
return func(*args, **kwargs)
finally:
if comms_logger.enabled:
# Need to make op blocking for accurate logging
get_accelerator().synchronize()
# If we're using MPI, we can't simply sync the stream
if cdb.using_mpi:
cdb.barrier()
if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or (
'log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops):
log_name = get_debug_log_name(func_args, comms_logger.debug)
raw_name = func.__name__
timers(log_name).stop()
# need temp var since 'elapsed' resets events
time_elapsed = timers(log_name).elapsed(reset=False)
comms_logger.append(raw_name, log_name, time_elapsed, msg_size)
return log_wrapper
# For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code.
# DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation.
# Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html
# UNUSED: Future helper function to initialize DS backends
def init_deepspeed_backend(ds_backend):
global cdb
global nccl_backend
global mpi_backend
global use_ds_backend
if ds_backend == NCCL_BACKEND:
utils.logger.warning("NCCL backend in DeepSpeed not yet implemented")
elif ds_backend == MPI_BACKEND:
utils.logger.warning("MPI backend in DeepSpeed not yet implemented")
elif ds_backend == GLOO_BACKEND:
utils.logger.warning("Gloo backend in DeepSpeed not yet implemented")
else:
utils.logger.warning(f"DeepSpeed does not support {ds_backend} backend")
def is_initialized():
#assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb is None:
return False
else:
return cdb.is_initialized()
def destroy_process_group(group=None):
global cdb
return cdb.destroy_process_group(group=group)
def new_group(ranks):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.new_group(ranks)
def is_available() -> bool:
# Returns ``True`` if the deepspeed comm package is available.
# TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import
# any communication related primitives from this package.
# use hasattr(deepspeed.csrc.ops, "_comm") or something
return True
def set_backend(backend_name):
if not use_ds_backend:
utils.logger.error(
"DeepSpeed communication backend is required. Please use deepspeed.comm.init_distributed(backend, use_deepspeed=True) to use this functionality"
)
raise RuntimeError('Error: Custom DeepSpeed backend called without initializing DeepSpeed distributed.')
global cdb
global nccl_backend
global mpi_backend
try:
if backend_name == NCCL_BACKEND:
if nccl_backend is not None and nccl_backend.is_initialized():
cdb = nccl_backend
elif backend_name == MPI_BACKEND:
if mpi_backend is not None and mpi_backend.is_initialized():
cdb = mpi_backend
except Exception as inst:
print(inst)
@timed_op
def broadcast(tensor, src, group=None, async_op=False, prof=False, log_name='broadcast', debug=get_caller_func()):
global cdb
return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)
@timed_op
def all_gather(tensor_list,
tensor,
group=None,
async_op=False,
prof=False,
log_name='all_gather',
debug=get_caller_func()):
global cdb
return cdb.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)
def has_reduce_scatter_tensor():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.has_reduce_scatter_tensor()
def reduce_scatter_fn(output_tensor,
tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
debug=get_caller_func()):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb.has_reduce_scatter_tensor():
return reduce_scatter_tensor(output_tensor,
tensor,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug)
else:
if get_rank() == 0:
utils.logger.warning_once("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
"torch.distributed.all_gather which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group)))
return reduce_scatter(output_tensor,
input_tensor_lst,
op=op,
group=group,
async_op=async_op,
prof=prof,
debug=debug)
@timed_op
def reduce_scatter_tensor(output_tensor,
tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce_scatter_tensor',
debug=get_caller_func()):
global cdb
return cdb.reduce_scatter_tensor(output_tensor=output_tensor,
input_tensor=tensor,
op=op,
group=group,
async_op=async_op)
@timed_op
def all_gather_into_tensor(output_tensor,
tensor,
group=None,
async_op=False,
prof=False,
log_name='all_gather_into_tensor',
debug=get_caller_func()):
global cdb
return cdb.all_gather_into_tensor(output_tensor=output_tensor, input_tensor=tensor, group=group, async_op=async_op)
def has_all_gather_into_tensor():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.has_all_gather_into_tensor()
def allgather_fn(output_tensor, input_tensor, group=None, async_op=False, debug=get_caller_func()):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
if cdb.has_all_gather_into_tensor():
return all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op, debug=debug)
else:
if get_rank() == 0:
utils.logger.warning_once("unable to find torch.distributed.all_gather_into_tensor. will fall back to "
"torch.distributed.all_gather which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group)))
return all_gather(output_tensors, input_tensor, group=group, async_op=async_op, debug=debug)
@timed_op
def all_to_all_single(output,
tensor,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False,
prof=False,
log_name='all_to_all_single',
debug=get_caller_func()):
global cdb
return cdb.all_to_all_single(output=output,
input=tensor,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op)
@timed_op
def send(tensor, dst, group=None, tag=0, prof=False, log_name='send', debug=get_caller_func()):
global cdb
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
@timed_op
def recv(tensor, src=None, group=None, tag=0, prof=False, log_name='recv', debug=get_caller_func()):
global cdb
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
@timed_op
def isend(tensor, dst, group=None, tag=0, prof=False, log_name='isend', debug=get_caller_func()):
global cdb
return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag)
@timed_op
def irecv(tensor, src=None, group=None, tag=0, prof=False, log_name='irecv', debug=get_caller_func()):
global cdb
return cdb.recv(tensor=tensor, src=src, group=group, tag=tag)
@timed_op
def gather(tensor,
gather_list=None,
dst=0,
group=None,
async_op=False,
prof=False,
log_name='gather',
debug=get_caller_func()):
global cdb
return cdb.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op)
@timed_op
def scatter(tensor,
scatter_list=None,
src=0,
group=None,
async_op=False,
prof=False,
log_name='scatter',
debug=get_caller_func()):
global cdb
return cdb.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op)
@timed_op
def barrier(group=None, async_op=False, device_ids=None, prof=False, log_name='barrier', debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, async_op=async_op, device_ids=device_ids)
@timed_op
def monitored_barrier(group=None,
timeout=None,
wait_all_ranks=False,
prof=False,
log_name='monitored_barrier',
debug=get_caller_func()):
global cdb
return cdb.barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
def log_summary():
global cdb
barrier(log_name='log_summary_barrier')
if cdb.get_rank() == 0:
comms_logger.log_all()
barrier(log_name='log_summary_barrier')
@timed_op
def reduce(tensor,
dst,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce',
debug=get_caller_func()):
global cdb
return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)
@timed_op
def reduce_scatter(output,
input_list,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='reduce_scatter',
debug=get_caller_func()):
global cdb
return cdb.reduce_scatter(output=output, input_list=input_list, op=op, group=group, async_op=async_op)
def has_all_reduce_coalesced():
""""""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
assert cdb.has_all_reduce_coalesced is not None, 'has_all_reduce_coalesced is not yet defined'
return cdb.has_all_reduce_coalesced
def has_coalescing_manager():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
assert cdb.has_coalescing_manager is not None, 'has_coalescing_manager is not yet defined'
return cdb.has_coalescing_manager
def all_gather_coalesced(output_tensors, input_tensors, group=None, async_op=False):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.all_gather_coalesced(output_tensors, input_tensors, group=group, async_op=async_op)
@timed_op
def all_reduce(tensor,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='all_reduce',
debug=get_caller_func()):
#if profile_comm:
# context of the timers?
# timers.start()
# TensorBoard logging for comm calls.?
global cdb
#print(f'op = {op}, cdb= {cdb.name}')
return cdb.all_reduce(tensor, op, group, async_op)
@timed_op
def all_reduce_coalesced(tensors,
op=ReduceOp.SUM,
group=None,
async_op=False,
prof=False,
log_name='all_reduce',
debug=get_caller_func()):
global cdb
return cdb.all_reduce_coalesced(tensors, op, group, async_op)
def get_world_group():
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_world_group()
def get_world_size(group=None) -> int:
"""
Returns the number of processes in the current process group
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The world size of the process group
-1, if not part of the group
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_world_size(group)
def get_rank(group=None):
"""
Returns the rank of the current process in the provided ``group`` or the
default group if none was provided.
Rank is a unique identifier assigned to each process within a distributed
process group. They are always consecutive integers ranging from 0 to
``world_size - 1``.
Args:
group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
The rank of the process group
-1, if not part of the group
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_rank(group)
def get_local_rank():
"""
Helper function to get local rank after a backend has been set and initialized
Args:
None
Returns:
local rank (= GPU device ID)
"""
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return get_local_rank_from_launcher()
def get_global_rank(group=None, group_rank=0):
global cdb
assert cdb is not None and cdb.is_initialized(
), 'DeepSpeed backend not set, please initialize it using init_process_group()'
return cdb.get_global_rank(group, group_rank)
# Main DeepSpeed Comms. public API.
def init_distributed(dist_backend=None,
auto_mpi_discovery=True,
distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT,
verbose=True,
timeout=default_pg_timeout,
init_method=None,
dist_init_required=None,
config=None,
rank=-1,
world_size=-1):
''' Initialize dist backend, potentially performing MPI discovery if needed
Arguments:
dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo
auto_mpi_discovery: Optional (bool). If distributed environment variables are not set, attempt to discover them from MPI
distributed_port: Optional (int). torch distributed backend port
verbose: Optional (bool). verbose logging
timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes.
init_method: Optional (string). A torch.distributed URL specifying how to initialize the process group. Default is "env://" if no init_method or store is specified.
config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling)
rank: Optional (int). The current manually specified rank. Some init_methods like "tcp://" need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization)
world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization.
'''
global cdb
configure(deepspeed_config=config)
if dist_init_required is None:
dist_init_required = cdb is None or not cdb.is_initialized()
if cdb is None and torch.distributed.is_initialized():
# The user initialized torch.dist themselves, create cdb and short-circuit
cdb = TorchBackend(dist_backend, timeout, init_method)
return
if dist_init_required is False:
assert (
cdb is not None and cdb.is_initialized() is True
), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()"
else:
# Initialize torch distributed if needed
required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)):
if verbose:
utils.logger.info("Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...")
if in_aml() and not in_dlts():
patch_aml_env_for_torch_nccl_backend(verbose=verbose)
elif in_aws_sm():
patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose)
else:
mpi_discovery(distributed_port=distributed_port, verbose=verbose)
if cdb is not None and cdb.is_initialized():
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info('Distributed backend already initialized')
else:
assert isinstance(timeout, timedelta)
if dist_backend is None:
dist_backend = get_accelerator().communication_backend_name()
if int(os.getenv('RANK', '0')) == 0:
utils.logger.info('Initializing TorchBackend in DeepSpeed with backend {}'.format(dist_backend))
# Create a torch backend object, initialize torch distributed, and assign to cdb
cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size)
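# Illustrative sketch (not part of this module): manual initialization without a
# launcher. The address, port, rank and world_size below are placeholders.
#
#   import deepspeed.comm as dist
#   dist.init_distributed(dist_backend='gloo',
#                         auto_mpi_discovery=False,
#                         init_method='tcp://127.0.0.1:29500',
#                         rank=0,
#                         world_size=1)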
def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True):
'''
Discover the MPI environment via mpi4py and map it to the relevant torch.distributed state
'''
from mpi4py import MPI
import subprocess
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
world_size = comm.Get_size()
master_addr = None
if rank == 0:
hostname_cmd = ["hostname -I"]
result = subprocess.check_output(hostname_cmd, shell=True)
master_addr = result.decode('utf-8').split()[0]
master_addr = comm.bcast(master_addr, root=0)
# Determine local rank by assuming hostnames are unique
proc_name = MPI.Get_processor_name()
all_procs = comm.allgather(proc_name)
local_rank = sum([i == proc_name for i in all_procs[:rank]])
os.environ['RANK'] = str(rank)
os.environ['WORLD_SIZE'] = str(world_size)
os.environ['LOCAL_RANK'] = str(local_rank)
os.environ['MASTER_ADDR'] = master_addr
os.environ['MASTER_PORT'] = str(distributed_port)
if verbose:
utils.logger.info(
"Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}".
format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
if cdb is not None and cdb.is_initialized():
assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, cdb.get_rank())
assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format(
world_size, cdb.get_world_size())
def in_aml():
# Are we running inside an Azure Machine Learning (AML) environment?
return 'AZUREML_EXPERIMENT_ID' in os.environ
def in_aws_sm():
# Are we running inside an AWS SageMaker environment?
return 'SM_TRAINING_ENV' in os.environ
def in_dlts():
# Are we running on a DLTS cluster?
return 'DLTS_JOB_ID' in os.environ
def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True):
"""Helper routine to get and set environment variables.
This is adapted from Azure ML's documentation available from:
https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(os.environ["WORLD_SIZE"])
if not single_node:
master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":")
os.environ["MASTER_ADDR"] = master_node_params[0]
# Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE
if "MASTER_PORT" not in os.environ:
os.environ["MASTER_PORT"] = str(master_port)
else:
os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"]
os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT
if verbose:
utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(os.environ["NCCL_SOCKET_IFNAME"]))
os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
if verbose:
utils.logger.info(
"Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT']))
def patch_aws_sm_env_for_torch_nccl_backend(verbose=True):
"""Helper routine to get and set environment variables when running inside an AWS SageMaker environment.
"""
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
if verbose:
utils.logger.info(
"Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}"
.format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'],
os.environ['MASTER_PORT'])) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/comm/comm.py | comm.py |
# DeepSpeed Team
import os
import inspect
from deepspeed.utils import get_caller_func
def get_local_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('LOCAL_RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank)
def get_world_rank_from_launcher():
# DeepSpeed launcher will set it so get from there
rank = os.environ.get('RANK')
if rank is None:
rank = os.environ.get('OMPI_COMM_WORLD_RANK')
# Make it a single process job and set rank to 0
if rank is None:
rank = 0
return int(rank)
def get_world_size_from_launcher():
# DeepSpeed launcher will set it so get from there
size = os.environ.get('WORLD_SIZE')
rank = os.environ.get('RANK')
if size is None:
size = os.environ.get('OMPI_COMM_WORLD_SIZE')
# Make it a single process job and set size to 1
if size is None:
size = 1
if rank is None or int(rank) == 0:
print(f"set world size to {size}")
return int(size)
def get_default_args(func):
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
# We need this hacky function since torch doesn't consistently name or place the input tensor args
def get_tensor_position(func):
sig_params = inspect.signature(func).parameters
arg = None
# most colls
if 'tensor' in sig_params:
arg = 'tensor'
# reduce scatter coll
elif 'input_list' in sig_params:
arg = 'input_list'
# all_to_all and torch multiGPU colls
elif 'input_tensor_list' in sig_params:
arg = 'input_tensor_list'
if arg is None:
return -1
else:
return list(sig_params).index(arg)
def get_tensor_kwarg(func, kwargs):
func_args = get_default_args(func)
func_args.update(kwargs)
arg = None
if 'tensor' in func_args:
arg = func_args['tensor']
elif 'input_list' in func_args:
arg = func_args['input_list']
elif 'input_tensor_list' in func_args:
arg = func_args['input_tensor_list']
return arg
def get_msg_size_from_args(func, *args, **kwargs):
# 3 cases:
# - tensor arg is in args
# - tensor arg is in kwargs
# - tensor arg is not present (e.g. barrier)
tensor_arg_position = -1
tensor_arg = None
# check if tensor arg is in args
if len(args) > 0:
tensor_arg_position = get_tensor_position(func)
if tensor_arg_position > -1:
tensor_arg = args[get_tensor_position(func)]
# check if tensor arg is in kwargs
if tensor_arg is None and len(kwargs) > 0:
tensor_arg = get_tensor_kwarg(func, kwargs)
# if tensor arg is not present, no data is being transmitted
if tensor_arg is None:
return 0
else:
# Sum of tensor sizes for list colls such as torch's all_to_all
# NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted.
if type(tensor_arg) is list:
return sum(x.element_size() * x.nelement() for x in tensor_arg)
else:
return tensor_arg.element_size() * tensor_arg.nelement()
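# Illustrative example (not part of this module): for an all_reduce of a tensor with
# 1024 fp16 elements, element_size() is 2 bytes, so the reported msg_size is
# 1024 * 2 = 2048 bytes; for list-based colls the per-tensor sizes are summed instead.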
def get_debug_log_name(func_args, debug):
if debug:
return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']'
else:
return func_args['log_name'] | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/comm/utils.py | utils.py |
# DeepSpeed Team
from deepspeed import utils
from .utils import *
from .backend import *
from .comm import *
import torch
def is_torch_two():
TORCH_MAJOR = int(torch.__version__.split('.')[0])
if TORCH_MAJOR >= 2:
return True
else:
return False
def has_coalescing_manager():
has_c10d = hasattr(torch.distributed, 'distributed_c10d')
return has_c10d and hasattr(torch.distributed.distributed_c10d, '_coalescing_manager')
def has_all_reduce_coalesced():
return hasattr(torch.distributed, "all_reduce_coalesced")
def get_coalescing_manager(group, device, reqs):
if is_torch_two():
return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, reqs=reqs)
else:
return torch.distributed.distributed_c10d._coalescing_manager(group, reqs)
class TorchBackend(Backend):
"""
A light-weight wrapper class for torch.distributed API.
Only a subset of functions are wrapped. Once the init_process_group
is initialized, standard torch.distributed.* can be used directly
so no need to wrap all the functions. We can keep adding wrappers as
needed.
"""
def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name='torch'):
super(TorchBackend, self).__init__()
self.has_all_reduce_coalesced = has_all_reduce_coalesced()
self.has_coalescing_manager = has_coalescing_manager()
self.all_gather_function = self.get_all_gather_function()
self.reduce_scatter_function = self.get_reduce_scatter_function()
self.initialized = True
self.name = name
# Future functionality to support ds.initialize() on a single GPU
# The idea is to fake that dist backend is initialized even when
# it is not so we can run on a single GPU without doing any init_process_group
self.single_gpu_mode = True
self.init_process_group(backend, timeout, init_method, rank, world_size)
@classmethod
def get_all_gather_function(cls):
if hasattr(torch.distributed, "all_gather_into_tensor"):
return torch.distributed.all_gather_into_tensor
elif hasattr(torch.distributed, "_all_gather_base"):
return torch.distributed._all_gather_base
return None
@classmethod
def get_reduce_scatter_function(cls):
if hasattr(torch.distributed, "reduce_scatter_tensor"):
return torch.distributed.reduce_scatter_tensor
elif hasattr(torch.distributed, "_reduce_scatter_base"):
return torch.distributed._reduce_scatter_base
return None
def has_all_gather_into_tensor(self):
return self.all_gather_function is not None
def has_reduce_scatter_tensor(self):
return self.reduce_scatter_function is not None
def init_process_group(self, backend, timeout, init_method, rank, world_size):
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend,
timeout=timeout,
init_method=init_method,
rank=rank,
world_size=world_size)
self.using_mpi = torch.distributed.get_backend() == 'mpi'
def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
op = self._reduce_op(op)
return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op)
def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False):
""" proxy func to torch.distributed.all_reduce_coalesced,
which is included in PyTorch 1.13 and above
"""
if not self.has_all_reduce_coalesced:
raise RuntimeError(f"Current torch version does not have all_reduce_coalesced "
f"api (torch.__version__: {torch.__version__})")
op = self._reduce_op(op)
return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, async_op=async_op)
def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False):
return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op)
def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False):
return torch.distributed.reduce_scatter(output=output,
input_list=input_list,
op=self._reduce_op(op),
group=group,
async_op=async_op)
def broadcast(self, tensor, src, group=None, async_op=False):
return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op)
def all_gather(self, tensor_list, tensor, group=None, async_op=False):
return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op)
def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False):
if self.has_all_gather_into_tensor():
return self.all_gather_function(output_tensor=output_tensor,
input_tensor=input_tensor,
group=group,
async_op=async_op)
else:
utils.logger.warning("unable to find torch.distributed._all_gather_base. will fall back to "
"torch.distributed.all_gather which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False):
""""""
assert len(output_tensors) == len(input_tensors), ""
if hasattr(torch.distributed.distributed_c10d, '_all_gather_base_coalesced'):
# customized PyTorch
return torch.distributed.distributed_c10d._all_gather_base_coalesced(output_tensors,
input_tensors,
group=group,
async_op=async_op)
elif has_coalescing_manager():
reqs = []
with get_coalescing_manager(group, input_tensors[0].device, reqs):
for output, input in zip(output_tensors, input_tensors):
handle = torch.distributed.distributed_c10d.all_gather_into_tensor(output,
input,
group=group,
async_op=True)
reqs.append(handle)
if async_op:
return reqs[-1]
else:
reqs[-1].wait()
def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False):
if self.has_reduce_scatter_tensor():
return self.reduce_scatter_function(output_tensor,
input_tensor,
op=self._reduce_op(op),
group=group,
async_op=async_op)
else:
utils.logger.warning("unable to find torch.distributed.reduce_scatter_tensor. will fall back to "
"torch.distributed.reduce_scatter which will result in suboptimal performance. "
"please consider upgrading your pytorch installation.")
pass
def all_to_all_single(self,
output,
input,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False):
return torch.distributed.all_to_all_single(output=output,
input=input,
output_split_sizes=output_split_sizes,
input_split_sizes=input_split_sizes,
group=group,
async_op=async_op)
def send(self, tensor, dst, group=None, tag=0):
return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag)
def recv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag)
def isend(self, tensor, dst, group=None, tag=0):
return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag)
def irecv(self, tensor, src=None, group=None, tag=0):
return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag)
def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False):
return torch.distributed.gather(tensor=tensor,
gather_list=gather_list,
dst=dst,
group=group,
async_op=async_op)
def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False):
return torch.distributed.scatter(tensor=tensor,
scatter_list=scatter_list,
src=src,
group=group,
async_op=async_op)
def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids)
def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False):
if group is None:
group = torch.distributed.GroupMember.WORLD
return torch.distributed.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
def get_rank(self, group=None):
return torch.distributed.get_rank(group=group)
def get_world_size(self, group=None):
return torch.distributed.get_world_size(group=group)
def is_initialized(self):
return torch.distributed.is_initialized()
def get_backend(self, group=None):
return torch.distributed.get_backend(group=group)
def new_group(self, ranks):
return torch.distributed.new_group(ranks)
def get_global_rank(self, group, group_rank):
if hasattr(torch.distributed.distributed_c10d, "get_global_rank"):
from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank
else:
from torch.distributed.distributed_c10d import _get_global_rank
return _get_global_rank(group, group_rank)
def get_world_group(self):
return torch.distributed.group.WORLD
def destroy_process_group(self, group=None):
return torch.distributed.destroy_process_group(group=group)
def _reduce_op(self, op):
'''
Helper function. If the op provided is not a torch.dist.ReduceOp, convert it and return
'''
if not isinstance(op, torch.distributed.ReduceOp):
if op == ReduceOp.SUM:
op = torch.distributed.ReduceOp.SUM
elif op == ReduceOp.PRODUCT:
op = torch.distributed.ReduceOp.PRODUCT
elif op == ReduceOp.AVG:
op = torch.distributed.ReduceOp.AVG
elif op == ReduceOp.MIN:
op = torch.distributed.ReduceOp.MIN
elif op == ReduceOp.MAX:
op = torch.distributed.ReduceOp.MAX
elif op == ReduceOp.BAND:
op = torch.distributed.ReduceOp.BAND
elif op == ReduceOp.BOR:
op = torch.distributed.ReduceOp.BOR
elif op == ReduceOp.BXOR:
op = torch.distributed.ReduceOp.BXOR
return op
# This will become a light-weight wrapper around torch.distributed functions
# TODO: create some example to show how this wrapper can help profile communication
# TODO: make sure there is no performance regression with this approach
# TODO: explore monkey-patching if this does not work | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/comm/torch.py | torch.py |
# DeepSpeed Team
from .compress import get_module_name
from .constants import *
from .helper import recursive_getattr
from deepspeed.utils import logger
class compression_scheduler():
'''
Used to schedule different compression methods
'''
def __init__(self, model, compression_config):
self.model = model
self.compression_config = compression_config
self.make_init()
self.training_steps = 0
self.weight_quantization_enabled = False
self.verbose = {
WEIGHT_QUANTIZATION: False,
ACTIVATION_QUANTIZATION: False,
SPARSE_PRUNING: False,
HEAD_PRUNING: False,
ROW_PRUNING: False,
CHANNEL_PRUNING: False
}
def make_init(self):
self.different_compression_methods = {}
for method, method_content in self.compression_config.items():
if LAYER_REDUCTION in method:
continue
self.different_compression_methods[method] = {
TECHNIQUE_ENABLED: False,
SHARED_PARAMETERS: None,
DIFFERENT_GROUPS: []
}
exist_module_name = set()
shared_parameters = method_content[SHARED_PARAMETERS]
self.different_compression_methods[method][TECHNIQUE_ENABLED] = shared_parameters[TECHNIQUE_ENABLED]
self.different_compression_methods[method][SHARED_PARAMETERS] = shared_parameters
for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
module_name_list = []
for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
module_name, exist_module_name = get_module_name(group_name,
self.model,
key_word,
exist_module_name,
verbose=False)
module_name_list.extend(module_name)
if module_name_list:
self.different_compression_methods[method][DIFFERENT_GROUPS].append(
[group_name, module_name_list,
method_parameters.copy().pop('params')])
def check_weight_quantization(self):
# check weight quantization
wq = self.different_compression_methods[WEIGHT_QUANTIZATION]
if not wq[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = wq[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in wq[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.weight_quantization_enabled = True
if not self.verbose[WEIGHT_QUANTIZATION]:
logger.info(f'Weight quantization is enabled at step {self.training_steps}')
self.weight_quantization_enabled = True
self.verbose[WEIGHT_QUANTIZATION] = True
def check_activation_quantization(self):
# check activation quantization
aq = self.different_compression_methods[ACTIVATION_QUANTIZATION]
if not aq[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = aq[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in aq[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.activation_quantization_enabled = True
if not self.verbose[ACTIVATION_QUANTIZATION]:
logger.info(f'Activation quantization is enabled at step {self.training_steps}')
self.verbose[ACTIVATION_QUANTIZATION] = True
def check_sparse_pruning(self):
# check sparse pruning
sp = self.different_compression_methods[SPARSE_PRUNING]
if not sp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = sp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in sp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.sparse_pruning_enabled = True
if not self.verbose[SPARSE_PRUNING]:
logger.info(f'Sparse pruning is enabled at step {self.training_steps}')
self.verbose[SPARSE_PRUNING] = True
def check_head_pruning(self):
# check head pruning
hp = self.different_compression_methods[HEAD_PRUNING]
if not hp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = hp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in hp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.head_pruning_enabled = True
if not self.verbose[HEAD_PRUNING]:
logger.info(f'Head pruning is enabled at step {self.training_steps}')
self.verbose[HEAD_PRUNING] = True
def check_row_pruning(self):
# check row pruning
rp = self.different_compression_methods[ROW_PRUNING]
if not rp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = rp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in rp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.row_pruning_enabled = True
if not self.verbose[ROW_PRUNING]:
logger.info(f'Row pruning is enabled at step {self.training_steps}')
self.verbose[ROW_PRUNING] = True
def check_channel_pruning(self):
# check channel pruning
cp = self.different_compression_methods[CHANNEL_PRUNING]
if not cp[TECHNIQUE_ENABLED]:
return
else:
shared_parameters = cp[SHARED_PARAMETERS]
if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
for group_name, module_name_list, method_parameters in cp[DIFFERENT_GROUPS]:
for module_name in module_name_list:
module = recursive_getattr(self.model, module_name)
module.channel_pruning_enabled = True
if not self.verbose[CHANNEL_PRUNING]:
logger.info(f'Channel pruning is enabled at step {self.training_steps}')
self.verbose[CHANNEL_PRUNING] = True
def check_all_modules(self):
# check all different compression methods we have
self.check_weight_quantization()
self.check_activation_quantization()
self.check_sparse_pruning()
self.check_head_pruning()
self.check_row_pruning()
self.check_channel_pruning()
def step(self, step_zero_check=False):
if not step_zero_check:
self.training_steps += 1
self.check_all_modules() | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/compression/scheduler.py | scheduler.py |
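# Illustrative sketch (not part of this module): driving the scheduler from a
# training loop. `model`, `compression_config`, `dataloader` and `optimizer` are
# placeholders assumed to exist.
#
#   scheduler = compression_scheduler(model, compression_config)
#   scheduler.step(step_zero_check=True)    # apply any techniques whose offset is 0
#   for batch in dataloader:
#       loss = model(**batch).loss
#       loss.backward()
#       optimizer.step()
#       scheduler.step()                    # enables each technique once its schedule_offset is reached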
# DeepSpeed Team
import re
from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible
from .config import get_compression_config
from ..runtime.config_utils import dict_raise_error_on_duplicate_keys
from .constants import *
import os
import json
def check_deepspeed_config(config):
if isinstance(config, dict):
return config
elif os.path.exists(config):
return json.load(open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys)
else:
raise ValueError(
f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}")
def get_module_name(group_name, model, key_word, exist_module_name, mpu=None, verbose=True):
'''
Get the names of the modules in the model that match the key_word provided by the user.
'''
return_module_name = []
for name, module in model.named_modules():
module_check = is_module_compressible(module, mpu)
if re.search(key_word, name) is not None and module_check:
if name in exist_module_name and verbose:
# logger.warning
raise ValueError(
f"{name} is already added to compression, please check your config file for {group_name}.")
if name not in exist_module_name:
exist_module_name.add(name)
return_module_name.append(name)
return return_module_name, exist_module_name
def get_compress_methods(model, compress_methods, mpu=None):
# extract the compression module for each method in compress_methods
layer_added_compress_methods = []
for method, method_content in compress_methods.items():
if LAYER_REDUCTION in method:
continue
# for loop different methods, i.e., weight quantization, activation quantization etc
exist_module_name = set()
shared_parameters = method_content[SHARED_PARAMETERS] # get all the shared parameters
for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
# for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc
module_name_list = []
related_module_name_list = []
if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]:
# this is used for head/row/channel pruning, if users provide the related module scope, we can shrink the layer dim for them
# otherwise we just mask those as zeros
for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE],
method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]):
module_name, exist_module_name = get_module_name(group_name,
model,
key_word,
exist_module_name,
mpu=mpu)
module_name_list.append(module_name)
tmp_related_module_name_list = []
for rkw in related_key_words:
# related key word can be a list, for instance the QKV for O matrix in Attention
module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu)
tmp_related_module_name_list.append(module_name)
related_module_name_list.append(tmp_related_module_name_list)
else:
for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
module_name, exist_module_name = get_module_name(group_name,
model,
key_word,
exist_module_name,
mpu=mpu)
module_name_list.append(module_name)
if module_name_list:
# combine shared parameters with each group
combined_method_parameters = {
**(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)),
**shared_parameters
}
compression_item = [module_name_list, related_module_name_list, {method: combined_method_parameters}]
layer_added_compress_methods.append(compression_item)
return layer_added_compress_methods
def init_compression(model, deepspeed_config, teacher_model=None, mpu=None):
"""
Compress a model: replace linear/conv2d layers with deepspeed compression-aware modules
Args:
model (`torch.nn.Module`)
The model to compress.
deepspeed_config (dict or str)
The DeepSpeed config dict, or the path to a DeepSpeed JSON config file
teacher_model (`torch.nn.Module`, optional)
The teacher model; required when layer reduction is enabled
mpu
The mpu module for Row/Column parallelism
"""
compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
# For layer reduction
if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]:
assert teacher_model is not None, "Teacher model is required for layer reduction"
student_initialization(c_model, teacher_model, deepspeed_config)
layer_added_compress_methods = get_compress_methods(c_model, compress_methods, mpu=mpu)
compression_preparation(c_model, layer_added_compress_methods, mpu)
return model
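# Illustrative sketch (not part of this module): a typical compression-aware
# fine-tuning flow, assuming `ds_config` enables one or more of the techniques above.
#
#   model = init_compression(model, ds_config)    # swap in compression-aware layers
#   ...                                           # fine-tune as usual
#   model = redundancy_clean(model, ds_config)    # fix/shrink the compressed layers afterwards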
def redundancy_clean(model, deepspeed_config, mpu=None):
"""
Remove the redundancy of a model
Args:
model (`torch.nn.Module`)
The model to compress.
deepspeed_config (dict or str)
The DeepSpeed config dict, or the path to a DeepSpeed JSON config file
mpu
The mpu module for Row/Column parallelism
"""
compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
layer_added_compress_methods_tmp = get_compress_methods(c_model, compress_methods, mpu=mpu)
# sort methods
order_list = [
WEIGHT_QUANTIZATION, SPARSE_PRUNING, ROW_PRUNING, HEAD_PRUNING, CHANNEL_PRUNING, ACTIVATION_QUANTIZATION
]
layer_added_compress_methods = sorted(layer_added_compress_methods_tmp,
key=lambda x: order_list.index(list(x[2].keys())[0]))
for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods:
stored_mask = []
need_mask = True if related_module_name_lists else False
for i, mnl in enumerate(module_name_lists):
for module_name in mnl:
mask = fix_compression(c_model, module_name, compression_technique, dim_reduction=need_mask)
if need_mask:
stored_mask.append(mask)
if need_mask:
for rmnl in related_module_name_lists[i]:
for j, module_name in enumerate(rmnl):
mask = fix_compression(c_model,
module_name,
compression_technique,
mask=stored_mask[j],
dim_reduction=True)
return model
def student_initialization(student_model, teacher_model, deepspeed_config):
'''
Given a student model and a teacher model, initialize the selected student layers
from the corresponding teacher layers (used together with layer reduction).
Args:
student_model (`torch.nn.Module`)
The model whose weights will be updated
teacher_model (`torch.nn.Module`)
The model that guides the student's learning
deepspeed_config (dict or str)
The DeepSpeed config dict, or the path to a DeepSpeed JSON config file
'''
config = get_compression_config(check_deepspeed_config(deepspeed_config))
compress_methods = config[LAYER_REDUCTION]
module_name_prefix = compress_methods[MODULE_NAME_PREFIX]
teacher_layer = compress_methods[TEACHER_LAYER]
student_layer = [i for i in range(len(teacher_layer))]
other_module_name = compress_methods[OTHER_MODULE_NAME]
'''
module_name_prefix (`str`)
The prefix name before the layer number.
Example 1: bert.encoder.layer, the prefix name for a BERT-base model
Example 2: transformer.h, the Hugging Face prefix name for GPT-2
teacher_layer (`list of integers`)
The teacher layers that will be used to reinitialize the student
Example 1: [1,3,5,7,9] means we match the 2nd/4th/6th/8th/10th layers of the teacher to the first 5 layers of the student
student_layer (`list` or None)
The student layers that need to be reinitialized
Example 1: None means we reinitialize all the layers
Example 2: [0,1,2,3,4] means we reinitialize the first 5 layers
other_module_name (`list of string`)
The extra modules whose teacher weights will be copied into the student
Example 1: ['bert.pooler', 'bert.embeddings', 'classifier'] means we apply the teacher's embedding/pooler/classifier weights to the student
Example 2: ['transformer.w', 'transformer.ln_f', 'lm_head'] means we apply the teacher's embedding-related modules to the student
Note that teacher_layer should match student_layer
'''
assert len(student_layer) == len(teacher_layer)
for s_name, t_name in zip(student_layer, teacher_layer):
s_module = recursive_getattr(student_model, module_name_prefix + '.' + str(s_name))
t_module = recursive_getattr(teacher_model, module_name_prefix + '.' + str(t_name))
for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
s_param.data.copy_(t_param.data)
for name in other_module_name:
s_module = recursive_getattr(student_model, name)
t_module = recursive_getattr(teacher_model, name)
print(name)
for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
s_param.data.copy_(t_param.data) | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/compression/compress.py | compress.py |
# DeepSpeed Team
#########################################
# Compression Methods
# It has several sub-components
# #########################################
COMPRESSION_TRAINING = "compression_training"
SHARED_PARAMETERS = "shared_parameters"
DIFFERENT_GROUPS = "different_groups"
TECHNIQUE_ENABLED = "enabled"
TECHNIQUE_SCHEDULE_OFFSET = "schedule_offset"
DIFFERENT_GROUPS_PARAMETERS = "params"
DIFFERENT_GROUPS_MODULE_SCOPE = "modules"
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT = "*"
DIFFERENT_GROUPS_RELATED_MODULE_SCOPE = "related_modules"
DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT = None
# COMPRESSION_TRAINING_ENABLED = "enabled"
# COMPRESSION_TRAINING_ENABLED_DEFAULT = False
####
# Layer Reduction
####
LAYER_REDUCTION = "layer_reduction"
LAYER_REDUCTION_ENABLED = "enabled"
LAYER_REDUCTION_ENABLED_DEFAULT = False
KEEP_NUMBER_LAYER = "keep_number_layer"
MODULE_NAME_PREFIX = "module_name_prefix"
TEACHER_LAYER = "teacher_layer"
OTHER_MODULE_NAME = "other_module_name"
####
# Weight Quantization
####
WEIGHT_QUANTIZATION = "weight_quantization"
WEIGHT_QUANTIZATION_PERIOD = "quantization_period"
WEIGHT_QUANTIZATION_PERIOD_DEFAULT = 1
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED = "quantize_weight_in_forward"
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT = False
WEIGHT_QUANTIZE_ENABLED = TECHNIQUE_ENABLED
WEIGHT_QUANTIZE_ENABLED_DEFAULT = False
WEIGHT_QUANTIZE_KERNEL = "quantizer_kernel"
WEIGHT_QUANTIZE_KERNEL_DEFAULT = False
WEIGHT_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 0
WEIGHT_QUANTIZE_GROUPS = "quantize_groups"
WEIGHT_QUANTIZE_GROUPS_DEFAULT = 1
WEIGHT_QUANTIZE_VERBOSE = "quantize_verbose"
WEIGHT_QUANTIZE_VERBOSE_DEFAULT = False
WEIGHT_QUANTIZE_TYPE = "quantization_type"
WEIGHT_QUANTIZE_TYPE_DEFAULT = "symmetric"
WEIGHT_QUANTIZE_SYMMETRIC = "symmetric"
WEIGHT_QUANTIZE_ASYMMETRIC = "asymmetric"
WEIGHT_QUANTIZE_ROUNDING = "rounding"
WEIGHT_QUANTIZE_ROUNDING_DEFAULT = "nearest"
WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING = "stochastic"
WEIGHT_QUANTIZE_NEAREST_ROUNDING = "nearest"
# maybe deleted for a cleaner version
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE = "fp16_mixed_quantize"
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED = "enabled"
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT = False
WEIGHT_QUANTIZE_CHANGE_RATIO = "quantize_change_ratio"
WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT = 0.001
WEIGHT_QUANTIZE_START_BITS = "start_bits"
WEIGHT_QUANTIZE_TARGET_BITS = "target_bits"
###
# Activation Quantization
###
ACTIVATION_QUANTIZATION = "activation_quantization"
ACTIVATION_QUANTIZATION_ENABLED = TECHNIQUE_ENABLED
ACTIVATION_QUANTIZATION_ENABLED_DEFAULT = False
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 1000
ACTIVATION_QUANTIZE_TYPE = "quantization_type"
ACTIVATION_QUANTIZE_TYPE_DEFAULT = "symmetric"
ACTIVATION_QUANTIZE_SYMMETRIC = "symmetric"
ACTIVATION_QUANTIZE_ASYMMETRIC = "asymmetric"
ACTIVATION_QUANTIZE_RANGE = 'range_calibration'
ACTIVATION_QUANTIZE_RANGE_DEFAULT = 'dynamic'
ACTIVATION_QUANTIZE_RANGE_STATIC = 'static'
ACTIVATION_QUANTIZE_RANGE_DYNAMIC = 'dynamic'
ACTIVATION_QUANTIZE_BITS = "bits"
###
# Sparse Pruning
###
SPARSE_PRUNING = "sparse_pruning"
SPARSE_PRUNING_ENABLED = TECHNIQUE_ENABLED
SPARSE_PRUNING_ENABLED_DEFAULT = False
SPARSE_PRUNING_METHOD = "method"
SPARSE_PRUNING_METHOD_DEFAULT = "l1"
SPARSE_PRUNING_METHOD_L1 = "l1"
SPARSE_PRUNING_METHOD_TOPK = "topk"
SPARSE_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
SPARSE_PRUNING_DENSE_RATIO = "dense_ratio"
###
# Row Pruning
###
ROW_PRUNING = "row_pruning"
ROW_PRUNING_ENABLED = TECHNIQUE_ENABLED
ROW_PRUNING_ENABLED_DEFAULT = False
ROW_PRUNING_METHOD = "method"
ROW_PRUNING_METHOD_DEFAULT = "l1"
ROW_PRUNING_METHOD_L1 = "l1"
ROW_PRUNING_METHOD_TOPK = "topk"
ROW_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
ROW_PRUNING_DENSE_RATIO = "dense_ratio"
###
# Head Pruning
###
HEAD_PRUNING = "head_pruning"
HEAD_PRUNING_ENABLED = TECHNIQUE_ENABLED
HEAD_PRUNING_ENABLED_DEFAULT = False
HEAD_PRUNING_METHOD = "method"
HEAD_PRUNING_METHOD_DEFAULT = "topk"
HEAD_PRUNING_METHOD_L1 = "l1"
HEAD_PRUNING_METHOD_TOPK = "topk"
HEAD_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
HEAD_PRUNING_NUM_HEADS = "num_heads"
HEAD_PRUNING_DENSE_RATIO = "dense_ratio"
###
# Channel Pruning
###
CHANNEL_PRUNING = "channel_pruning"
CHANNEL_PRUNING_ENABLED = TECHNIQUE_ENABLED
CHANNEL_PRUNING_ENABLED_DEFAULT = False
CHANNEL_PRUNING_METHOD = "method"
CHANNEL_PRUNING_METHOD_DEFAULT = "l1"
CHANNEL_PRUNING_METHOD_L1 = "l1"
CHANNEL_PRUNING_METHOD_TOPK = "topk"
CHANNEL_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET
CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000
CHANNEL_PRUNING_DENSE_RATIO = "dense_ratio" | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/compression/constants.py | constants.py |
# DeepSpeed Team
import torch
from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress
from .constants import *
def recursive_getattr(model, module_name):
"""
Recursively get the attribute of a module.
Args:
model (`torch.nn.Module`)
The model to get the attribute from.
module_name (`str`)
The name of the module to get the attribute from.
"""
split_list = module_name.split('.')
output = model
for name in split_list:
output = getattr(output, name)
return output
def recursive_setattr(model, module_name, module):
"""
Recursively set the attribute of a module.
Args:
model (`torch.nn.Module`)
The model to set the attribute in.
module_name (`str`)
The name of the module to set the attribute in.
module (`torch.nn.Module`)
The module to set the attribute to.
"""
split_list = module_name.split('.')
output = model
for name in split_list[:-1]:
output = getattr(output, name)
output.__setattr__(split_list[-1], module)
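# Illustrative usage sketch of the two helpers above (the module path 'encoder.fc' is a
# hypothetical placeholder, not part of this API):
#   fc = recursive_getattr(model, 'encoder.fc')                      # fetch a nested sub-module
#   recursive_setattr(model, 'encoder.fc', torch.nn.Identity())      # swap it in place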
def module_replacement(model, module_name, compression_technique=None, mpu=None):
"""
Replace a module with a new module.
Args:
model (`torch.nn.Module`)
The model to replace the module in.
module_name (`str`)
The name of the module to replace.
compression_technique (`dict`)
The compression technique configuration to apply to the new module.
mpu (optional)
Model parallelism unit, used to recognize and rebuild model-parallel layers.
"""
# Get the old module
old_module = recursive_getattr(model, module_name)
need_bias = False
if hasattr(old_module, 'bias') and old_module.bias is not None:
need_bias = True
# Initialize the new module
if isinstance(old_module, LinearLayer_Compress) or isinstance(old_module, torch.nn.Linear):
if isinstance(old_module, LinearLayer_Compress):
new_module = old_module
else:
new_module = LinearLayer_Compress(old_module.in_features, old_module.out_features,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif isinstance(old_module, Conv2dLayer_Compress) or isinstance(old_module, torch.nn.Conv2d):
if isinstance(old_module, Conv2dLayer_Compress):
new_module = old_module
else:
new_module = Conv2dLayer_Compress(old_module.in_channels, old_module.out_channels, old_module.kernel_size, old_module.stride, old_module.padding, \
old_module.dilation, old_module.groups, need_bias, \
old_module.padding_mode).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif isinstance(old_module, torch.nn.BatchNorm2d):
new_module = BNLayer_Compress(old_module.num_features, old_module.eps, old_module.momentum, old_module.affine,
old_module.track_running_stats).to(old_module.weight.device,
old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
new_module.running_mean.data = old_module.running_mean.data
new_module.running_var.data = old_module.running_var.data
elif isinstance(old_module, Embedding_Compress) or isinstance(old_module, torch.nn.Embedding):
if isinstance(old_module, Embedding_Compress):
new_module = old_module
else:
new_module = Embedding_Compress(old_module.num_embeddings, old_module.embedding_dim, old_module.padding_idx, old_module.max_norm, old_module.norm_type, \
old_module.scale_grad_by_freq, old_module.sparse).to(device=old_module.weight.device, dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
elif mpu is not None and (isinstance(old_module, ColumnParallelLinear_Compress)
or isinstance(old_module, mpu.ColumnParallelLinear)):
if isinstance(old_module, ColumnParallelLinear_Compress):
new_module = old_module
else:
new_module = ColumnParallelLinear_Compress(mpu,
old_module.input_size,
old_module.output_size,
gather_output=old_module.gather_output,
skip_bias_add=old_module.skip_bias_add,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
elif mpu is not None and (isinstance(old_module, RowParallelLinear_Compress)
or isinstance(old_module, mpu.RowParallelLinear)):
if isinstance(old_module, RowParallelLinear_Compress):
new_module = old_module
else:
new_module = RowParallelLinear_Compress(mpu,
old_module.input_size,
old_module.output_size,
input_is_parallel=old_module.input_is_parallel,
skip_bias_add=old_module.skip_bias_add,
bias=need_bias).to(device=old_module.weight.device,
dtype=old_module.weight.dtype)
new_module.weight.data = old_module.weight.data
if need_bias:
new_module.bias.data = old_module.bias.data
else:
new_module = None
if compression_technique is not None:
for k, v in compression_technique.items():
if k == SPARSE_PRUNING:
if v[SPARSE_PRUNING_ENABLED]:
new_module.enable_sparse_pruning(v[SPARSE_PRUNING_DENSE_RATIO], v[SPARSE_PRUNING_METHOD])
elif k == ROW_PRUNING:
if v[ROW_PRUNING_ENABLED]:
new_module.enable_row_pruning(v[ROW_PRUNING_DENSE_RATIO], v[ROW_PRUNING_METHOD])
elif k == HEAD_PRUNING:
if v[HEAD_PRUNING_ENABLED]:
new_module.enable_head_pruning(v[HEAD_PRUNING_DENSE_RATIO], v[HEAD_PRUNING_METHOD],
v[HEAD_PRUNING_NUM_HEADS])
elif k == ACTIVATION_QUANTIZATION:
if v[ACTIVATION_QUANTIZATION_ENABLED]:
new_module.enable_activation_quantization(v[ACTIVATION_QUANTIZE_BITS], v[ACTIVATION_QUANTIZE_TYPE],
v[ACTIVATION_QUANTIZE_RANGE])
elif k == WEIGHT_QUANTIZATION:
if v[WEIGHT_QUANTIZE_ENABLED]:
new_module.enable_weight_quantization(v[WEIGHT_QUANTIZE_START_BITS],
v[WEIGHT_QUANTIZE_TARGET_BITS],
v[WEIGHT_QUANTIZATION_PERIOD],
v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED],
v[WEIGHT_QUANTIZE_TYPE], v[WEIGHT_QUANTIZE_GROUPS])
elif k == CHANNEL_PRUNING:
if v[CHANNEL_PRUNING_ENABLED]:
new_module.enable_channel_pruning(v[CHANNEL_PRUNING_DENSE_RATIO], v[CHANNEL_PRUNING_METHOD])
else:
raise NotImplementedError('Compression technique {} is not implemented'.format(k))
# Replace the old module with the new one
recursive_setattr(model, module_name, new_module)
def is_module_compressible(module, mpu=None):
ret = isinstance(module, torch.nn.Linear) or \
isinstance(module, torch.nn.Conv2d) or \
isinstance(module, torch.nn.Embedding) or \
isinstance(module, torch.nn.BatchNorm2d)
if mpu is not None:
ret = ret or isinstance(module, mpu.RowParallelLinear) or isinstance(module, mpu.ColumnParallelLinear)
return ret
def compression_preparation(model, compression_techinique_list, mpu):
"""
Prepare a model for the configured compression techniques.
Args:
model (`torch.nn.Module`)
The model to prepare for compression.
compression_techinique_list (`list`)
List of entries unpacked as (module_name_lists, group_key, compression_technique), where
module_name_lists is a list of lists of module names and compression_technique is the
technique configuration dict applied to those modules.
mpu (optional)
Model parallelism unit, used to recognize model-parallel layers.
"""
# First, replace every compressible module with its compression wrapper
for module_name, module in model.named_modules():
if is_module_compressible(module, mpu):
module_replacement(model, module_name, mpu=mpu)
for module_name_lists, _, compression_technique in compression_techinique_list:
for mnl in module_name_lists:
for module_name in mnl:
module_replacement(model, module_name, compression_technique)
return model
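# Illustrative sketch of the expected list layout (the module name and group key are
# hypothetical placeholders); each entry is unpacked by the loop above as
# (module_name_lists, group_key, compression_technique):
#   compression_techinique_list = [
#       ([['encoder.layer.0.output.dense']],          # list of lists of module names
#        'wq1',                                       # group key (unused in this function)
#        {WEIGHT_QUANTIZATION: {...}}),               # technique dict keyed by the technique constants
#   ]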
def fix_compression(model, module_name, compression_technique, mask=None, dim_reduction=False):
"""
Fix the compression technique of a module.
Args:
model (`torch.nn.Module`)
The model to fix the compression technique of.
module_name (`str`)
The name of the module to fix the compression technique of.
compression_technique (`dict`)
The compression technique configuration to finalize for the module.
mask (`torch.Tensor`, optional)
An externally provided pruning mask (e.g., propagated from a related module).
dim_reduction (`bool`)
Whether to physically shrink the pruned dimensions instead of only masking them.
"""
# Fetch the (already wrapped) module and finalize the requested compression on it
module = recursive_getattr(model, module_name)
for k, v in compression_technique.items():
if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[WEIGHT_QUANTIZE_ENABLED]:
return module.fix_weight_quantization()
elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]:
return module.fix_sparse_pruning_helper()
elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None):
return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction)
elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None):
return module.fix_head_pruning_helper(mask, v[HEAD_PRUNING_NUM_HEADS], dim_reduction=dim_reduction)
elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None):
return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction)
def convert_conv1d_to_linear(model, convert_type):
'''
Helper function to convert Conv1D modules of the given type to nn.Linear (e.g., for GPT-2 models from Hugging Face)
'''
if hasattr(model, 'module'):
c_model = model.module
else:
c_model = model
for name, module in c_model.named_modules():
if isinstance(module, convert_type):
old_module = recursive_getattr(c_model, name)
new_module = torch.nn.Linear(old_module.weight.data.size(0),
old_module.weight.data.size(1),
bias=True if old_module.bias is not None else False)
new_module.weight.data = old_module.weight.data.t().contiguous()
if new_module.bias is not None:
new_module.bias.data = old_module.bias.data.view(-1)
recursive_setattr(c_model, name, new_module)
return model | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/compression/helper.py | helper.py |
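# Illustrative usage sketch (the Conv1D import path is an assumption about the Hugging Face
# transformers package layout and may differ between versions):
#   from transformers.pytorch_utils import Conv1D
#   model = convert_conv1d_to_linear(model, Conv1D)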
# DeepSpeed Team
from .constants import *
import copy
from ..runtime.config_utils import get_scalar_param
def get_compression_config(param_dict):
output = {}
if COMPRESSION_TRAINING not in param_dict.keys():
param_dict[COMPRESSION_TRAINING] = {}
sub_param_dict = param_dict[COMPRESSION_TRAINING]
output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict)
output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict)
output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict)
output[ROW_PRUNING] = get_row_pruning(sub_param_dict)
output[HEAD_PRUNING] = get_head_pruning(sub_param_dict)
output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict)
output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict)
return output
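# Illustrative sketch of a minimal config fragment that enables weight quantization
# (the group name and bit values are placeholders, not recommendations):
#   ds_config = {
#       COMPRESSION_TRAINING: {
#           WEIGHT_QUANTIZATION: {
#               SHARED_PARAMETERS: {WEIGHT_QUANTIZE_ENABLED: True},
#               DIFFERENT_GROUPS: {
#                   'wq1': {
#                       DIFFERENT_GROUPS_PARAMETERS: {
#                           WEIGHT_QUANTIZE_START_BITS: 12,
#                           WEIGHT_QUANTIZE_TARGET_BITS: 8,
#                       },
#                   },
#               },
#           },
#       },
#   }
#   compression_config = get_compression_config(ds_config)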
def get_layer_reduction(param_dict):
output = {}
output[LAYER_REDUCTION_ENABLED] = LAYER_REDUCTION_ENABLED_DEFAULT
if get_layer_reduction_enabled(param_dict):
output[LAYER_REDUCTION_ENABLED] = get_layer_reduction_enabled(param_dict)
for key, val in get_layer_reduction_params(param_dict).items():
output[key] = val
return output
def get_layer_reduction_enabled(param_dict):
if LAYER_REDUCTION in param_dict.keys():
return get_scalar_param(param_dict[LAYER_REDUCTION], LAYER_REDUCTION_ENABLED, LAYER_REDUCTION_ENABLED_DEFAULT)
else:
return False
def get_layer_reduction_params(param_dict):
if LAYER_REDUCTION in param_dict.keys():
layer_reduction_params = copy.copy(param_dict[LAYER_REDUCTION])
layer_reduction_params.pop(LAYER_REDUCTION_ENABLED)
return layer_reduction_params
else:
return False
def get_quantize_enabled(param_dict):
if COMPRESSION_TRAINING not in param_dict.keys():
return False
sub_param_dict = param_dict[COMPRESSION_TRAINING]
output = get_weight_quantization_shared_parameters(sub_param_dict)
return output[WEIGHT_QUANTIZE_ENABLED]
def get_weight_quantization(param_dict):
output = {}
if WEIGHT_QUANTIZATION not in param_dict.keys():
param_dict[WEIGHT_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[WEIGHT_QUANTIZATION]
# shared parameters
output[SHARED_PARAMETERS] = get_weight_quantization_shared_parameters(sub_param_dict)
# each sub-groups
if output[SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Weigh Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_weight_quantization_different_groups(sub_param_dict)
return output
def get_weight_quantization_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[WEIGHT_QUANTIZE_ENABLED] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ENABLED,
WEIGHT_QUANTIZE_ENABLED_DEFAULT)
output[WEIGHT_QUANTIZE_KERNEL] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_KERNEL,
WEIGHT_QUANTIZE_KERNEL_DEFAULT)
output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_SCHEDULE_OFFSET,
WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
output[WEIGHT_QUANTIZE_GROUPS] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_GROUPS,
WEIGHT_QUANTIZE_GROUPS_DEFAULT)
output[WEIGHT_QUANTIZE_VERBOSE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_VERBOSE,
WEIGHT_QUANTIZE_VERBOSE_DEFAULT)
output[WEIGHT_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_TYPE,
WEIGHT_QUANTIZE_TYPE_DEFAULT)
output[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] = get_scalar_param(sub_param_dict,
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED,
WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT)
assert output[WEIGHT_QUANTIZE_TYPE] in [
WEIGHT_QUANTIZE_SYMMETRIC, WEIGHT_QUANTIZE_ASYMMETRIC
], f"Invalid weight quantize type. Supported types: [{WEIGHT_QUANTIZE_SYMMETRIC}, {WEIGHT_QUANTIZE_ASYMMETRIC}]"
output[WEIGHT_QUANTIZE_ROUNDING] = get_scalar_param(sub_param_dict, WEIGHT_QUANTIZE_ROUNDING,
WEIGHT_QUANTIZE_ROUNDING_DEFAULT)
assert output[WEIGHT_QUANTIZE_ROUNDING] in [
WEIGHT_QUANTIZE_NEAREST_ROUNDING, WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING
], f"Invalid weight quantize rounding. Supported types: [{WEIGHT_QUANTIZE_NEAREST_ROUNDING}, {WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING}]"
if WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE in sub_param_dict.keys():
output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = get_scalar_param(
sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED,
WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT)
output[WEIGHT_QUANTIZE_CHANGE_RATIO] = get_scalar_param(
sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], WEIGHT_QUANTIZE_CHANGE_RATIO,
WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT)
else:
output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
else:
output[WEIGHT_QUANTIZE_ENABLED] = WEIGHT_QUANTIZE_ENABLED_DEFAULT
output[WEIGHT_QUANTIZE_KERNEL] = WEIGHT_QUANTIZE_KERNEL_DEFAULT
output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
output[WEIGHT_QUANTIZE_GROUPS] = WEIGHT_QUANTIZE_GROUPS_DEFAULT
output[WEIGHT_QUANTIZE_VERBOSE] = WEIGHT_QUANTIZE_VERBOSE_DEFAULT
output[WEIGHT_QUANTIZE_TYPE] = WEIGHT_QUANTIZE_TYPE_DEFAULT
output[WEIGHT_QUANTIZE_ROUNDING] = WEIGHT_QUANTIZE_ROUNDING_DEFAULT
output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
return output
def get_weight_quantization_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert WEIGHT_QUANTIZE_START_BITS in group_dict.keys(
), f"{WEIGHT_QUANTIZE_START_BITS} must be specified for weight quantization group {name}"
assert WEIGHT_QUANTIZE_TARGET_BITS in group_dict.keys(
), f"{WEIGHT_QUANTIZE_TARGET_BITS} must be specified for weight quantization group {name}"
group_dict[WEIGHT_QUANTIZATION_PERIOD] = get_scalar_param(group_dict, WEIGHT_QUANTIZATION_PERIOD,
WEIGHT_QUANTIZATION_PERIOD_DEFAULT)
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_activation_quantization(param_dict):
output = {}
if ACTIVATION_QUANTIZATION not in param_dict.keys():
param_dict[ACTIVATION_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[ACTIVATION_QUANTIZATION]
# shared parameters
output[SHARED_PARAMETERS] = get_activation_quantization_shared_parameters(sub_param_dict)
# each sub-groups
if output[SHARED_PARAMETERS][ACTIVATION_QUANTIZATION_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Activation Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_activation_quantization_different_groups(sub_param_dict)
return output
def get_activation_quantization_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[ACTIVATION_QUANTIZATION_ENABLED] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZATION_ENABLED,
ACTIVATION_QUANTIZATION_ENABLED_DEFAULT)
output[ACTIVATION_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_TYPE,
ACTIVATION_QUANTIZE_TYPE_DEFAULT)
assert output[ACTIVATION_QUANTIZE_TYPE] in [
ACTIVATION_QUANTIZE_SYMMETRIC, ACTIVATION_QUANTIZE_ASYMMETRIC
], f"Invalid activation quantize type. Supported types: [{ACTIVATION_QUANTIZE_SYMMETRIC}, {ACTIVATION_QUANTIZE_ASYMMETRIC}]"
output[ACTIVATION_QUANTIZE_RANGE] = get_scalar_param(sub_param_dict, ACTIVATION_QUANTIZE_RANGE,
ACTIVATION_QUANTIZE_RANGE_DEFAULT)
assert output[ACTIVATION_QUANTIZE_RANGE] in [
ACTIVATION_QUANTIZE_RANGE_DYNAMIC, ACTIVATION_QUANTIZE_RANGE_STATIC
], f"Invalid activation quantize range calibration. Supported types: [{ACTIVATION_QUANTIZE_RANGE_DYNAMIC}, {ACTIVATION_QUANTIZE_RANGE_STATIC}]"
output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict,
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET,
ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
else:
output[ACTIVATION_QUANTIZATION_ENABLED] = ACTIVATION_QUANTIZATION_ENABLED_DEFAULT
output[ACTIVATION_QUANTIZE_TYPE] = ACTIVATION_QUANTIZE_TYPE_DEFAULT
output[ACTIVATION_QUANTIZE_RANGE] = ACTIVATION_QUANTIZE_RANGE_DEFAULT
output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
return output
def get_activation_quantization_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert ACTIVATION_QUANTIZE_BITS in group_dict.keys(
), f"{ACTIVATION_QUANTIZE_BITS} must be specified for activation quantization group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_sparse_pruning(param_dict):
output = {}
if SPARSE_PRUNING not in param_dict.keys():
param_dict[SPARSE_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[SPARSE_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_sparse_pruning_shared_parameters(sub_param_dict)
# each sub-groups
if output[SHARED_PARAMETERS][SPARSE_PRUNING_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Sparse Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_sparse_pruning_different_groups(sub_param_dict)
return output
def get_sparse_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[SPARSE_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_ENABLED,
SPARSE_PRUNING_ENABLED_DEFAULT)
output[SPARSE_PRUNING_METHOD] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_METHOD,
SPARSE_PRUNING_METHOD_DEFAULT)
assert output[SPARSE_PRUNING_METHOD] in [
SPARSE_PRUNING_METHOD_L1, SPARSE_PRUNING_METHOD_TOPK
], f"Invalid sparse pruning method. Supported types: [{SPARSE_PRUNING_METHOD_L1}, {SPARSE_PRUNING_METHOD_TOPK}]"
output[SPARSE_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, SPARSE_PRUNING_SCHEDULE_OFFSET,
SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT)
else:
output[SPARSE_PRUNING_ENABLED] = SPARSE_PRUNING_ENABLED_DEFAULT
output[SPARSE_PRUNING_METHOD] = SPARSE_PRUNING_METHOD_DEFAULT
output[SPARSE_PRUNING_SCHEDULE_OFFSET] = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_sparse_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert SPARSE_PRUNING_DENSE_RATIO in group_dict.keys(
), f"{SPARSE_PRUNING_DENSE_RATIO} must be specified for sparse pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_row_pruning(param_dict):
output = {}
if ROW_PRUNING not in param_dict.keys():
param_dict[ROW_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[ROW_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_row_pruning_shared_parameters(sub_param_dict)
# each sub-groups
if output[SHARED_PARAMETERS][ROW_PRUNING_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Row Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_row_pruning_different_groups(sub_param_dict)
return output
def get_row_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[ROW_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, ROW_PRUNING_ENABLED,
ROW_PRUNING_ENABLED_DEFAULT)
output[ROW_PRUNING_METHOD] = get_scalar_param(sub_param_dict, ROW_PRUNING_METHOD, ROW_PRUNING_METHOD_DEFAULT)
assert output[ROW_PRUNING_METHOD] in [
ROW_PRUNING_METHOD_L1, ROW_PRUNING_METHOD_TOPK
], f"Invalid row pruning method. Supported types: [{ROW_PRUNING_METHOD_L1}, {ROW_PRUNING_METHOD_TOPK}]"
output[ROW_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, ROW_PRUNING_SCHEDULE_OFFSET,
ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT)
else:
output[ROW_PRUNING_ENABLED] = ROW_PRUNING_ENABLED_DEFAULT
output[ROW_PRUNING_METHOD] = ROW_PRUNING_METHOD_DEFAULT
output[ROW_PRUNING_SCHEDULE_OFFSET] = ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_row_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert ROW_PRUNING_DENSE_RATIO in group_dict.keys(
), f"{ROW_PRUNING_DENSE_RATIO} must be specified for row pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_head_pruning(param_dict):
output = {}
if HEAD_PRUNING not in param_dict.keys():
param_dict[HEAD_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[HEAD_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_head_pruning_shared_parameters(sub_param_dict)
# each sub-groups
if output[SHARED_PARAMETERS][HEAD_PRUNING_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Head Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_head_pruning_different_groups(sub_param_dict)
return output
def get_head_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[HEAD_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, HEAD_PRUNING_ENABLED,
HEAD_PRUNING_ENABLED_DEFAULT)
output[HEAD_PRUNING_METHOD] = get_scalar_param(sub_param_dict, HEAD_PRUNING_METHOD,
HEAD_PRUNING_METHOD_DEFAULT)
assert output[HEAD_PRUNING_METHOD] in [
HEAD_PRUNING_METHOD_L1, HEAD_PRUNING_METHOD_TOPK
], f"Invalid head pruning method. Supported types: [{HEAD_PRUNING_METHOD_L1}, {HEAD_PRUNING_METHOD_TOPK}]"
output[HEAD_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, HEAD_PRUNING_SCHEDULE_OFFSET,
HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT)
if output[HEAD_PRUNING_ENABLED]:
assert HEAD_PRUNING_NUM_HEADS in sub_param_dict.keys(
), f"{HEAD_PRUNING_NUM_HEADS} must be specified for head pruning"
output[HEAD_PRUNING_NUM_HEADS] = sub_param_dict[HEAD_PRUNING_NUM_HEADS]
else:
output[HEAD_PRUNING_ENABLED] = HEAD_PRUNING_ENABLED_DEFAULT
output[HEAD_PRUNING_METHOD] = HEAD_PRUNING_METHOD_DEFAULT
output[HEAD_PRUNING_SCHEDULE_OFFSET] = HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_head_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert HEAD_PRUNING_DENSE_RATIO in group_dict.keys(
), f"dense_ratio must be specified for head pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output
def get_channel_pruning(param_dict):
output = {}
if CHANNEL_PRUNING not in param_dict.keys():
param_dict[CHANNEL_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
sub_param_dict = param_dict[CHANNEL_PRUNING]
# shared parameters
output[SHARED_PARAMETERS] = get_channel_pruning_shared_parameters(sub_param_dict)
# each sub-groups
if output[SHARED_PARAMETERS][CHANNEL_PRUNING_ENABLED]:
assert DIFFERENT_GROUPS in sub_param_dict.keys(
), f"Sparse Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
output[DIFFERENT_GROUPS] = get_channel_pruning_different_groups(sub_param_dict)
return output
def get_channel_pruning_shared_parameters(param_dict):
output = {}
if SHARED_PARAMETERS in param_dict.keys():
sub_param_dict = param_dict[SHARED_PARAMETERS]
output[CHANNEL_PRUNING_ENABLED] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_ENABLED,
CHANNEL_PRUNING_ENABLED_DEFAULT)
output[CHANNEL_PRUNING_METHOD] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_METHOD,
CHANNEL_PRUNING_METHOD_DEFAULT)
assert output[CHANNEL_PRUNING_METHOD] in [
CHANNEL_PRUNING_METHOD_L1, CHANNEL_PRUNING_METHOD_TOPK
], f"Invalid channel pruning method. Supported types: [{CHANNEL_PRUNING_METHOD_L1}, {CHANNEL_PRUNING_METHOD_TOPK}]"
output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(sub_param_dict, CHANNEL_PRUNING_SCHEDULE_OFFSET,
CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT)
else:
output[CHANNEL_PRUNING_ENABLED] = CHANNEL_PRUNING_ENABLED_DEFAULT
output[CHANNEL_PRUNING_METHOD] = CHANNEL_PRUNING_METHOD_DEFAULT
output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT
return output
def get_channel_pruning_different_groups(param_dict):
output = {}
sub_param_dict = param_dict[DIFFERENT_GROUPS]
def get_params(name, group_dict):
assert CHANNEL_PRUNING_DENSE_RATIO in group_dict.keys(
), f"{CHANNEL_PRUNING_DENSE_RATIO} must be specified for channel pruning group {name}"
return group_dict
for k, v in sub_param_dict.items():
output[k] = {}
output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(k, sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(sub_param_dict[k], DIFFERENT_GROUPS_MODULE_SCOPE,
DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
sub_param_dict[k], DIFFERENT_GROUPS_RELATED_MODULE_SCOPE, DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)
return output | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/compression/config.py | config.py |
# DeepSpeed Team
import torch
import math
from torch import nn
from torch.nn import init
import deepspeed.comm as dist
from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer
from deepspeed.utils import logger
g_mpu = None
class QuantAct(nn.Module):
"""
Class to quantize given activations. Note that when this class is used for inference, the input activation quantization
range is fixed for all tokens/images, which generally costs some accuracy but gives better latency.
Parameters:
----------
act_range_momentum : float, default 0.95
Momentum for updating the activation quantization range.
quant_mode : str, default 'symmetric'
Quantization scheme, either 'symmetric' or 'asymmetric'.
"""
def __init__(self, act_range_momentum=0.95, quant_mode='symmetric'):
super(QuantAct, self).__init__()
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
if quant_mode == 'symmetric':
self.act_function = SymQuantizer.apply
else:
self.act_function = AsymQuantizer.apply
self.register_buffer('x_min_max', torch.zeros(2))
def forward(self, x, num_bits, *args):
"""
x: the activation that we need to quantize
num_bits: the number of bits we need to quantize the activation to
*args: extra arguments that are unused here but kept to align with the interface of the other quantization functions
"""
if self.training:
x_min = x.data.min()
x_max = x.data.max()
# Initialization
if self.x_min_max[0] == self.x_min_max[1]:
self.x_min_max[0] = x_min
self.x_min_max[1] = x_max
# if do not need momentum, please set self.act_range_momentum = 0
self.x_min_max[0] = self.x_min_max[0] * self.act_range_momentum + x_min * (1 - self.act_range_momentum)
self.x_min_max[1] = self.x_min_max[1] * self.act_range_momentum + x_max * (1 - self.act_range_momentum)
x_q = self.act_function(x, num_bits, self.x_min_max[0], self.x_min_max[1])
return x_q
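# Illustrative usage sketch (shapes and bit width are placeholders): while training,
# the module keeps a running min/max of its inputs and quantizes with that range.
#   quant_act = QuantAct(act_range_momentum=0.95, quant_mode='symmetric')
#   x_q = quant_act(torch.randn(8, 128), 8)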
class Embedding_Compress(nn.Embedding):
def __init__(self, *kargs):
super(Embedding_Compress, self).__init__(*kargs)
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.weight_quantization_enabled = False
def extra_repr(self):
return 'num_embeddings={}, embedding_dim={}, weight_quantization={}'.format(
self.num_embeddings, self.embedding_dim, self.weight.target_bits)
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if self.weight.target_bits >= 3:
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
elif self.weight.target_bits == 2:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
self.weight_quantizer = TernaryQuantizer.apply
elif self.weight.target_bits == 1:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
self.weight_quantizer = BinaryQuantizer.apply
# for embedding, we always use token-wise quantization
self.weight_quantize_num_groups = self.weight.size(0)
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def forward(self, input):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
else:
weight = self.weight
out = nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.sparse)
return out
class LinearLayer_Compress(nn.Linear):
"""
Linear layer with compression.
"""
def __init__(self, *kargs, bias=True):
super(LinearLayer_Compress, self).__init__(*kargs, bias=bias)
self.sparse_pruning_method = None
self.row_pruning_method = None
self.head_pruning_method = None
self.activation_quantization_method = None
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.weight_quantization_enabled = False
self.sparse_pruning_enabled = False
self.row_pruning_enabled = False
self.head_pruning_enabled = False
self.activation_quantization_enabled = False
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}, sparse pruning={}, row pruning={}, head pruning={}, activation quantization={}, weight_quantization={}'.format(
self.in_features, self.out_features, self.bias is not None, self.sparse_pruning_method is not None, \
self.row_pruning_method is not None, self.head_pruning_method is not None, self.activation_quantization_method is not None, self.weight.target_bits)
def enable_sparse_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.sparse_pruning_ratio = ratio
self.sparse_pruning_method = method
if method == 'l1':
weight_norm = torch.abs(self.weight.data)
mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
mask = mask.view(self.weight.size())
mask = mask.to(self.weight.device)
elif method == 'topk':
self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('sparse_pruning_mask', mask)
def enable_row_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.row_pruning_ratio = ratio
self.row_pruning_method = method
if method == 'l1':
# compute the l1 norm of each row (i.e., each output neuron)
weight_norm = torch.norm(self.weight.data, p=1, dim=1)
mask = TopKBinarizer.apply(weight_norm, self.row_pruning_ratio, False)
mask = mask.view(-1, 1)
mask = mask.to(self.weight.device)
elif method == 'topk':
self.row_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1))
self.row_mask_scores.data = self.row_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.row_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('row_pruning_mask', mask)
def enable_head_pruning(self, ratio, method, num_heads):
# Here, we support only topk based pruning
self.num_heads = num_heads
self.head_pruning_ratio = ratio
self.head_pruning_method = method
if method not in ['topk']:
raise NotImplementedError
else:
self.head_pruning_ratio = ratio
self.head_pruning_scores = nn.Parameter(torch.Tensor(1,
self.num_heads)) # we apply the pruning to O matrix
self.head_pruning_scores.data = self.head_pruning_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.head_pruning_scores, a=math.sqrt(5))
def fix_sparse_pruning_helper(self):
mask = self.get_mask(pruning_type='sparse')
self.weight.data = self.weight.data * mask
del self.sparse_pruning_mask
if self.sparse_pruning_method == 'topk':
del self.sparse_mask_scores
self.sparse_pruning_method = None
self.sparse_pruning_enabled = False
return None
def fix_row_col_pruning_helper(self, mask=None, dim_reduction=False):
# This function is used for row/col pruning
# particularly, if we have two back-to-back layers, F1 and F2; when
# we remove rows from F1, we also need to remove columns from F2
# However, if we only have one layer, F1, then we only need to mask pruned
# rows as 0 in F1
if mask is None:
mask = self.get_mask(pruning_type='row').bool()
if dim_reduction:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[mask.view(-1), :])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
self.out_features = self.weight.size(0)
else:
self.weight.data = self.weight.data * mask.view(-1, 1)
if self.bias is not None:
self.bias.data = self.bias.data * mask.view(-1)
del self.row_pruning_mask
if self.row_pruning_method == 'topk':
del self.row_mask_scores
self.row_pruning_method = None
else:
# this is generally for column pruning
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[:, mask.view(-1)])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
self.in_features = self.weight.size(1)
mask = None
self.row_pruning_enabled = False
return mask
def fix_head_pruning_helper(self, mask=None, num_heads=None, dim_reduction=False):
# similar as row/col pruning, head pruning also needs to prune QKV which is associated with O matrix
num_heads = num_heads if num_heads else self.num_heads
if mask is None:
if self.head_pruning_method == 'topk':
mask = self.get_mask(pruning_type='head').bool()
if dim_reduction:
shape = self.weight.size(0)
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data.t().reshape(num_heads,
-1)[mask.view(-1), :].reshape(-1,
shape).t())
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
else:
shape = self.weight.size()
self.weight.data = (self.weight.data.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(
shape[1], shape[0]).t()
if self.head_pruning_method == 'topk':
del self.head_pruning_scores
self.head_pruning_method = None
else:
raise NotImplementedError
else:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
shape = self.weight.size(1)
self.weight = nn.Parameter(self.weight.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape))
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1))
self.head_pruning_enabled = False
return mask
def get_mask(self, pruning_type='row'):
if pruning_type == 'sparse':
if self.sparse_pruning_method == 'l1':
return self.sparse_pruning_mask.to(self.weight.device)
elif self.sparse_pruning_method == 'topk':
return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
else:
raise NotImplementedError
if pruning_type == 'row':
if self.row_pruning_method == 'l1':
return self.row_pruning_mask.to(self.weight.device)
elif self.row_pruning_method == 'topk':
return TopKBinarizer.apply(self.row_mask_scores, self.row_pruning_ratio, False)
else:
raise NotImplementedError
elif pruning_type == 'head':
if self.head_pruning_method == 'topk':
return TopKBinarizer.apply(self.head_pruning_scores, self.head_pruning_ratio, False)
else:
raise NotImplementedError
else:
raise NotImplementedError
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if self.weight.target_bits >= 3:
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
elif self.weight.target_bits == 2:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization'
self.weight_quantizer = TernaryQuantizer.apply
elif self.weight.target_bits == 1:
assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization'
self.weight_quantizer = BinaryQuantizer.apply
self.weight_quantize_num_groups = num_groups
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def enable_activation_quantization(self, bits, quantization_type, range_calibration):
assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
self.activation_quantization_bits = bits
self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
if range_calibration == 'static':
self.activation_quantizer = QuantAct(quant_mode=quantization_type)
else:
if quantization_type == 'symmetric':
self.activation_quantizer = SymQuantizer.apply
else:
self.activation_quantizer = AsymQuantizer.apply
def head_pruning_reshape(self, w, mask):
shape = w.shape
return (w.t().reshape(self.num_heads, -1) * mask.view(-1, 1)).reshape(shape[1], shape[0]).t()
def forward(self, input, skip_bias_add=False):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
bias = self.bias
else:
weight = self.weight
bias = self.bias
if self.sparse_pruning_enabled and self.sparse_pruning_method:
mask = self.get_mask(pruning_type='sparse')
weight = weight * mask.view(self.weight.size())
if self.row_pruning_enabled and self.row_pruning_method:
mask = self.get_mask(pruning_type='row')
weight = weight * mask.view(-1, 1)
if bias is not None:
bias = bias * mask.view(-1)
if self.head_pruning_enabled and self.head_pruning_method:
mask = self.get_mask(pruning_type='head')
weight = self.head_pruning_reshape(weight, mask)
if self.activation_quantization_enabled:
if 'dynamic' in self.activation_quantization_method:
num_groups = input.numel() // input.size(-1)
else:
num_groups = 1
input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
if skip_bias_add:
# used for mpu linear layers
output = nn.functional.linear(input, weight, None)
return output, bias
else:
output = nn.functional.linear(input, weight, bias)
return output
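# Illustrative usage sketch (sizes and ratio are placeholders): wrap a dense layer and
# enable l1 sparse pruning; the mask is applied on-the-fly in forward() once the enable
# flag is set (this flag is normally toggled elsewhere in the compression pipeline based
# on the configured schedule offset).
#   layer = LinearLayer_Compress(768, 768, bias=True)
#   layer.enable_sparse_pruning(ratio=0.5, method='l1')
#   layer.sparse_pruning_enabled = True
#   y = layer(torch.randn(4, 768))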
class Conv2dLayer_Compress(nn.Conv2d):
"""
Conv2D layer with compression.
"""
def __init__(self, *kargs):
super(Conv2dLayer_Compress, self).__init__(*kargs)
self.sparse_pruning_method = None
self.channel_pruning_method = None
self.activation_quantization_method = None
self.weight.start_bits = None
self.weight.target_bits = None
self.weight.q_period = None
self.weight_quantization_enabled_in_forward = False
self.sparse_pruning_enabled = False
self.channel_pruning_enabled = False
self.activation_quantization_enabled = False
def __repr__(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0, ) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1, ) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0, ) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
output = s.format(**self.__dict__)
return output + ' sparse pruning={}, channel pruning={}, activation quantization={}, weight_quantization={}'.format(
self.sparse_pruning_method is not None, self.channel_pruning_method is not None,
self.activation_quantization_method is not None, self.weight.target_bits)
def enable_sparse_pruning(self, ratio, method):
self.sparse_pruning_ratio = ratio
self.sparse_pruning_method = method
if method == 'l1':
weight_norm = torch.abs(self.weight.data)
mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False)
mask = mask.view(self.weight.size())
mask = mask.to(self.weight.device)
elif method == 'topk':
self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size()))
self.sparse_mask_scores.data = self.sparse_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('sparse_pruning_mask', mask)
def enable_channel_pruning(self, ratio, method):
# Here, we support two cases: L1 norm based pruning and topk based pruning
self.channel_pruning_ratio = ratio
self.channel_pruning_method = method
if method == 'l1':
# compute the l1 norm of each conv2d filter (over the last three dimensions)
weight_norm = torch.norm(self.weight.data, p=1, dim=[1, 2, 3])
mask = TopKBinarizer.apply(weight_norm, self.channel_pruning_ratio, False)
mask = mask.view(-1, 1, 1, 1)
mask = mask.to(self.weight.device)
elif method == 'topk':
self.channel_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1, 1, 1))
self.channel_mask_scores.data = self.channel_mask_scores.data.to(self.weight.device)
init.kaiming_uniform_(self.channel_mask_scores, a=math.sqrt(5))
mask = None
else:
raise NotImplementedError
self.register_buffer('channel_pruning_mask', mask)
def fix_sparse_pruning_helper(self):
mask = self.get_mask(pruning_type='sparse')
self.weight.data = self.weight.data * mask
del self.sparse_pruning_mask
if self.sparse_pruning_method == 'topk':
del self.sparse_mask_scores
self.sparse_pruning_method = None
self.sparse_pruning_enabled = False
return None
def fix_channel_pruning_helper(self, mask=None, dim_reduction=False):
if mask is None:
if self.channel_pruning_method in ['l1', 'topk']:
mask = self.get_mask(pruning_type='channel').bool()
if dim_reduction:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[mask.view(-1), ...])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
if self.bias is not None:
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
else:
self.weight.data = self.weight.data * mask.view(-1, 1, 1, 1)
if self.bias is not None:
self.bias.data = self.bias.data * mask.view(-1)
del self.channel_pruning_mask
if self.channel_pruning_method == 'topk':
del self.channel_mask_scores
self.channel_pruning_method = None
else:
raise NotImplementedError
else:
start_bits = self.weight.start_bits
target_bits = self.weight.target_bits
q_period = self.weight.q_period
self.weight = nn.Parameter(self.weight.data[:, mask.view(-1), ...])
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = q_period
mask = None
self.channel_pruning_enabled = False
return mask
def get_mask(self, pruning_type='sparse'):
if pruning_type == 'sparse':
if self.sparse_pruning_method == 'l1':
return self.sparse_pruning_mask.to(self.weight.device)
elif self.sparse_pruning_method == 'topk':
return TopKBinarizer.apply(self.sparse_mask_scores, self.sparse_pruning_ratio, False)
else:
raise NotImplementedError
elif pruning_type == 'channel':
if self.channel_pruning_method == 'l1':
return self.channel_pruning_mask.to(self.weight.device)
elif self.channel_pruning_method == 'topk':
return TopKBinarizer.apply(self.channel_mask_scores, self.channel_pruning_ratio, False)
else:
raise NotImplementedError
else:
raise NotImplementedError
def fix_weight_quantization(self):
self.weight.data = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups).data
self.weight_quantization_enabled_in_forward = False
return None
def enable_weight_quantization(self, start_bits, target_bits, quantization_period,
weight_quantization_enabled_in_forward, quantization_type, num_groups):
self.weight.start_bits = start_bits
self.weight.target_bits = target_bits
self.weight.q_period = quantization_period
self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward
if self.weight_quantization_enabled_in_forward:
assert self.weight.target_bits >= 4, 'Only >=4 bits weight quantization are supported during forward pass for now'
logger.warning(
"************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************"
)
if quantization_type == 'symmetric':
self.weight_quantizer = SymQuantizer.apply
else:
self.weight_quantizer = AsymQuantizer.apply
self.weight_quantize_num_groups = num_groups
def enable_activation_quantization(self, bits, quantization_type, range_calibration):
assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now'
self.activation_quantization_bits = bits
self.activation_quantization_method = f"{quantization_type}_{range_calibration}"
if range_calibration == 'static':
self.activation_quantizer = QuantAct(quant_mode=quantization_type)
else:
if quantization_type == 'symmetric':
self.activation_quantizer = SymQuantizer.apply
else:
self.activation_quantizer = AsymQuantizer.apply
def forward(self, input):
if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled:
weight = self.weight_quantizer(self.weight, self.weight.target_bits, None, None,
self.weight_quantize_num_groups)
bias = self.bias
else:
weight = self.weight
bias = self.bias
if self.sparse_pruning_enabled and self.sparse_pruning_method:
mask = self.get_mask(pruning_type='sparse')
weight = weight * mask.view(self.weight.size())
if self.channel_pruning_enabled:
mask = self.get_mask(pruning_type='channel')
weight = weight * mask.view(-1, 1, 1, 1)
if bias is not None:
bias = bias * mask.view(-1)
if self.activation_quantization_enabled:
if 'dynamic' in self.activation_quantization_method:
num_groups = input.numel() // input[0].numel()
else:
num_groups = 1
input = self.activation_quantizer(input, self.activation_quantization_bits, None, None, num_groups)
return nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
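# Illustrative usage sketch (channel count and ratio are placeholders): channel pruning
# keeps whole output channels, so the mask is broadcast over (out_channels, 1, 1, 1).
#   conv = Conv2dLayer_Compress(16, 32, 3)
#   conv.enable_channel_pruning(ratio=0.5, method='l1')
#   conv.channel_pruning_enabled = True
#   y = conv(torch.randn(1, 16, 8, 8))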
class BNLayer_Compress(nn.BatchNorm2d):
def fix_channel_pruning_helper(self, mask, dim_reduction=True):
self.weight = nn.Parameter(self.weight.data[mask.view(-1)])
self.bias = nn.Parameter(self.bias.data[mask.view(-1)])
self.running_mean = self.running_mean[mask.view(-1)]
self.running_var = self.running_var[mask.view(-1)]
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# All-reduce.
dist.all_reduce(input_, group=group)
return input_
def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
last_dim = tensor.dim() - 1
assert tensor.size()[last_dim] % num_partitions == 0
last_dim_size = tensor.size()[last_dim] // num_partitions
# Split.
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
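# Illustrative sketch: splitting a (2, 8) tensor into 4 partitions along the last
# dimension yields a tuple of four (2, 2) views.
#   chunks = split_tensor_along_last_dim(torch.arange(16.).view(2, 8), 4)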
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# Split along last dimension.
world_size = dist.get_world_size(group=group)
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = dist.get_rank(group=group)
output = input_list[rank].contiguous()
return output
def _gather(input_):
"""Gather tensors and concatinate along the last dimension."""
group = g_mpu.get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if dist.get_world_size(group=group) == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = dist.get_rank(group=group)
world_size = dist.get_world_size(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
dist.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output
class _CopyToModelParallelRegion(torch.autograd.Function):
"""Pass the input to the model parallel region."""
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output)
class _ReduceFromModelParallelRegion(torch.autograd.Function):
"""All-redcue the input from the model parallel region."""
@staticmethod
def forward(ctx, input_):
return _reduce(input_)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToModelParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def forward(ctx, input_):
return _split(input_)
@staticmethod
def backward(ctx, grad_output):
return _gather(grad_output)
class _GatherFromModelParallelRegion(torch.autograd.Function):
"""Gather the input from model parallel region and concatinate."""
@staticmethod
def forward(ctx, input_):
return _gather(input_)
@staticmethod
def backward(ctx, grad_output):
return _split(grad_output)
# -----------------
# Helper functions.
# -----------------
def copy_to_model_parallel_region(input_):
return _CopyToModelParallelRegion.apply(input_)
def reduce_from_model_parallel_region(input_):
return _ReduceFromModelParallelRegion.apply(input_)
def scatter_to_model_parallel_region(input_):
return _ScatterToModelParallelRegion.apply(input_)
def gather_from_model_parallel_region(input_):
return _GatherFromModelParallelRegion.apply(input_)
class ColumnParallelLinear_Compress(LinearLayer_Compress):
def __init__(self, mpu, input_size, output_size, bias=True, gather_output=True, skip_bias_add=False):
# Keep input parameters
global g_mpu
g_mpu = mpu
self.input_size = input_size
self.output_size = output_size
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
# Divide the weight matrix along the last dimension.
world_size = mpu.get_model_parallel_world_size()
assert output_size % world_size == 0
self.output_size_per_partition = output_size // world_size
super(ColumnParallelLinear_Compress, self).__init__(self.input_size, self.output_size_per_partition, bias=bias)
def forward(self, input_):
# Set up backprop all-reduce.
input_parallel = copy_to_model_parallel_region(input_)
# Matrix multiply.
if self.skip_bias_add:
output_parallel, bias = super().forward(input_parallel, True)
else:
output_parallel = super().forward(input_parallel)
bias = None
if self.gather_output:
# All-gather across the partitions.
output = gather_from_model_parallel_region(output_parallel)
else:
output = output_parallel
return output, bias
class RowParallelLinear_Compress(LinearLayer_Compress):
def __init__(self, mpu, input_size, output_size, bias=True, input_is_parallel=False, skip_bias_add=False):
# Keep input parameters
global g_mpu
g_mpu = mpu
self.input_size = input_size
self.output_size = output_size
self.input_is_parallel = input_is_parallel
self.skip_bias_add = skip_bias_add
# Divide the weight matrix along the last dimension.
world_size = mpu.get_model_parallel_world_size()
assert input_size % world_size == 0
self.input_size_per_partition = input_size // world_size
super(RowParallelLinear_Compress, self).__init__(self.input_size_per_partition, self.output_size, bias=bias)
def forward(self, input_):
# Set up backprop all-reduce.
if self.input_is_parallel:
input_parallel = input_
else:
input_parallel = scatter_to_model_parallel_region(input_)
# Matrix multiply.
output_parallel, bias = super().forward(input_parallel, True)
# All-reduce across all the partitions.
output_ = reduce_from_model_parallel_region(output_parallel)
if not self.skip_bias_add:
if bias is not None:
output = output_ + bias
else:
output = output_
output_bias = None
else:
output = output_
output_bias = bias
return output, output_bias | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/compression/basic_layer.py | basic_layer.py |
# DeepSpeed Team
import torch
from torch import autograd
import math
class TopKBinarizer(autograd.Function):
"""
Top-k Binarizer.
Computes a binary mask M from a real value matrix S such that `M_{i,j} = 1` if and only if `S_{i,j}`
is among the k% highest values of S.
Implementation is inspired from:
https://github.com/yaozhewei/MLPruning
"""
@staticmethod
def forward(ctx, inputs: torch.tensor, threshold: float, sigmoid: bool):
"""
Args:
inputs (`torch.FloatTensor`)
The input matrix from which the binarizer computes the binary mask.
threshold (`float`)
The percentage of weights to keep (the rest is pruned).
`threshold` is a float between 0 and 1.
sigmoid (`bool`)
Whether to apply a sigmoid on the threshold
Returns:
mask (`torch.FloatTensor`)
Binary matrix of the same size as `inputs` acting as a mask (1 - the associated weight is
retained, 0 - the associated weight is pruned).
"""
# Get the subnetwork by sorting the inputs and using the top threshold
if sigmoid:
threshold = torch.sigmoid(threshold).item()
ctx.sigmoid = sigmoid
mask = inputs.clone()
_, idx = inputs.flatten().sort(descending=True)
j = math.ceil(threshold * inputs.numel())
# flat_out and mask access the same memory.
flat_out = mask.flatten()
flat_out[idx[j:]] = 0.
flat_out[idx[:j]] = 1.
ctx.save_for_backward(mask)
return mask
@staticmethod
def backward(ctx, gradOutput):
mask, = ctx.saved_tensors
if ctx.sigmoid:
return gradOutput.clone(), ((gradOutput * mask).sum()).view(-1), None
else:
return gradOutput.clone(), None, None
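def _topk_binarizer_example():  # pragma: no cover
    """Editor's illustrative sketch (not part of the original DeepSpeed API).

    TopKBinarizer is an ``autograd.Function`` and is therefore invoked through
    ``.apply()``: the returned mask keeps the top ``threshold`` fraction of the
    scores, and the backward pass routes gradients straight through to them.
    """
    scores = torch.randn(4, 8, requires_grad=True)
    mask = TopKBinarizer.apply(scores, 0.25, False)  # keep the top 25% of scores
    assert int(mask.sum().item()) == math.ceil(0.25 * scores.numel())
    (mask * scores).sum().backward()  # straight-through gradient estimator
    return scores.grad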
class SymQuantizer(torch.autograd.Function):
"""
Symmetric quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int, >=4)
Number of bits to use for quantization
min_value/max_value (torch.FloatTensor)
Used for static activation quantization
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
and num_groups == 1)
q_range = 2**num_bits
input_shape = input.shape
if min_value is None:
input = input.reshape(num_groups, -1)
max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1)
else:
max_input = torch.max(min_value.abs(), max_value).view(-1)
scale = 2 * max_input / q_range
output = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale
output = output.reshape(input_shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
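def _sym_quantizer_example():  # pragma: no cover
    """Editor's illustrative sketch (not part of the original DeepSpeed API).

    Fake-quantizes a weight matrix to 8 bits with one symmetric scale per group
    (dynamic range, i.e. no static min/max); the straight-through backward pass
    keeps the weights trainable.
    """
    weight = torch.randn(16, 64, requires_grad=True)
    quantized = SymQuantizer.apply(weight, 8, None, None, 16)  # 16 quantization groups
    quantized.sum().backward()  # gradients pass through unchanged
    return quantized, weight.grad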
class AsymQuantizer(torch.autograd.Function):
"""
Asymmetric quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int, >=4)
Number of bits to use for quantization
min_value/max_value (torch.FloatTensor)
Used for static activation quantization
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None
and num_groups == 1)
q_range = 2**num_bits
input_shape = input.shape
if min_value is None:
input = input.reshape(num_groups, -1)
min_value = input.amin(dim=-1, keepdim=True)
max_value = input.amax(dim=-1, keepdim=True)
scale = (max_value - min_value) / q_range
zero_point = (min_value / scale).round() * scale
output = ((input - zero_point) / scale).round().clamp(0, q_range - 1) * scale + zero_point
output = output.reshape(input_shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
class TernaryQuantizer(torch.autograd.Function):
"""
Ternary quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int)
Dummy variable
min_value/max_value (torch.FloatTensor)
Used for static activation quantization; for now these are dummy variables
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None)
input_flat = input.reshape(num_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1).div(n)
thres = (0.7 * m).view(-1, 1)
pos = (input_flat > thres).type(input.type())
neg = (input_flat < -thres).type(input.type())
mask = (input_flat.abs() > thres).type(input.type())
alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1)
output = alpha * pos - alpha * neg
output = output.reshape(input.shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None
class BinaryQuantizer(torch.autograd.Function):
"""
Binary quantization
"""
@staticmethod
def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
"""
Args:
inputs (`torch.FloatTensor`)
The input which needs to be quantized
num_bits (int)
Dummy variable
min_value/max_value (torch.FloatTensor)
Used for static activation quantization; for now these are dummy variables
num_groups (int)
How many groups to partition the quantization into
Returns:
quantized_input (`torch.FloatTensor`)
Quantized input
"""
assert (min_value is None and max_value is None)
input_flat = input.reshape(num_groups, -1)
n = input_flat.shape[1]
m = input_flat.norm(p=1, dim=1, keepdim=True).div(n)
output = input_flat.sign().mul(m)
output = output.reshape(input.shape).contiguous()
return output
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input, None, None, None, None | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/compression/utils.py | utils.py |
# DeepSpeed Team
import os
import pkgutil
import importlib
from .abstract_accelerator import DeepSpeedAccelerator
# During the setup stage torch may not be installed yet; passing on a missing
# torch import still allows the op builder related APIs to be executed.
try:
import torch.cuda
except ImportError:
pass
class CUDA_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cuda'
self._communication_backend_name = 'nccl'
# begin initialize for create_op_builder()
# put all valid class name <--> class type mapping into class_dict
op_builder_dir = self.op_builder_dir()
op_builder_module = importlib.import_module(op_builder_dir)
for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]):
# avoid self references
if module_name != 'all_ops' and module_name != 'builder':
module = importlib.import_module("{}.{}".format(op_builder_dir, module_name))
for member_name in module.__dir__():
if member_name.endswith(
'Builder'
) and member_name != "OpBuilder" and member_name != "CUDAOpBuilder" and member_name != "TorchCPUOpBuilder": # avoid abstract classes
if not member_name in self.class_dict:
self.class_dict[member_name] = getattr(module, member_name)
# end initialize for create_op_builder()
# Device APIs
def device_name(self, device_index=None):
if device_index is None:
return 'cuda'
return 'cuda:{}'.format(device_index)
def device(self, device_index=None):
return torch.cuda.device(device_index)
def set_device(self, device_index):
torch.cuda.set_device(device_index)
def current_device(self):
return torch.cuda.current_device()
def current_device_name(self):
return 'cuda:{}'.format(torch.cuda.current_device())
def device_count(self):
return torch.cuda.device_count()
def synchronize(self, device_index=None):
return torch.cuda.synchronize(device_index)
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
if device_index is None:
return torch.cuda.set_rng_state(new_state)
return torch.cuda.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
if device_index is None:
return torch.cuda.get_rng_state()
return torch.cuda.get_rng_state(device_index)
def manual_seed(self, seed):
return torch.cuda.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.cuda.manual_seed_all(seed)
def initial_seed(self, seed):
return torch.cuda.initial_seed(seed)
def default_generator(self, device_index):
return torch.cuda.default_generators[device_index]
# Streams/Events
@property
def Stream(self):
return torch.cuda.Stream
def stream(self, stream):
return torch.cuda.stream(stream)
def current_stream(self, device_index=None):
return torch.cuda.current_stream(device_index)
def default_stream(self, device_index=None):
return torch.cuda.default_stream(device_index)
@property
def Event(self):
return torch.cuda.Event
# Memory management
def empty_cache(self):
return torch.cuda.empty_cache()
def memory_allocated(self, device_index=None):
return torch.cuda.memory_allocated(device_index)
def max_memory_allocated(self, device_index=None):
return torch.cuda.max_memory_allocated(device_index)
def reset_max_memory_allocated(self, device_index=None):
return torch.cuda.reset_max_memory_allocated(device_index)
def memory_cached(self, device_index=None):
return torch.cuda.memory_cached(device_index)
def max_memory_cached(self, device_index=None):
return torch.cuda.max_memory_cached(device_index)
def reset_max_memory_cached(self, device_index=None):
return torch.cuda.reset_max_memory_cached(device_index)
def memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'memory_stats'):
return torch.cuda.memory_stats(device_index)
def reset_peak_memory_stats(self, device_index=None):
if hasattr(torch.cuda, 'reset_peak_memory_stats'):
return torch.cuda.reset_peak_memory_stats(device_index)
def memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'memory_reserved'):
return torch.cuda.memory_reserved(device_index)
def max_memory_reserved(self, device_index=None):
if hasattr(torch.cuda, 'max_memory_reserved'):
return torch.cuda.max_memory_reserved(device_index)
def total_memory(self, device_index=None):
return torch.cuda.get_device_properties(device_index).total_memory
# Data types
def is_bf16_supported(self):
return torch.cuda.is_bf16_supported()
def is_fp16_supported(self):
major, _ = torch.cuda.get_device_capability()
if major >= 7:
return True
else:
return False
# Misc
def amp(self):
if hasattr(torch.cuda, 'amp'):
return torch.cuda.amp
return None
def is_available(self):
return torch.cuda.is_available()
def range_push(self, msg):
if hasattr(torch.cuda.nvtx, 'range_push'):
return torch.cuda.nvtx.range_push(msg)
def range_pop(self):
if hasattr(torch.cuda.nvtx, 'range_pop'):
return torch.cuda.nvtx.range_pop()
def lazy_call(self, callback):
return torch.cuda._lazy_call(callback)
def communication_backend_name(self):
return self._communication_backend_name
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.cuda.BFloat16Tensor
@property
def ByteTensor(self):
return torch.cuda.ByteTensor
@property
def DoubleTensor(self):
return torch.cuda.DoubleTensor
@property
def FloatTensor(self):
return torch.cuda.FloatTensor
@property
def HalfTensor(self):
return torch.cuda.HalfTensor
@property
def IntTensor(self):
return torch.cuda.IntTensor
@property
def LongTensor(self):
return torch.cuda.LongTensor
def pin_memory(self, tensor):
return tensor.pin_memory()
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cuda:'):
return True
else:
return False
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401
return "op_builder"
except ImportError:
return "deepspeed.ops.op_builder"
# dict that holds class name <--> class type mapping i.e.
# 'AsyncIOBuilder': <class 'op_builder.async_io.AsyncIOBuilder'>
# this dict will be filled at init stage
class_dict = {}
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, class_name):
if class_name in self.class_dict:
return self.class_dict[class_name]()
else:
return None
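# Editor's note (illustrative sketch, not from the original file): once the
# accelerator has scanned the op_builder package, kernels can be built on demand
# by class name, e.g.
#
#   accel = CUDA_Accelerator()
#   builder = accel.create_op_builder('InferenceBuilder')
#   inference_module = builder.load() if builder is not None else None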
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
if class_name in self.class_dict:
return self.class_dict[class_name]
else:
return None
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/accelerator/cuda_accelerator.py | cuda_accelerator.py |
# DeepSpeed Team
import abc
from abc import ABC
class DeepSpeedAccelerator(ABC):
def __init__(self):
self._name = None
self._communication_backend_name = None
# Device APIs
@abc.abstractmethod
def device_name(self, device_index):
...
@abc.abstractmethod
def device(self, device_index):
...
@abc.abstractmethod
def set_device(self, device_index):
...
@abc.abstractmethod
def current_device(self):
...
@abc.abstractmethod
def current_device_name(self):
...
@abc.abstractmethod
def device_count(self):
...
@abc.abstractmethod
def synchronize(self, device_index=None):
...
# RNG APIs
@abc.abstractmethod
def random(self):
...
@abc.abstractmethod
def set_rng_state(self, new_state, device_index=None):
...
@abc.abstractmethod
def get_rng_state(self, device_index=None):
...
@abc.abstractmethod
def manual_seed(self, seed):
...
@abc.abstractmethod
def manual_seed_all(self, seed):
...
@abc.abstractmethod
def initial_seed(self, seed):
...
@abc.abstractmethod
def default_generator(self, device_index):
...
# Streams/Events
@property
@abc.abstractmethod
def Stream(self):
...
@abc.abstractmethod
def stream(self, stream):
...
@abc.abstractmethod
def current_stream(self, device_index=None):
...
@abc.abstractmethod
def default_stream(self, device_index=None):
...
@property
@abc.abstractmethod
def Event(self):
...
# Memory management
@abc.abstractmethod
def empty_cache(self):
...
@abc.abstractmethod
def memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def max_memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def reset_max_memory_allocated(self, device_index=None):
...
@abc.abstractmethod
def memory_cached(self, device_index=None):
...
@abc.abstractmethod
def max_memory_cached(self, device_index=None):
...
@abc.abstractmethod
def reset_max_memory_cached(self, device_index=None):
...
@abc.abstractmethod
def memory_stats(self, device_index=None):
...
@abc.abstractmethod
def reset_peak_memory_stats(self, device_index=None):
...
@abc.abstractmethod
def memory_reserved(self, device_index=None):
...
@abc.abstractmethod
def max_memory_reserved(self, device_index=None):
...
@abc.abstractmethod
def total_memory(self, device_index=None):
...
# Data types
@abc.abstractmethod
def is_bf16_supported(self):
...
@abc.abstractmethod
def is_fp16_supported(self):
...
# Misc
@abc.abstractmethod
def amp(self):
...
@abc.abstractmethod
def is_available(self):
...
@abc.abstractmethod
def range_push(self, msg):
...
@abc.abstractmethod
def range_pop(self):
...
@abc.abstractmethod
def lazy_call(self, callback):
...
@abc.abstractmethod
def communication_backend_name(self):
...
# Tensor operations
@property
@abc.abstractmethod
def BFloat16Tensor(self):
...
@property
@abc.abstractmethod
def ByteTensor(self):
...
@property
@abc.abstractmethod
def DoubleTensor(self):
...
@property
@abc.abstractmethod
def FloatTensor(self):
...
@property
@abc.abstractmethod
def HalfTensor(self):
...
@property
@abc.abstractmethod
def IntTensor(self):
...
@property
@abc.abstractmethod
def LongTensor(self):
...
@abc.abstractmethod
def pin_memory(self, tensor):
...
@abc.abstractmethod
def on_accelerator(self, tensor):
...
@abc.abstractmethod
def op_builder_dir(self):
...
# create an instance of op builder, specified by class_name
@abc.abstractmethod
def create_op_builder(self, class_name):
...
# return an op builder class, specified by class_name
@abc.abstractmethod
def get_op_builder(self, class_name):
...
@abc.abstractmethod
def build_extension(self):
... | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/accelerator/abstract_accelerator.py | abstract_accelerator.py |
# DeepSpeed Team
try:
from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
except ImportError as e:
dsa1 = None
try:
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2
except ImportError as e:
dsa2 = None
ds_accelerator = None
def _validate_accelerator(accel_obj):
# `abstract_accelerator` lives under a different path at build time
# (accelerator.abstract_accelerator) and at run time
# (deepspeed.accelerator.abstract_accelerator), and an extension imports the
# run-time abstract_accelerator/DeepSpeedAccelerator as its base class, so we
# need to compare accel_obj against both base classes. If accel_obj is an
# instance of DeepSpeedAccelerator from either
# accelerator.abstract_accelerator or
# deepspeed.accelerator.abstract_accelerator, we consider it a conforming
# object.
if not ((dsa1 != None and isinstance(accel_obj, dsa1)) or (dsa2 != None and isinstance(accel_obj, dsa2))):
raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator')
# TODO: turn off is_available test since this breaks tests
#assert accel_obj.is_available(), \
# f'{accel_obj.__class__.__name__} accelerator fails is_available() test'
def get_accelerator():
global ds_accelerator
if ds_accelerator is None:
try:
from intel_extension_for_deepspeed import XPU_Accelerator
except ImportError as e:
pass
else:
ds_accelerator = XPU_Accelerator()
_validate_accelerator(ds_accelerator)
return ds_accelerator
from .cuda_accelerator import CUDA_Accelerator
ds_accelerator = CUDA_Accelerator()
_validate_accelerator(ds_accelerator)
return ds_accelerator
def set_accelerator(accel_obj):
global ds_accelerator
_validate_accelerator(accel_obj)
ds_accelerator = accel_obj
'''
-----------[code] test_get.py -----------
from deepspeed.accelerator import get_accelerator
my_accelerator = get_accelerator()
print(f'{my_accelerator._name=}')
print(f'{my_accelerator._communication_backend=}')
print(f'{my_accelerator.HalfTensor().device=}')
print(f'{my_accelerator.total_memory()=}')
-----------[code] test_get.py -----------
---[output] python test_get.py---------
my_accelerator.name()='cuda'
my_accelerator.communication_backend='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_get.py---------
**************************************************************************
-----------[code] test_set.py -----------
from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
cu_accel = CUDA_Accelerator()
print(f'{id(cu_accel)=}')
from deepspeed.accelerator import set_accelerator, get_accelerator
set_accelerator(cu_accel)
my_accelerator = get_accelerator()
print(f'{id(my_accelerator)=}')
print(f'{my_accelerator._name=}')
print(f'{my_accelerator._communication_backend=}')
print(f'{my_accelerator.HalfTensor().device=}')
print(f'{my_accelerator.total_memory()=}')
-----------[code] test_set.py -----------
---[output] python test_set.py---------
id(cu_accel)=139648165478304
my_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>
my_accelerator.name='cuda'
my_accelerator.communication_backend='nccl'
my_accelerator.HalfTensor().device=device(type='cuda', index=0)
my_accelerator.total_memory()=34089730048
---[output] python test_set.py---------
''' | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/accelerator/real_accelerator.py | real_accelerator.py |
# DeepSpeed Team
from .builder import CUDAOpBuilder, installed_cuda_version
class InferenceBuilder(CUDAOpBuilder):
BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE"
NAME = "transformer_inference"
def __init__(self, name=None):
name = self.NAME if name is None else name
super().__init__(name=name)
def absolute_name(self):
return f'deepspeed.ops.transformer.inference.{self.NAME}_op'
def is_compatible(self, verbose=True):
try:
import torch
except ImportError:
self.warning("Please install torch if trying to pre-compile inference kernels")
return False
cuda_okay = True
if not self.is_rocm_pytorch() and torch.cuda.is_available():
sys_cuda_major, _ = installed_cuda_version()
torch_cuda_major = int(torch.version.cuda.split('.')[0])
cuda_capability = torch.cuda.get_device_properties(0).major
if cuda_capability < 6:
self.warning("NVIDIA Inference is only supported on Pascal and newer architectures")
cuda_okay = False
if cuda_capability >= 8:
if torch_cuda_major < 11 or sys_cuda_major < 11:
self.warning("On Ampere and higher architectures please use CUDA 11+")
cuda_okay = False
return super().is_compatible(verbose) and cuda_okay
def filter_ccs(self, ccs):
ccs_retained = []
ccs_pruned = []
for cc in ccs:
if int(cc[0]) >= 6:
ccs_retained.append(cc)
else:
ccs_pruned.append(cc)
if len(ccs_pruned) > 0:
self.warning(f"Filtered compute capabilities {ccs_pruned}")
return ccs_retained
def sources(self):
return [
'csrc/transformer/inference/csrc/pt_binding.cpp',
'csrc/transformer/inference/csrc/gelu.cu',
'csrc/transformer/inference/csrc/relu.cu',
'csrc/transformer/inference/csrc/layer_norm.cu',
'csrc/transformer/inference/csrc/softmax.cu',
'csrc/transformer/inference/csrc/dequantize.cu',
'csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu',
'csrc/transformer/inference/csrc/transform.cu',
]
def extra_ldflags(self):
if not self.is_rocm_pytorch():
return ['-lcurand']
else:
return []
def include_paths(self):
return ['csrc/transformer/inference/includes', 'csrc/includes'] | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/op_builder/transformer_inference.py | transformer_inference.py |
# DeepSpeed Team
import distutils.spawn
import subprocess
from .builder import OpBuilder
class AsyncIOBuilder(OpBuilder):
BUILD_VAR = "DS_BUILD_AIO"
NAME = "async_io"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.aio.{self.NAME}_op'
def sources(self):
return [
'csrc/aio/py_lib/deepspeed_py_copy.cpp', 'csrc/aio/py_lib/py_ds_aio.cpp',
'csrc/aio/py_lib/deepspeed_py_aio.cpp', 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp',
'csrc/aio/py_lib/deepspeed_aio_thread.cpp', 'csrc/aio/common/deepspeed_aio_utils.cpp',
'csrc/aio/common/deepspeed_aio_common.cpp', 'csrc/aio/common/deepspeed_aio_types.cpp',
'csrc/aio/py_lib/deepspeed_pin_tensor.cpp'
]
def include_paths(self):
return ['csrc/aio/py_lib', 'csrc/aio/common']
def cxx_args(self):
# -O0 for improved debugging, since performance is bound by I/O
CPU_ARCH = self.cpu_arch()
SIMD_WIDTH = self.simd_width()
return [
'-g',
'-Wall',
'-O0',
'-std=c++14',
'-shared',
'-fPIC',
'-Wno-reorder',
CPU_ARCH,
'-fopenmp',
SIMD_WIDTH,
'-laio',
]
def extra_ldflags(self):
return ['-laio']
def check_for_libaio_pkg(self):
libs = dict(
dpkg=["-l", "libaio-dev", "apt"],
pacman=["-Q", "libaio", "pacman"],
rpm=["-q", "libaio-devel", "yum"],
)
found = False
for pkgmgr, data in libs.items():
flag, lib, tool = data
path = distutils.spawn.find_executable(pkgmgr)
if path is not None:
cmd = f"{pkgmgr} {flag} {lib}"
result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if result.wait() == 0:
found = True
else:
self.warning(f"{self.NAME}: please install the {lib} package with {tool}")
break
return found
def is_compatible(self, verbose=True):
# Check for the existence of libaio by using distutils
# to compile and link a test program that calls io_submit,
# which is a function provided by libaio that is used in the async_io op.
# If needed, one can define -I and -L entries in CFLAGS and LDFLAGS
# respectively to specify the directories for libaio.h and libaio.so.
aio_compatible = self.has_function('io_submit', ('aio', ))
if verbose and not aio_compatible:
self.warning(f"{self.NAME} requires the dev libaio .so object and headers but these were not found.")
# Check for the libaio package via known package managers
# to print suggestions on which package to install.
self.check_for_libaio_pkg()
self.warning(
"If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found."
)
return super().is_compatible(verbose) and aio_compatible | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/op_builder/async_io.py | async_io.py |
# DeepSpeed Team
from .builder import OpBuilder
try:
from packaging import version as pkg_version
except ImportError:
pkg_version = None
class SparseAttnBuilder(OpBuilder):
BUILD_VAR = "DS_BUILD_SPARSE_ATTN"
NAME = "sparse_attn"
def __init__(self):
super().__init__(name=self.NAME)
def absolute_name(self):
return f'deepspeed.ops.sparse_attention.{self.NAME}_op'
def sources(self):
return ['csrc/sparse_attention/utils.cpp']
def cxx_args(self):
return ['-O2', '-fopenmp']
def is_compatible(self, verbose=True):
# Check to see if llvm and cmake are installed since they are dependencies
#required_commands = ['llvm-config|llvm-config-9', 'cmake']
#command_status = list(map(self.command_exists, required_commands))
#deps_compatible = all(command_status)
if self.is_rocm_pytorch():
self.warning(f'{self.NAME} is not compatible with ROCM')
return False
try:
import torch
except ImportError:
self.warning(f"unable to import torch, please install it first")
return False
# torch-cpu will not have a cuda version
if torch.version.cuda is None:
cuda_compatible = False
self.warning(f"{self.NAME} cuda is not available from torch")
else:
major, minor = torch.version.cuda.split('.')[:2]
cuda_compatible = (int(major) == 10 and int(minor) >= 1) or (int(major) >= 11)
if not cuda_compatible:
self.warning(f"{self.NAME} requires CUDA version 10.1+")
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
torch_compatible = (TORCH_MAJOR == 1 and TORCH_MINOR >= 5)
if not torch_compatible:
self.warning(
f'{self.NAME} requires a torch version >= 1.5 and < 2.0 but detected {TORCH_MAJOR}.{TORCH_MINOR}')
try:
import triton
except ImportError:
# auto-install of triton is broken on some systems, reverting to manual install for now
# see this issue: https://github.com/microsoft/DeepSpeed/issues/1710
self.warning(f"please install triton==1.0.0 if you want to use sparse attention")
return False
if pkg_version:
installed_triton = pkg_version.parse(triton.__version__)
triton_mismatch = installed_triton != pkg_version.parse("1.0.0")
else:
installed_triton = triton.__version__
triton_mismatch = installed_triton != "1.0.0"
if triton_mismatch:
self.warning(f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible")
return False
return super().is_compatible(verbose) and torch_compatible and cuda_compatible | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/op_builder/sparse_attn.py | sparse_attn.py |
# DeepSpeed Team
import os
import sys
import time
import importlib
from pathlib import Path
import subprocess
import shlex
import shutil
import tempfile
import distutils.ccompiler
import distutils.log
import distutils.sysconfig
from distutils.errors import CompileError, LinkError
from abc import ABC, abstractmethod
from typing import List
YELLOW = '\033[93m'
END = '\033[0m'
WARNING = f"{YELLOW} [WARNING] {END}"
DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions"
DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0"
try:
import torch
except ImportError:
print(f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops.")
else:
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
def installed_cuda_version(name=""):
import torch.utils.cpp_extension
cuda_home = torch.utils.cpp_extension.CUDA_HOME
assert cuda_home is not None, "CUDA_HOME does not exist, unable to compile CUDA op(s)"
# Ensure there is not a cuda version mismatch between torch and nvcc compiler
output = subprocess.check_output([cuda_home + "/bin/nvcc", "-V"], universal_newlines=True)
output_split = output.split()
release_idx = output_split.index("release")
release = output_split[release_idx + 1].replace(',', '').split(".")
# Ignore patch versions, only look at major + minor
cuda_major, cuda_minor = release[:2]
return int(cuda_major), int(cuda_minor)
def get_default_compute_capabilities():
compute_caps = DEFAULT_COMPUTE_CAPABILITIES
import torch.utils.cpp_extension
if torch.utils.cpp_extension.CUDA_HOME is not None and installed_cuda_version()[0] >= 11:
if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0:
# Special treatment of CUDA 11.0 because compute_86 is not supported.
compute_caps += ";8.0"
else:
compute_caps += ";8.0;8.6"
return compute_caps
# List compatible minor CUDA versions - so that, for example, pytorch built with cuda-11.0
# can be used to build deepspeed against a system-wide installed cuda 11.2
cuda_minor_mismatch_ok = {
10: [
"10.0",
"10.1",
"10.2",
],
11: ["11.0", "11.1", "11.2", "11.3", "11.4", "11.5", "11.6", "11.7", "11.8"],
}
def assert_no_cuda_mismatch(name=""):
cuda_major, cuda_minor = installed_cuda_version(name)
sys_cuda_version = f'{cuda_major}.{cuda_minor}'
torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
# This is a show-stopping error, should probably not proceed past this
if sys_cuda_version != torch_cuda_version:
if (cuda_major in cuda_minor_mismatch_ok and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major]
and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]):
print(f"Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda} "
"but since the APIs are compatible, accepting this combination")
return True
raise Exception(f">- DeepSpeed Op Builder: Installed CUDA version {sys_cuda_version} does not match the "
f"version torch was compiled with {torch.version.cuda}, unable to compile "
"cuda/cpp extensions without a matching cuda version.")
return True
class OpBuilder(ABC):
_rocm_version = None
_is_rocm_pytorch = None
def __init__(self, name):
self.name = name
self.jit_mode = False
self.build_for_cpu = False
self.error_log = None
@abstractmethod
def absolute_name(self):
'''
Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam
will be installed as something like: deepspeed/ops/adam/cpu_adam.so
'''
pass
@abstractmethod
def sources(self):
'''
Returns list of source files for your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
'''
pass
def hipify_extension(self):
pass
@staticmethod
def validate_torch_version(torch_info):
install_torch_version = torch_info['version']
current_torch_version = ".".join(torch.__version__.split('.')[:2])
if install_torch_version != current_torch_version:
raise RuntimeError("PyTorch version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install torch version={install_torch_version}, "
f"Runtime torch version={current_torch_version}")
@staticmethod
def validate_torch_op_version(torch_info):
if not OpBuilder.is_rocm_pytorch():
current_cuda_version = ".".join(torch.version.cuda.split('.')[:2])
install_cuda_version = torch_info['cuda_version']
if install_cuda_version != current_cuda_version:
raise RuntimeError("CUDA version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install CUDA version={install_cuda_version}, "
f"Runtime CUDA version={current_cuda_version}")
else:
current_hip_version = ".".join(torch.version.hip.split('.')[:2])
install_hip_version = torch_info['hip_version']
if install_hip_version != current_hip_version:
raise RuntimeError("HIP version mismatch! DeepSpeed ops were compiled and installed "
"with a different version than what is being used at runtime. "
f"Please re-install DeepSpeed or switch torch versions. "
f"Install HIP version={install_hip_version}, "
f"Runtime HIP version={current_hip_version}")
@staticmethod
def is_rocm_pytorch():
if OpBuilder._is_rocm_pytorch is not None:
return OpBuilder._is_rocm_pytorch
_is_rocm_pytorch = False
try:
import torch
except ImportError:
pass
else:
if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5):
_is_rocm_pytorch = hasattr(torch.version, 'hip') and torch.version.hip is not None
if _is_rocm_pytorch:
from torch.utils.cpp_extension import ROCM_HOME
_is_rocm_pytorch = ROCM_HOME is not None
OpBuilder._is_rocm_pytorch = _is_rocm_pytorch
return OpBuilder._is_rocm_pytorch
@staticmethod
def installed_rocm_version():
if OpBuilder._rocm_version:
return OpBuilder._rocm_version
ROCM_MAJOR = '0'
ROCM_MINOR = '0'
if OpBuilder.is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
rocm_ver_file = Path(ROCM_HOME).joinpath(".info/version-dev")
if rocm_ver_file.is_file():
with open(rocm_ver_file, 'r') as file:
ROCM_VERSION_DEV_RAW = file.read()
elif "rocm" in torch.__version__:
ROCM_VERSION_DEV_RAW = torch.__version__.split("rocm")[1]
else:
assert False, "Could not detect ROCm version"
assert ROCM_VERSION_DEV_RAW != "", "Could not detect ROCm version"
ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0]
ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1]
OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR))
return OpBuilder._rocm_version
def include_paths(self):
'''
Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed)
'''
return []
def nvcc_args(self):
'''
Returns optional list of compiler flags to forward to nvcc when building CUDA sources
'''
return []
def cxx_args(self):
'''
Returns optional list of compiler flags to forward to the build
'''
return []
def is_compatible(self, verbose=True):
'''
Check if all non-python dependencies are satisfied to build this op
'''
return True
def extra_ldflags(self):
return []
def libraries_installed(self, libraries):
valid = False
check_cmd = 'dpkg -l'
for lib in libraries:
result = subprocess.Popen(f'dpkg -l {lib}', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
valid = valid or result.wait() == 0
return valid
def has_function(self, funcname, libraries, verbose=False):
'''
Test for existence of a function within a tuple of libraries.
This is used as a smoke test to check whether a certain library is available.
As a test, this creates a simple C program that calls the specified function,
and then distutils is used to compile that program and link it with the specified libraries.
Returns True if both the compile and link are successful, False otherwise.
'''
tempdir = None # we create a temporary directory to hold various files
filestderr = None # handle to open file to which we redirect stderr
oldstderr = None # file descriptor for stderr
try:
# Echo compile and link commands that are used.
if verbose:
distutils.log.set_verbosity(1)
# Create a compiler object.
compiler = distutils.ccompiler.new_compiler(verbose=verbose)
# Configure compiler and linker to build according to Python install.
distutils.sysconfig.customize_compiler(compiler)
# Create a temporary directory to hold test files.
tempdir = tempfile.mkdtemp()
# Define a simple C program that calls the function in question
prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % (funcname, funcname)
# Write the test program to a file.
filename = os.path.join(tempdir, 'test.c')
with open(filename, 'w') as f:
f.write(prog)
# Redirect stderr file descriptor to a file to silence compile/link warnings.
if not verbose:
filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w')
oldstderr = os.dup(sys.stderr.fileno())
os.dup2(filestderr.fileno(), sys.stderr.fileno())
# Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames()
# Otherwise, a local directory will be used instead of tempdir
drive, driveless_filename = os.path.splitdrive(filename)
root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else ''
output_dir = os.path.join(drive, root_dir)
# Attempt to compile the C program into an object file.
cflags = shlex.split(os.environ.get('CFLAGS', ""))
objs = compiler.compile([filename], output_dir=output_dir, extra_preargs=self.strip_empty_entries(cflags))
# Attempt to link the object file into an executable.
# Be sure to tack on any libraries that have been specified.
ldflags = shlex.split(os.environ.get('LDFLAGS', ""))
compiler.link_executable(objs,
os.path.join(tempdir, 'a.out'),
extra_preargs=self.strip_empty_entries(ldflags),
libraries=libraries)
# Compile and link succeeded
return True
except CompileError:
return False
except LinkError:
return False
except:
return False
finally:
# Restore stderr file descriptor and close the stderr redirect file.
if oldstderr is not None:
os.dup2(oldstderr, sys.stderr.fileno())
if filestderr is not None:
filestderr.close()
# Delete the temporary directory holding the test program and stderr files.
if tempdir is not None:
shutil.rmtree(tempdir)
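# Editor's note (illustrative): this smoke test is how op builders probe for system
# libraries, e.g. AsyncIOBuilder.is_compatible() effectively calls
#   self.has_function('io_submit', ('aio',))
# to check that libaio headers and the shared object can be compiled and linked.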
def strip_empty_entries(self, args):
'''
Drop any empty strings from the list of compile and link flags
'''
return [x for x in args if len(x) > 0]
def cpu_arch(self):
try:
from cpuinfo import get_cpu_info
except ImportError as e:
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return "-march=native"
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return "-march=native"
if cpu_info['arch'].startswith('PPC_'):
# gcc does not provide -march on PowerPC, use -mcpu instead
return '-mcpu=native'
return '-march=native'
def is_cuda_enable(self):
try:
assert_no_cuda_mismatch(self.name)
return '-D__ENABLE_CUDA__'
except BaseException:
print(f"{WARNING} {self.name} cuda is missing or is incompatible with installed torch, "
"only cpu ops can be compiled!")
return '-D__DISABLE_CUDA__'
return '-D__DISABLE_CUDA__'
def _backup_cpuinfo(self):
# Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides
if not self.command_exists('lscpu'):
self.warning(f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo "
"to detect the CPU architecture. 'lscpu' does not appear to exist on "
"your system, will fall back to use -march=native and non-vectorized execution.")
return None
result = subprocess.check_output('lscpu', shell=True)
result = result.decode('utf-8').strip().lower()
cpu_info = {}
cpu_info['arch'] = None
cpu_info['flags'] = ""
if 'genuineintel' in result or 'authenticamd' in result:
cpu_info['arch'] = 'X86_64'
if 'avx512' in result:
cpu_info['flags'] += 'avx512,'
elif 'avx512f' in result:
cpu_info['flags'] += 'avx512f,'
if 'avx2' in result:
cpu_info['flags'] += 'avx2'
elif 'ppc64le' in result:
cpu_info['arch'] = "PPC_"
return cpu_info
def simd_width(self):
try:
from cpuinfo import get_cpu_info
except ImportError as e:
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return '-D__SCALAR__'
try:
cpu_info = get_cpu_info()
except Exception as e:
self.warning(f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), "
"falling back to `lscpu` to get this information.")
cpu_info = self._backup_cpuinfo()
if cpu_info is None:
return '-D__SCALAR__'
if cpu_info['arch'] == 'X86_64':
if 'avx512' in cpu_info['flags'] or 'avx512f' in cpu_info['flags']:
return '-D__AVX512__'
elif 'avx2' in cpu_info['flags']:
return '-D__AVX256__'
return '-D__SCALAR__'
def command_exists(self, cmd):
if '|' in cmd:
cmds = cmd.split("|")
else:
cmds = [cmd]
valid = False
for cmd in cmds:
result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
valid = valid or result.wait() == 0
if not valid and len(cmds) > 1:
print(f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!")
elif not valid and len(cmds) == 1:
print(f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!")
return valid
def warning(self, msg):
self.error_log = f"{msg}"
print(f"{WARNING} {msg}")
def deepspeed_src_path(self, code_path):
if os.path.isabs(code_path):
return code_path
else:
return os.path.join(Path(__file__).parent.parent.absolute(), code_path)
def builder(self):
from torch.utils.cpp_extension import CppExtension
return CppExtension(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())},
extra_link_args=self.strip_empty_entries(self.extra_ldflags()))
def load(self, verbose=True):
from deepspeed.git_version_info import installed_ops, torch_info
if installed_ops[self.name]:
# Ensure the op we're about to load was compiled with the same
# torch/cuda versions we are currently using at runtime.
self.validate_torch_version(torch_info)
if torch.cuda.is_available() and isinstance(self, CUDAOpBuilder):
self.validate_torch_op_version(torch_info)
return importlib.import_module(self.absolute_name())
else:
return self.jit_load(verbose)
def jit_load(self, verbose=True):
if not self.is_compatible(verbose):
raise RuntimeError(
f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue. {self.error_log}"
)
try:
import ninja # noqa: F401
except ImportError:
raise RuntimeError(f"Unable to JIT load the {self.name} op due to ninja not being installed.")
if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch():
try:
assert_no_cuda_mismatch(self.name)
self.build_for_cpu = False
except BaseException:
self.build_for_cpu = True
self.jit_mode = True
from torch.utils.cpp_extension import load
start_build = time.time()
sources = [self.deepspeed_src_path(path) for path in self.sources()]
extra_include_paths = [self.deepspeed_src_path(path) for path in self.include_paths()]
# Torch will try to apply whatever CCs are in the arch list at compile time.
# We have already set the intended targets ourselves, since we know what will be
# needed at runtime. This prevents CC collisions such as multiple __half
# implementations. Stash the arch list so it can be reset after the build.
torch_arch_list = None
if "TORCH_CUDA_ARCH_LIST" in os.environ:
torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST")
os.environ["TORCH_CUDA_ARCH_LIST"] = ""
op_module = load(name=self.name,
sources=self.strip_empty_entries(sources),
extra_include_paths=self.strip_empty_entries(extra_include_paths),
extra_cflags=self.strip_empty_entries(self.cxx_args()),
extra_cuda_cflags=self.strip_empty_entries(self.nvcc_args()),
extra_ldflags=self.strip_empty_entries(self.extra_ldflags()),
verbose=verbose)
build_duration = time.time() - start_build
if verbose:
print(f"Time to load {self.name} op: {build_duration} seconds")
# Reset arch list so we are not silently removing it for other possible use cases
if torch_arch_list:
os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list
return op_module
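# Editor's note (illustrative sketch, not from the original sources): a typical way
# an op builder is consumed at runtime - either a pre-compiled module is imported,
# or the ninja-based JIT compilation path above kicks in:
#
#   from deepspeed.ops.op_builder import InferenceBuilder
#   inference_module = InferenceBuilder().load()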
class CUDAOpBuilder(OpBuilder):
def compute_capability_args(self, cross_compile_archs=None):
"""
Returns nvcc compute capability compile flags.
1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`.
2. If neither is set, default compute capabilities will be used
3. Under `jit_mode` compute capabilities of all visible cards will be used plus PTX
Format:
- `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples:
TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ...
TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ...
- `cross_compile_archs` uses ; separator.
"""
ccs = []
if self.jit_mode:
# Compile for underlying architectures since we know those at runtime
for i in range(torch.cuda.device_count()):
CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i)
cc = f"{CC_MAJOR}.{CC_MINOR}"
if cc not in ccs:
ccs.append(cc)
ccs = sorted(ccs)
ccs[-1] += '+PTX'
else:
# Cross-compile mode, compile for various architectures
# env override takes priority
cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
if cross_compile_archs_env is not None:
if cross_compile_archs is not None:
print(
f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`"
)
cross_compile_archs = cross_compile_archs_env.replace(' ', ';')
else:
if cross_compile_archs is None:
cross_compile_archs = get_default_compute_capabilities()
ccs = cross_compile_archs.split(';')
ccs = self.filter_ccs(ccs)
if len(ccs) == 0:
raise RuntimeError(
f"Unable to load {self.name} op due to no compute capabilities remaining after filtering")
args = []
for cc in ccs:
num = cc[0] + cc[2]
args.append(f'-gencode=arch=compute_{num},code=sm_{num}')
if cc.endswith('+PTX'):
args.append(f'-gencode=arch=compute_{num},code=compute_{num}')
return args
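# Editor's note (worked example, derived from the code above): with
# TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" the generated nvcc flags are
#   -gencode=arch=compute_80,code=sm_80
#   -gencode=arch=compute_86,code=sm_86
#   -gencode=arch=compute_86,code=compute_86   (the +PTX suffix adds the PTX target)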
def filter_ccs(self, ccs: List[str]):
"""
Prune any compute capabilities that are not compatible with the builder. Should log
which CCs have been pruned.
"""
return ccs
def version_dependent_macros(self):
# Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
return version_ge_1_1 + version_ge_1_3 + version_ge_1_5
def is_compatible(self, verbose=True):
return super().is_compatible(verbose)
def builder(self):
try:
assert_no_cuda_mismatch(self.name)
self.build_for_cpu = False
except BaseException:
self.build_for_cpu = True
if self.build_for_cpu:
from torch.utils.cpp_extension import CppExtension as ExtensionBuilder
else:
from torch.utils.cpp_extension import CUDAExtension as ExtensionBuilder
compile_args = {'cxx': self.strip_empty_entries(self.cxx_args())} if self.build_for_cpu else \
{'cxx': self.strip_empty_entries(self.cxx_args()), \
'nvcc': self.strip_empty_entries(self.nvcc_args())}
cuda_ext = ExtensionBuilder(name=self.absolute_name(),
sources=self.strip_empty_entries(self.sources()),
include_dirs=self.strip_empty_entries(self.include_paths()),
libraries=self.strip_empty_entries(self.libraries_args()),
extra_compile_args=compile_args)
if self.is_rocm_pytorch():
# hip converts paths to absolute, this converts back to relative
sources = cuda_ext.sources
curr_file = Path(__file__).parent.parent # ds root
for i in range(len(sources)):
src = Path(sources[i])
if src.is_absolute():
sources[i] = str(src.relative_to(curr_file))
else:
sources[i] = str(src)
cuda_ext.sources = sources
return cuda_ext
def hipify_extension(self):
if self.is_rocm_pytorch():
from torch.utils.hipify import hipify_python
hipify_python.hipify(
project_directory=os.getcwd(),
output_directory=os.getcwd(),
header_include_dirs=self.include_paths(),
includes=[os.path.join(os.getcwd(), '*')],
extra_files=[os.path.abspath(s) for s in self.sources()],
show_detailed=True,
is_pytorch_extension=True,
hipify_extra_files_only=True,
)
def cxx_args(self):
if sys.platform == "win32":
return ['-O2']
else:
return ['-O3', '-std=c++14', '-g', '-Wno-reorder']
def nvcc_args(self):
if self.build_for_cpu:
return []
args = ['-O3']
if self.is_rocm_pytorch():
ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version()
args += [
'-std=c++14', '-U__HIP_NO_HALF_OPERATORS__', '-U__HIP_NO_HALF_CONVERSIONS__',
'-U__HIP_NO_HALF2_OPERATORS__',
'-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR,
'-DROCM_VERSION_MINOR=%s' % ROCM_MINOR
]
else:
cuda_major, _ = installed_cuda_version()
args += [
'-allow-unsupported-compiler' if sys.platform == "win32" else '', '--use_fast_math',
'-std=c++17' if sys.platform == "win32" and cuda_major > 10 else '-std=c++14',
'-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', '-U__CUDA_NO_HALF2_OPERATORS__'
]
if os.environ.get('DS_DEBUG_CUDA_BUILD', '0') == '1':
args.append('--ptxas-options=-v')
args += self.compute_capability_args()
return args
def libraries_args(self):
if self.build_for_cpu:
return []
if sys.platform == "win32":
return ['cublas', 'curand']
else:
return []
class TorchCPUOpBuilder(CUDAOpBuilder):
def extra_ldflags(self):
if self.build_for_cpu:
return ['-fopenmp']
if not self.is_rocm_pytorch():
return ['-lcurand']
return []
def cxx_args(self):
import torch
args = []
if not self.build_for_cpu:
if not self.is_rocm_pytorch():
CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64")
else:
CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib")
args += super().cxx_args()
args += [
f'-L{CUDA_LIB64}',
'-lcudart',
'-lcublas',
'-g',
]
CPU_ARCH = self.cpu_arch()
SIMD_WIDTH = self.simd_width()
CUDA_ENABLE = self.is_cuda_enable()
args += [
CPU_ARCH,
'-fopenmp',
SIMD_WIDTH,
CUDA_ENABLE,
]
return args | Adeepspeed | /Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/op_builder/builder.py | builder.py |
## Adept
### Installation
`pip install git+ssh://[email protected]/fictivekin/adept-python.git`
### Using the package
Import the Adept class from the package and pass in your account credentials
```
from adept import Adept
a = Adept(
account_id='9b5dd41deff8ae7e3767fc6566cb25ff3ca66438',
account_key='251692b3899f1e30fe4b7037185488aad37c46f8',
cloudfront_hostname='some.cloudfront.host.com'
)
```
You can then generate a URL that applies the given image operations to an S3 asset (in a specified bucket) or to an arbitrary image URL.
An example using an S3 asset:
```
operations = ['maxwidth-400', 'cropcenter-400x300']
a.generate_url(
bucket='gimmebar-assets',
asset_key='526fc2761c899.jpg',
operations=operations,
)
```
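An example using an image URL instead of an S3 asset (a sketch based on the `generate_url` signature; the URL below is only a placeholder):
```
operations = ['maxwidth-400', 'cropcenter-400x300']
a.generate_url(
    asset_url='http://example.com/images/kitten.jpg',
    operations=operations,
)
```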
| Adept | /Adept-0.0.2.tar.gz/Adept-0.0.2/README.md | README.md |
import hashlib
import hmac
try:  # Python 3
    from urllib.parse import quote as urlquote
except ImportError:  # Python 2 fallback
    from urllib import quote as urlquote
from .errors import OperationError, AccountError
class Adept(object):
def __init__(self, account_id, account_key, cloudfront_hostname, default_bucket=None):
if account_id is not None:
self.account_id = account_id
else:
raise AccountError('Please provide a valid account ID')
if account_key is not None:
self.account_key = account_key
else:
raise AccountError('Please provide a valid account key')
if cloudfront_hostname is not None:
self.cloudfront_hostname = cloudfront_hostname
else:
raise AccountError('Please provide a valid CloudFront hostname')
# Always set the attribute so generate_url() can safely test it for None.
self.default_bucket = default_bucket
def _generate_hash(self, path):
request_hash = hmac.new(
self.account_key,
path,
hashlib.sha1
)
return request_hash.hexdigest()
def generate_url(self, bucket=None, asset_key=None, asset_url=None, operations=['identity'], secure=True):
"""
Given an S3 key and bucket/URL, perform given image operations on an image and return the URL.
"""
if len(operations) < 1:
raise OperationError('You didn\'t provide any operations to perform on the image')
protocol = 'https://' if secure else 'http://'
base_url = '%s%s' % (protocol, self.cloudfront_hostname)
if asset_key is not None:
if bucket is None:
if self.default_bucket is None:
raise OperationError('No S3 bucket has been provided.')
else:
bucket = self.default_bucket
path = '/%s/%s/%s/%s' % (
bucket,
asset_key,
'/'.join(operations),
self.account_id,
)
request_hash = self._generate_hash(path)
adept_url = ('%s%s/%s' % (
base_url,
path,
request_hash)
)
elif asset_url is not None:
path = '/loader/%s/%s' % (
'/'.join(operations),
self.account_id
)
loader_uri = '%s?url=%s' % (path, asset_url)
request_hash = self._generate_hash(loader_uri)
adept_url = '%s%s/%s?url=%s' % (
base_url,
path,
request_hash,
urlquote(asset_url)
)
else:
raise OperationError("Asset key or URL must be provided")
return adept_url | Adept | /Adept-0.0.2.tar.gz/Adept-0.0.2/adept/__init__.py | __init__.py |
### MIT license
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/LICENSE.md | LICENSE.md |
Adhesion
========
*Adhesion.* This code implements adhesive interactions for [ContactMechanics](https://github.com/ComputationalMechanics/ContactMechanics).
The methods that are implemented in this code are described in the following papers:
- [Pastewka, Robbins, PNAS 111, 3298 (2014)](https://doi.org/10.1073/pnas.1320846111)
Build status
------------
The following badge should say _build passing_. This means that all automated tests completed successfully for the master branch.
Tests: [](https://github.com/ContactEngineering/Adhesion/actions/workflows/tests.yml)
Building documentation: [](https://github.com/ContactEngineering/Adhesion/actions/workflows/publish.yml)
Installation
------------
You need Python 3 and [FFTW3](http://www.fftw.org/) to run Adhesion. All Python dependencies can be installed automatically by invoking
#### Installation directly with pip
```bash
# install Adhesion
pip install Adhesion
```
The last command will install other dependencies including
[muFFT](https://gitlab.com/muspectre/muspectre.git),
[NuMPI](https://github.com/IMTEK-Simulation/NuMPI.git) and [runtests](https://github.com/bccp/runtests.git)
Note: sometimes [muFFT](https://gitlab.com/muspectre/muspectre.git) will not find the FFTW3 installation you expect.
You can specify the directory where you installed [FFTW3](http://www.fftw.org/)
by setting the environment variable `FFTWDIR` (e.g. to `$USER/.local`)
#### Installation from source directory
If you cloned the repository, you can install the dependencies with
```
pip install [--user] numpy
pip install [--user] pylint
pip install [--user] cython
pip install [--user] mpi4py #optional
pip3 install [--user] -r requirements.txt
```
in the source directory. Adhesion can be installed by invoking
```pip3 install [--user] .```
in the source directory. The command line parameter --user is optional and leads to a local installation in the current user's `$HOME/.local` directory.
#### Installation problems with lapack and openblas
`bicubic.cpp` is linked with `lapack`, that is already available as a dependency of `numpy`.
If during build, `setup.py` fails to link to one of the lapack implementations
provided by numpy, as was experienced for mac, try providing following environment variables:
```bash
export LDFLAGS="-L/usr/local/opt/openblas/lib $LDFLAGS"
export CPPFLAGS="-I/usr/local/opt/openblas/include $CPPFLAGS"
export PKG_CONFIG_PATH="/usr/local/opt/openblas/lib/pkgconfig:$PKG_CONFIG_PATH"
export LDFLAGS="-L/usr/local/opt/lapack/lib $LDFLAGS"
export CPPFLAGS="-I/usr/local/opt/lapack/include $CPPFLAGS"
export PKG_CONFIG_PATH="/usr/local/opt/lapack/lib/pkgconfig:$PKG_CONFIG_PATH"
```
where the paths have probably to be adapted to your particular installation method
(here it was an extra homebrew installation).
Updating Adhesion
-------------
If you update Adhesion (whether with pip or `git pull` if you cloned the repository),
you may need to uninstall `NuMPI`, `muSpectre` and/or `runtests`, so that the
newest versions of them will be installed.
Testing
-------
To run the automated tests, go to the main source directory and execute the following:
```
pytest
```
Tests that are parallelizable have to run with [runtests](https://github.com/AntoineSIMTEK/runtests)
```
python run-tests.py
```
You can choose the number of processors with the option `--mpirun="mpirun -np 4"`. For development purposes you can go beyond the number of processors of your computer using `--mpirun="mpirun -np 10 --oversubscribe"`
Other useful flags:
- `--xterm`: one window per processor
- `--xterm --pdb`: debugging
Development
-----------
To use the code without installing it, e.g. for development purposes, use the `env.sh` script to set the environment:
```source /path/to/Adhesion/env.sh [python3]```
Note that the parameter to `env.sh` specifies the Python interpreter for which the environment is set up. Adhesion contains portions that need to be compiled; make sure to run
```python setup.py build```
whenever any of the Cython (.pyx) sources are modified.
Please read [CONTRIBUTING](CONTRIBUTING.md) if you plan to contribute to this code.
Usage
-----
The code is documented via Python docstrings that can be accessed via the `help` command or by appending a question mark `?` in IPython/Jupyter. There are command line tools available that may be a good starting point; they live in the `commandline` subdirectory:
- `soft_wall.py`: Command line front end for calculations with soft (possibly adhesive) interactions between a rigid and an elastic flat. This is a stub rather than a fully featured command line tool; it can be used as a starting point for modified scripts. The present implementation is set up for a solution of Martin Müser's contact mechanics challenge.
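For example, to view the docstring of one of the interaction potentials from within Python (any other class or function works the same way):
```python
from Adhesion.Interactions import VDW82SimpleSmooth
help(VDW82SimpleSmooth)
```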
Compiling the documentation
---------------------------
- After changes to the Adhesion source, you have to build again: ```python setup.py build```
- Navigate into the docs folder: ```cd docs/```
- Automatically generate reStructuredText files from the source: ```sphinx-apidoc -o source/ ../Adhesion```
This needs to be done only once, or again if you have added/removed classes or methods. In the latter case, be sure to remove the previously generated sources first: ```rm -rf source/```
- Build html files: ```make html```
- The resulting html files can be found in the ```Adhesion/docs/_build/html/``` folder. Root is ```Adhesion/docs/_build/html/index.html```.
For convenience, all these steps are implemented in `compile_doc.sh`.
Funding
-------
Development of this project is funded by the [European Research Council](https://erc.europa.eu) within [Starting Grant 757343](https://cordis.europa.eu/project/id/757343) and by the [Deutsche Forschungsgemeinschaft](https://www.dfg.de/en) within projects [PA 2023/2](https://gepris.dfg.de/gepris/projekt/258153560) and [EXC 2193](https://gepris.dfg.de/gepris/projekt/390951807).
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/README.md | README.md |
Contributing to Adhesion
========================
Code style
----------
Always follow [PEP-8](https://www.python.org/dev/peps/pep-0008/), with the following exception: "One big exception to PEP 8 is our preference of longer line lengths. We're well into the 21st Century, and we have high-resolution computer screens that can fit way more than 79 characters on a screen. Don't limit lines of code to 79 characters if it means the code looks significantly uglier or is harder to read." (Taken from [Django's contributing guidelines](https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/coding-style/).)
Development branches
--------------------
New features should always be developed in their own branch. When creating your own branch,
please prefix the branch name with the year of creation and a description of what it contains.
For example, if you are working on an implementation for line scans and you started that
work in 2018, the branch could be called "18_line_scans".
Commits
-------
Prepend your commit messages with a shortcut indicating the type of changes they contain:
* BUG: Bug fix
* CI: Changes to the CI configuration
* DOC: Changes to documentation strings or documentation in general (not only typos)
* ENH: Enhancement (e.g. a new feature)
* MAINT: Maintenance (e.g. fixing a typo)
* TST: Changes to the unit test environment
* WIP: Work in progress
* API: changes to the user exposed API
The changelog will be based on the content of the commits with the tags BUG, API and ENH.
Examples:
- If you are working on a new feature, use ENH on the commit that makes the feature ready. Before that, use the WIP tag.
- Use TST when your changes only deal with the testing environment. If you fix a bug and implement the test for it, use BUG.
- Minor changes that don't change the code's behaviour (for example rewriting a file in a cleaner or slightly more efficient way) belong under the tag MAINT.
- If you change documentation files without changing the code, use DOC; if you also change code in the same commit, use another shortcut.
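Hypothetical examples of such commit messages:
```bash
# hypothetical example messages
git commit -m "WIP: draft implementation of line scan support"
git commit -m "ENH: add support for line scans"
git commit -m "BUG: fix sign of the adhesive force and add a regression test"
```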
Authors
-------
Add yourself to the AUTHORS file using the email address that you are using for your
commits. We use this information to automatically generate copyright statements for
all files from the commit log.
Writing tests
-------------
Older tests are written using the `unittest` syntax. We now use `pytest` (which
understands almost all `unittest` syntax), because it is compatible with the
parallel test runner [runtests](https://github.com/AntoineSIMTEK/runtests).
If a whole test file should only be run in serial
and/or is incompatible with `runtests` (e.g. `unittest`-based tests), include the following line:
```python
pytestmark = pytest.mark.skipif(MPI.COMM_WORLD.Get_size() > 1,
                                reason="tests only serial functionalities, please execute with pytest")
```
The file will then be executed in a run with `pytest`, but not in a (parallel) run with
`python3 run-tests.py`.
#### MPI Tests
In order to vary the number of processors used in the tests, you should always
explicitly use the communicator defined as a fixture in `tests/conftest.py` instead
of `MPI.COMM_WORLD`.
```python
def test_parallel(comm):
    substrate = PeriodicFFTElasticHalfSpace(...., communicator=comm)
# Take care not to let your functions use their default value
# for the communicator !
```
Note: for a single test function that should run on only one processor, use the `comm_serial` fixture:
```python
def test_parallel(comm_serial):
pass
```
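For orientation, a minimal sketch of what such a fixture could look like; the actual `tests/conftest.py` may differ:
```python
import pytest

from NuMPI import MPI


@pytest.fixture
def comm():
    # communicator handed to parallelizable tests
    return MPI.COMM_WORLD
```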
#### Debug plots in the tests
Often, when you develop your tests, you need to plot and print things to see what
happens. It is a good idea to leave the plotting code in place, ready for use:
```python
if False:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plt.colorbar(ax.pcolormesh(- system.substrate.force), label="pressure")
plt.show(block=True)
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/CONTRIBUTING.md | CONTRIBUTING.md |
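# Benchmark: compares NuMPI's LBFGS implementation against scipy's L-BFGS-B on
# an adhesive smooth-sphere contact for several grid sizes, recording wall
# time, iteration counts and number of objective evaluations, and plots the
# results to LBFGS_scipy_vs_NuMPI.png.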
import time
import scipy.optimize
starttime=time.time()
import numpy as np
from ContactMechanics import FreeFFTElasticHalfSpace
from SurfaceTopography import make_sphere
from FFTEngine import PFFTEngine
from NuMPI.Optimization import LBFGS
from NuMPI.Tools.Reduction import Reduction
from Adhesion import VDW82smoothMin
from System import SmoothContactSystem
from NuMPI import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
pnp = Reduction(comm=comm)
class iter_inspector():
def __init__(self):
self.neval = 0
self.energies = []
self.maxgradients =[]
def __call__(self, system):
self.neval += 1
self.energies.append(system.energy)
self.maxgradients.append(pnp.max(abs(system.force)))
class decorated_objective:
def __init__(self, system, objective):
self.system = system
self.objective = objective
self.neval = 0
self.energies = []
self.maxgradients =[]
def __call__(self, *args, **kwargs):
val = self.objective(*args, **kwargs)
self.neval += 1
self.energies.append(system.energy)
self.maxgradients.append(pnp.max(abs(system.force)))
return val
import matplotlib.pyplot as plt
fig, (axt, axit) = plt.subplots(2, 1, sharex=True)
ns = [128,256, 512]
nrepetition = 2
for method, name in zip([LBFGS,"L-BFGS-B"],
["NuMPI", "Scipy"]):
times = np.zeros((len(ns), nrepetition))
nits = np.zeros((len(ns), nrepetition))
nevals =np.zeros((len(ns), nrepetition))
for i, n in enumerate(ns):
# sphere radius:
r_s = 10.0
# contact radius
r_c = .2
# peak pressure
p_0 = 2.5
# equivalent Young's modulus
E_s = 102.#102.
# work of adhesion
w = 1.0
# tolerance for optimizer
tol = 1e-12
# tolerance for contact area
gap_tol = 1e-6
nx, ny = n, n
sx = 21.0
z0 = 0.05 # needed to get small tolerance, but very very slow
fftengine = PFFTEngine((2*nx, 2*ny), comm=comm)
# the "Min" part of the potential (linear for small z) is needed for the LBFGS without bounds
inter = VDW82smoothMin(w * z0 ** 8 / 3, 16 * np.pi * w * z0 ** 2, gamma=w, pnp = pnp)
# Parallel SurfaceTopography Patch
substrate = FreeFFTElasticHalfSpace((nx,ny), young=E_s, physical_sizes=(sx, sx), fft=fftengine, pnp=pnp)
#print(substrate._comp_nb_grid_pts)
#print(fftengine.nb_domain_grid_pts)
surface = make_sphere(radius=r_s, nb_grid_pts=(nx, ny), physical_sizes=(sx, sx),
subdomain_locations=substrate.topography_subdomain_locations,
nb_subdomain_grid_pts=substrate.topography_nb_subdomain_grid_pts,
pnp=pnp,
standoff=float('inf'))
ext_surface = make_sphere(r_s, (2 * nx, 2 * ny), (2 * sx, 2 * sx),
centre=(sx / 2, sx / 2),
subdomain_locations=substrate.subdomain_locations,
nb_subdomain_grid_pts=substrate.nb_subdomain_grid_pts,
pnp=pnp,
standoff=float('inf'))
system = SmoothContactSystem(substrate, inter, surface)
penetration = 0
disp0 = ext_surface.heights() + penetration
disp0 = np.where(disp0 > 0, disp0, 0)
#disp0 = system.shape_minimisation_input(disp0)
maxcor = 10
for j in range(nrepetition):
starttime =time.time()
counter = iter_inspector()
objective_monitor = decorated_objective(system, system.objective(penetration, gradient=True))
result = scipy.optimize.minimize(objective_monitor,
disp0, method=method, jac=True,
options=dict(gtol=1e-6 * abs(w/z0),
ftol=1e-25,
maxcor=maxcor))
nevals[i,j]= objective_monitor.neval
times[i,j] = time.time() - starttime
nits[i,j] = result.nit
print(method)
print(result.message)
print("nevals: {}".format(objective_monitor.neval))
print(result.nit)
print(times[i,j])
converged = result.success
assert converged
axt.plot(ns, np.mean(times, axis=1), "o",label="{}".format(name))
l, =axit.plot(ns, np.mean(nits, axis=1), "o",label="{}, nits".format(name))
axit.plot(ns, np.mean(nevals, axis=1), "+",c = l.get_color(), label="{}, nfeval".format(name))
axit.set_xlabel("lateral nb_grid_pts (-)")
axt.set_ylabel("execution time (s)")
axit.set_ylabel("# of iterations")
axit.legend(fancybox=True, framealpha=0.5)
axt.legend(fancybox=True, framealpha=0.5)
fig.savefig("LBFGS_scipy_vs_NuMPI.png") | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/LBFGS_scipy_vsNuMPI.py | LBFGS_scipy_vsNuMPI.py |
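# Symbolic helper: uses sympy to derive the coefficients of the quartic spline
# defined below from boundary conditions at the cutoff and transition radii,
# for the cases r_t <= r_min, r_t > r_min and r_t = r_infl, and prints the
# resulting expressions in a form intended to be pasted into the code.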
from sympy import Symbol, pprint
import sympy
from copy import deepcopy
# free variable
dr = Symbol('Δr')
# spline parameters
c0 = Symbol('C0', real=True)
c1 = Symbol('C1', real=True)
c2 = Symbol('C2', real=True)
c3 = Symbol('C3', real=True)
c4 = Symbol('C4', real=True)
dr_c = Symbol('Δrc', real=True)
dr_t = Symbol('Δrt', real=True)
dr_m = Symbol('Δrm', real=True)
dgam = Symbol('(Δγ-γ)', negative=True)
# boundary parameters
gam = Symbol('γ', positive=True)
dV_t = Symbol('dV_t', real=True)
ddV_t = Symbol('ddV_t', real=True)
ddV_m = Symbol('ddV_M', real=True)
fun = c0 - c1*dr - c2/2*dr**2 - c3/3*dr**3 - c4/4*dr**4
dfun = sympy.diff(fun, dr)
ddfun = sympy.diff(dfun, dr)
print("Spline:")
pprint(fun)
print("Slope:")
pprint(dfun)
print("Curvature:")
pprint(ddfun)
# boundary conditions (satisfied if equal to zero)
bnds = dict()
bnds[1] = dfun.subs(dr, dr_t) - dV_t
bnds[2] = ddfun.subs(dr, dr_t) - ddV_t
bnds[3] = fun.subs(dr, dr_c)
bnds[4] = dfun.subs(dr, dr_c)
bnds[5] = ddfun.subs(dr, dr_c)
stored_bnds = deepcopy(bnds)
def pbnd(boundaries):
print("\n")
for key, bnd in boundaries.items():
print("Boundary condition {}:".format(key))
pprint(bnd)
# assuming the origin for Δr is at the cutoff (Δrc):
bnds[3] = bnds[3].subs(dr_c, 0.) # everything is zero at r_cut
bnds[4] = bnds[4].subs(dr_c, 0.) # everything is zero at r_cut
bnds[5] = bnds[5].subs(dr_c, 0.) # everything is zero at r_cut
print()
print('#####################################')
print("For r_t <= r_min:")
# all at once?
coeff_sol = sympy.solve(bnds.values(), [c0, c1, c2, c3, c4])
print('\nCoefficients')
pprint(coeff_sol)
# solving for Δrt
print('substituted polynomial')
polynomial = fun.subs(coeff_sol)
pprint(polynomial)
# Δrm is a minimum (actually, the global minimum), the one not at zero
dpolynomial = sympy.diff(polynomial, dr)
print("substituted polynomial's derivative:")
pprint(dpolynomial)
sols = sympy.solve(dpolynomial, dr)
for sol in sols:
if sol != 0:
sol_drm = sol
print("\nsolution for Ξr_m")
pprint(sol_drm)
# Ξ³-condition:
Ξ³fun = sympy.simplify(polynomial.subs(dr, sol_drm) + gam)
print('\nΞ³-condition is not solvable analytically.')
print('objective function:')
pprint(Ξ³fun)
dΞ³fun = sympy.simplify(sympy.diff(Ξ³fun, dr_t))
print('\nobjective function derivative:')
pprint(dΞ³fun)
# not solvable in sympy, but good initial guess for optimisation can be
# obtained for case where r_min = r_t (the default case)
guess_γfun = γfun.subs({"dV_t": 0, "ddV_t": ddV_m})
guess_sol = sympy.solve(guess_γfun, dr_t)[0]
print('\ninitial guess: note that you need to evaluate the curvature at r_min '
      'for the square root to be guaranteed to be real (i.e., that the '
      'curvature and γ have the same sign)')
pprint(guess_sol)
print()
print("for usage in code:")
print("\nCoefficients: ", [coeff_sol[c0], coeff_sol[c1], coeff_sol[c2], coeff_sol[c3], coeff_sol[c4]])
print("\nobjective_fun: ", γfun)
print("\nobjective_derivative: ", dγfun)
print("\ninitial guess for Δr_t: ", guess_sol)
print("\nsol for Δr_m: ", sol_drm)
print()
print('#####################################')
print("For r_t > r_min:")
bnds[6] = fun.subs(dr, dr_t) - dgam
# all at once is a mess, better to split the solution:
coeff_sol = sympy.solve(list(bnds.values())[:-1], [c0, c1, c2, c3, c4])
print('\nCoefficients')
pprint(coeff_sol)
print("Ξ³-Condition:")
pprint(bnds[6])
print("Ξ³-Condition, substituted:")
pprint(bnds[6].subs(coeff_sol))
bnd6_sol = sympy.solve(bnds[6].subs(coeff_sol), dr_t)
print("Ξ³-Condition, solved for Ξrt (first one is the correct one):")
pprint(bnd6_sol)
print("βΞ³/β(ddV_t) :")
dΞrt = sympy.diff(bnd6_sol[0], ddV_t)
pprint(dΞrt)
print("β(Ξrt)/β(ddV_t) at zero :")
dΞrt_0 = sympy.limit(dΞrt, ddV_t, 0)
pprint(dΞrt_0)
print("β(Ξrt)/β(ddV_t) at zero with substitution:")
a = Symbol('a', positive = True)
b = Symbol('b', positive = True)
subs = {dV_t: a/3, dgam:b/-12}
resubs = {a:3*dV_t, b:-12*dgam}
sub_ddrt = dΔrt.subs(subs)
subdΔrt_0 = sympy.limit(sub_ddrt, ddV_t, 0)
pprint(subdΔrt_0)
pprint(subdΔrt_0.subs(resubs))
# solving for Δrt
print('substituted polynomial')
polynomial = sympy.simplify(fun.subs(coeff_sol))
pprint(polynomial)
print()
print('#####################################')
print("For r_t = r_infl:")
bnds[2] = ddfun.subs(dr, dr_t)
bnds[6] = fun.subs(dr, dr_t) - dgam
# all at once is a mess, better to split the solution:
coeff_sol = sympy.solve(list(bnds.values())[:-1], [c0, c1, c2, c3, c4])
print('\nCoefficients')
pprint(coeff_sol)
print("Ξ³-Condition:")
pprint(bnds[6])
print("Ξ³-Condition, substituted:")
pprint(bnds[6].subs(coeff_sol))
bnd6_sol = sympy.solve(bnds[6].subs(coeff_sol), dr_t)
print("Ξ³-Condition, solved for Ξrt:")
pprint(bnd6_sol)
# solving for Ξrt
print('substituted polynomial')
polynomial = sympy.simplify(fun.subs(coeff_sol))
pprint(polynomial) | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/SplineHelper.py | SplineHelper.py |
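# Convergence comparison: runs a single adhesive smooth-sphere contact
# minimization with both NuMPI's LBFGS and scipy's L-BFGS-B and plots the
# energy and maximum-gradient histories against the number of objective
# evaluations.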
import time
import os
import scipy.optimize
import numpy as np
from ContactMechanics import FreeFFTElasticHalfSpace
from SurfaceTopography import make_sphere
#TODO: update API use
from FFTEngine import PFFTEngine
from NuMPI.Optimization import LBFGS
from NuMPI.Tools.Reduction import Reduction
from Adhesion import VDW82smoothMin
from System import SmoothContactSystem
from NuMPI import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
pnp = Reduction(comm=comm)
class iter_inspector():
def __init__(self):
self.neval = 0
self.energies = []
self.maxgradients =[]
def __call__(self, system):
self.neval += 1
self.energies.append(system.energy)
self.maxgradients.append(pnp.max(system.force))
class decorated_objective:
def __init__(self, system, objective):
self.system = system
self.objective = objective
self.neval = 0
self.energies = []
self.maxgradients = []
def __call__(self, *args, **kwargs):
val = self.objective(*args, **kwargs)
self.neval += 1
self.energies.append(system.energy)
self.maxgradients.append(pnp.max(abs(system.force)))
return val
import matplotlib.pyplot as plt
fig, (axEn, axgrad) = plt.subplots(2,1, sharex = True)
n = 256
endx = []
for method, name in zip([LBFGS, "L-BFGS-B"],
["NuMPI", "Scipy"]):
# sphere radius:
r_s = 10.0
# contact radius
r_c = .2
# peak pressure
p_0 = 2.5
# equivalent Young's modulus
E_s = 102. # 102.
# work of adhesion
w = 1.0
# tolerance for optimizer
tol = 1e-12
# tolerance for contact area
gap_tol = 1e-6
nx, ny = n, n
sx = 21.0
z0 = 0.05 # needed to get small tolerance, but very very slow
fftengine = PFFTEngine((2 * nx, 2 * ny), comm=comm)
# the "Min" part of the potential (linear for small z) is needed for the LBFGS without bounds
inter = VDW82smoothMin(w * z0 ** 8 / 3, 16 * np.pi * w * z0 ** 2,
gamma=w, pnp=pnp)
# Parallel SurfaceTopography Patch
substrate = FreeFFTElasticHalfSpace((nx, ny), young=E_s, physical_sizes=(sx, sx),
fft=fftengine, pnp=pnp)
# print(substrate._comp_nb_grid_pts)
# print(fftengine.nb_domain_grid_pts)
surface = make_sphere(radius=r_s, nb_grid_pts=(nx, ny), physical_sizes=(sx, sx),
subdomain_locations=substrate.topography_subdomain_locations,
nb_subdomain_grid_pts=substrate.topography_nb_subdomain_grid_pts,
pnp=pnp,
standoff=float('inf'))
ext_surface = make_sphere(r_s, (2 * nx, 2 * ny), (2 * sx, 2 * sx),
centre=(sx / 2, sx / 2),
subdomain_locations=substrate.subdomain_locations,
nb_subdomain_grid_pts=substrate.nb_subdomain_grid_pts,
pnp=pnp,
standoff=float('inf'))
system = SmoothContactSystem(substrate, inter, surface)
penetration = 0
disp0 = ext_surface.heights() + penetration
disp0 = np.where(disp0 > 0, disp0, 0)
# disp0 = system.shape_minimisation_input(disp0)
maxcor = 20
starttime = time.time()
counter = iter_inspector()
objective_monitor = decorated_objective(system, system.objective(
penetration, gradient=True))
result = scipy.optimize.minimize(objective_monitor,
disp0, method=method, jac=True,
options=dict(
gtol=1e-6 * abs(w / z0),
ftol=1e-20,
maxcor=maxcor))
endx.append(result.x)
print(method)
print(result.message)
print("nevals: {}".format(objective_monitor.neval))
print(result.nit)
converged = result.success
assert converged
axgrad.plot(range(objective_monitor.neval), objective_monitor.maxgradients, label="{}".format(name))
axEn.plot(range(objective_monitor.neval), (objective_monitor.energies -
objective_monitor.energies[-1] )/ (objective_monitor.energies[0] - objective_monitor.energies[-1]),
label="{}".format(name))
print("max(|deformation {} - {} |)= {}".format("NuMPI", "Scipy", np.max(abs(endx[0].reshape(-1)-endx[1].reshape(-1)))))
print("max(|deformation Scipy|) = {}".format(np.max(abs(endx[-1]))))
axEn.set_xlabel("# of objective evaluations")
axEn.set_ylabel("E(i)-E(last) / (E(0)-E(last))")
axEn.set_yscale("log")
axgrad.set_yscale("log")
axgrad.set_ylabel(r"$|grad|_{\infty}$")
axgrad.legend()
for a in (axEn, axgrad):
a.set_xlabel("# of objective evaluations")
a.label_outer()
fig.suptitle("n={}".format(n))
fig.savefig("{}.png".format(os.path.basename(__file__))) | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/LBFGS_convergence_smoothball_scipy_vs_NuMPI.py | LBFGS_convergence_smoothball_scipy_vs_NuMPI.py |
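# Adhesion hysteresis sweep using the cut-off (simple-smooth) van der Waals
# potential: the offset of the spherical indenter is ramped down and back up
# for several grid resolutions while normal force and contact area are
# recorded and plotted live.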
import numpy as np
import matplotlib.pyplot as plt
from Adhesion.System import make_system
from SurfaceTopography import make_sphere
from Adhesion.Interactions import VDW82SimpleSmooth as VdwPot
from ContactMechanics import FreeFFTElasticHalfSpace as Substrate
plt.ion()
E_silicon = 166e9
nu_silicon = .17
E_diamond = 1220e9
nu_diamond = .2
young = 1./((1-nu_silicon**2)/E_silicon+(1-nu_diamond**2)/E_diamond)
radius = 18e-9
base_size = 2*radius
size = (base_size, base_size)
c_sr = 2.1e-78*1e-6
hamaker = 68.1e-21
r_cut = 5e-10
pot = VdwPot(c_sr, hamaker).apply_cutoff(r_cut)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
for base_res in (32, 64, 128, 256, 512):
res = (base_res, base_res)
substrate = Substrate(res, young, size)
surface = make_sphere(radius, res, size, standoff=float('inf'))
system = make_system(substrate, pot, surface)
offset = pot.cutoff_radius * .4
step = pot.r_min*.01
pullof_forces = list()
offsets = list()
contact_area = list()
disp = np.zeros(substrate.nb_domain_grid_pts)
force = -1.
def iterator(initial_offset):
loc_offset = float(initial_offset)
yield loc_offset
while force <= 0.:
loc_offset -= step
yield loc_offset
for i in range(3):
loc_offset += step
yield loc_offset
#while force < 0.:
while offset < pot.cutoff_radius*.4:
loc_offset += step
yield loc_offset
ax1.set_ylabel("normal force")
ax2.set_ylabel("contact area", color='r')
line_force, = ax1.plot(offsets, pullof_forces,
label="nb_grid_pts = {}".format(res))
line_area, = ax2.plot(offsets, contact_area, color=line_force.get_color(),
linestyle='--')
ax1.legend(loc='center right')
marker, = ax1.plot((), (), marker='+', color='b', markersize=20)
for offset in iterator(offset):
result = system.minimize_proxy(offset, disp, deproxify_everytime=False)
disp = system.disp
force = system.babushka.compute_normal_force()
contact_area.append(system.babushka.compute_contact_area())
pullof_forces.append(force)
offsets.append(offset)
line_force.set_xdata(offsets)
line_force.set_ydata(pullof_forces)
marker.set_ydata((force,))
marker.set_xdata((offset,))
line_area.set_xdata(offsets)
line_area.set_ydata(contact_area)
ax1.relim()
ax1.autoscale_view()
ax2.relim()
ax2.autoscale_view()
fig.canvas.draw()
marker.set_ydata(())
marker.set_xdata(())
fig.savefig("fig_{:0>5}.png".format(res[0]))
plt.ioff()
plt.show() | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/hysteresis_simple_smooth.py | hysteresis_simple_smooth.py |
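# Performance profiling: runs one parallel adhesive smooth-contact
# minimization with NuMPI's LBFGS under cProfile, writes one profile file per
# MPI rank and opens it with snakeviz.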
import numpy as np
from ContactMechanics import FreeFFTElasticHalfSpace
from SurfaceTopography import make_sphere
from NuMPI.Optimization import LBFGS
from NuMPI.Tools.Reduction import Reduction
from Adhesion.Interactions import VDW82smoothMin
from Adhesion.System import SmoothContactSystem
from NuMPI.IO import save_npy
from NuMPI import MPI
import cProfile
def profile(filename=None, comm=MPI.COMM_WORLD):
def prof_decorator(f):
def wrap_f(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
result = f(*args, **kwargs)
pr.disable()
if filename is None:
pr.print_stats()
else:
filename_r = filename + ".{}".format(comm.rank)
pr.dump_stats(filename_r)
return result
return wrap_f
return prof_decorator
comm = MPI.COMM_WORLD
# sphere radius:
r_s = 10.0
# contact radius
r_c = .2
# peak pressure
p_0 = 2.5
# equivalent Young's modulus
E_s = 102. # 102.
# work of adhesion
w = 1.0
# tolerance for optimizer
tol = 1e-12
# tolerance for contact area
gap_tol = 1e-6
n = 512 * comm.Get_size()
nx, ny = n, n
sx = 21.0
z0 = 0.05 # needed to get small tolerance, but very very slow
pnp = Reduction(comm=comm)
# the "Min" part of the potential (linear for small z)
# is needed for the LBFGS without bounds
inter = VDW82smoothMin(w * z0 ** 8 / 3, 16 * np.pi * w * z0 ** 2, gamma=w,
communicator=comm)
# Parallel SurfaceTopography Patch
substrate = FreeFFTElasticHalfSpace((nx, ny), young=E_s,
physical_sizes=(sx, sx), communicator=comm)
print(substrate._comp_nb_grid_pts)
# print(fftengine.nb_domain_grid_pts)
surface = make_sphere(
radius=r_s, nb_grid_pts=(nx, ny),
physical_sizes=(sx, sx),
subdomain_locations=substrate.topography_subdomain_locations,
nb_subdomain_grid_pts=substrate.topography_nb_subdomain_grid_pts,
communicator=comm,
standoff=float('inf'))
ext_surface = make_sphere(
r_s, (2 * nx, 2 * ny), (2 * sx, 2 * sx),
centre=(sx / 2, sx / 2),
subdomain_locations=substrate.subdomain_locations,
nb_subdomain_grid_pts=substrate.nb_subdomain_grid_pts,
communicator=comm,
standoff=float('inf'))
system = SmoothContactSystem(substrate, inter, surface)
penetration = 0
disp0 = ext_surface.heights() + penetration
disp0 = np.where(disp0 > 0, disp0, 0)
disp0 = system.shape_minimisation_input(disp0)
def do_step():
result = LBFGS(system.objective(penetration, gradient=True), disp0,
jac=True,
pnp=pnp, gtol=1e-6 * abs(w / z0), ftol=1e-20, maxcor=3)
# result = system.minimize_proxy(offsets[i], disp0=None,
# method = LBFGS,options=dict(gtol = 1e-3, maxiter =100,maxls=10))
u = result.x
u.shape = ext_surface.nb_subdomain_grid_pts
f = substrate.evaluate_force(u)
converged = result.success
assert converged
gap = system.compute_gap(u, penetration)
save_npy("gap_profiling.npy",
gap[tuple([slice(None, r) for r in
substrate.topography_nb_subdomain_grid_pts])],
substrate.topography_subdomain_locations,
substrate.nb_grid_pts,
comm=comm)
do_step = profile("profile_out_{}procs".format(comm.Get_size()), comm)(do_step)
do_step()
# then call snakeviz profile_out.<rank> to see the performance analysis
import subprocess
subprocess.call("snakeviz {}".format(
"profile_out_{}procs.{}".format(comm.Get_size(), comm.Get_rank())),
shell=True) | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/smoothcontact_performance.py | smoothcontact_performance.py |
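Playground notebook: steps through a constrained conjugate-gradient iteration for contact with a Dugdale (constant adhesive stress) interaction on a sphere, one iteration per execution, following the scheme attributed to Bazrafshan in the code comments.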
```
import numpy as np
import matplotlib.pyplot as plt
from PyCo.SolidMechanics import PeriodicFFTElasticHalfSpace, FreeFFTElasticHalfSpace
from PyCo.Topography import Topography, make_sphere
nx = 16
ny = 16
sx = 2 * np.pi
sy = 2 * np.pi
R = 5
topography = make_sphere(R, (nx, ny), (sx,sy))
heights = topography.heights()
plt.colorbar(plt.imshow(heights))
substrate=FreeFFTElasticHalfSpace((nx, ny), young = 100 ,size= (sx, sy))
penetration = 0
external_force = None
offset = 0.5
sigma0 =1
h0 = 0.2
sigma0 *= topography.area_per_pt
nb_surface_pts = np.prod(topography.resolution)
# initial guess for p_r
u_r = np.zeros((2*nx, 2*ny))
heights = np.ones((2*nx, 2*ny)) * (-1e18)
heights[:nx, :ny] = topography.heights()
u_r = np.maximum(heights + offset,0)
#p_r = substrate.evaluate_force(u_r)
p_r = np.ones_like(u_r)* 2* sigma0
# initialisations
delta = 0
G_old = 1.0
tau = 0.0
t_r = np.zeros_like(u_r)
it = 1
pnp= np
plt.imshow(u_r)
plt.colorbar(plt.imshow(p_r))
comp_slice = [slice(0,max(0,min(substrate.resolution[i] - substrate.subdomain_location[i],substrate.subdomain_resolution[i])))
for i in range(substrate.dim)]
comp_mask = np.zeros(substrate.subdomain_resolution, dtype=bool)
comp_mask[tuple(comp_slice)] = True
figpc, (axpc, axgc) = plt.subplots(2,1)
axpc.set_title("pressure")
axpc.grid()
axgc.grid()
figg, axg = plt.subplots()
axg.set_aspect(1)
figg.suptitle("gap")
print(it)
c_r = p_r < sigma0 # = Ic in Bazrafshan
A_cg = pnp.sum(c_r*1)
print("A_cg {}".format(A_cg))
# Compute deformation
####
u_r = substrate.evaluate_disp((p_r<=sigma0) * p_r)
# Compute gap
g_r = u_r- heights
if external_force is not None:
offset = 0
if A_cg > 0:
offset = pnp.sum(g_r[c_r]) / A_cg
g_r -= offset
print("offset: {}".format(offset))
axgc.plot(g_r[:nx, ny//2], "+")
axgc.axhline(h0)
plt.colorbar(axg.pcolormesh(g_r[:nx, :ny] , rasterized = True))
########### Search direction
# Compute G = sum(g*g) (over contact area only)
G = pnp.sum(c_r*g_r*g_r)
if delta > 0 and G_old > 0: # CG step
t_r = c_r*(g_r + delta*(G/G_old)*t_r)
else: # steepest descend step (CG restart)
t_r = c_r*g_r
r_r = substrate.evaluate_disp(t_r)
#bazrafshan
#r_r -= pnp.sum(r_r[c_r]) / A_cg
########## Step size
tau = 0.0
if A_cg > 0:
# tau = -sum(g*t)/sum(r*t) where sum is only over contact region
x = -pnp.sum(c_r*r_r*t_r)
if x > 0.0:
tau = pnp.sum(c_r*g_r*t_r)/x
else:
G = 0.0
# Compute root-mean square penetration, max penetration and max force
# difference between the steps
if A_cg > 0:
rms_pen = np.sqrt(G/A_cg)
else:
rms_pen = np.sqrt(G)
max_pen = max(0.0, pnp.max(-g_r))
print("rms_pen {}".format(rms_pen))
print("max_pen {}".format(max_pen))
########## Do step
print("tau {}".format(tau))
p_r += tau*c_r*t_r
######### Projection on feasible set
p_r[p_r>sigma0] = sigma0
#p_r[np.logical_and] = sigma0 # not in bas, but I suggest to add new points to sigma0
######### Remove points with gap greater then h0 from interacting points
outside_mask = np.logical_and(g_r > h0, p_r >= 0) # bas
#outside_mask = g_r > h0
p_r[outside_mask] = 1000 * sigma0
######### Overlap Area: points to be added to the part where the gap is minimized
#overlap_mask = np.logical_and(g_r < 0, p_r > 0) # bazrafshan
overlap_mask = np.logical_and(g_r < 0, p_r >=sigma0)
# points with p_r < sigma 0 are already in the contact area
N_overlap = pnp.sum(overlap_mask*1.)
print("N_overlap {}".format(N_overlap))
if N_overlap > 0:
    delta = 0.  # this will restart the conjugate gradient with a steepest descent
    if tau != 0:
        p_r[overlap_mask] += tau * g_r[overlap_mask]
else:
    delta = 1.
axpc.plot((p_r * (p_r <= sigma0))[:nx, ny//2] / topography.area_per_pt, "+", label = "before balance")
######### Impose force balance
print("computed_force before balance {}".format(- pnp.sum(p_r * (p_r <= sigma0) )))
if external_force is not None:
contact_mask = p_r < sigma0 # not including the Dugdale zone, because there the pressure should not change
N_contact= pnp.sum(contact_mask)
contact_psum = pnp.sum(p_r[contact_mask])
print(contact_psum)
N_Dugdale = pnp.sum(p_r == sigma0)
print("N_Dugdale: {}".format(N_Dugdale))
if contact_psum != 0:
fact = ( ( - external_force - sigma0 * N_Dugdale) + N_contact * sigma0) \
/ (contact_psum + N_contact * sigma0)
p_r[contact_mask] = fact * (p_r[contact_mask] + sigma0) - sigma0
else:
# add constant pressure everywhere
p_r += (-external_force - sigma0 * N_Dugdale)/nb_surface_pts*np.ones_like(p_r)
#p_r[pad_mask] = 0.0
print("computed_force {}".format(- pnp.sum(p_r * (p_r <= sigma0) )))
figp, axp = plt.subplots()
axp.set_aspect(1)
figp.suptitle("pressure")
plt.colorbar(axp.pcolormesh(p_r[:nx, :ny] * (p_r[:nx, :ny] <= sigma0) / topography.area_per_pt, rasterized = True))
axpc.plot((p_r[:nx, :ny] * (p_r[:nx, :ny] <= sigma0))[:, ny//2] / topography.area_per_pt, "+", label = "after balance")
axpc.legend()
print("max_pen {}".format(max_pen))
fig, ax = plt.subplots()
plt.colorbar(ax.pcolormesh((np.ma.masked_array(p_r[:nx, :ny], mask = 1 - (g_r[:nx, :ny] > 0 )* (g_r[:nx, :ny]<=h0) * (p_r[:nx, :ny] != sigma0) ))))
ax.set_aspect(1)
ax.set_title("regions where 0 < g_r <= h0 but pressure is not sigma0")
#assert (p_r[(g_r > 0 )* (g_r<=h0)] == sigma0).all()
it +=1
delta
c_r
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/CGDugdale_playground.ipynb | CGDugdale_playground.ipynb |
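Prototype notebook for trust-region ("confidence region") Newton methods: intersection with the confidence region, the dogleg and Steihaug-Toint sub-problem solvers and a Newton driver; algorithm and page numbers refer to Bierlaire (2006), as cited in the code comments.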
```
import scipy
import scipy.optimize
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Algorithm 12.1 (p. 297) Intersection with confidence region
def intersection_confidence_region(x_start, direction, radius):
"""
Find the intersection between a direction and the boundary of the
confidence region
returns the step length
Keyword Arguments:
x_start -- starting point |x| <= radius
direction -- search direction != 0
radius -- scalar > 0
"""
a = float(direction.T * direction)
b = float(2 * x_start.T * direction)
c = float(x_start.T * x_start - radius**2)
return (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
x_start = np.matrix([0., 0]).T
direction = np.matrix([0, -2.]).T
radius = 4
print(intersection_confidence_region(x_start, direction, radius))
# Algorithm 12.2 (p.297) dogleg method
def dogleg(grad_f, hess_f, radius):
"""
Finds an approximation to the solution of the confidence region sub-problem
Keyword Arguments:
grad_f -- current value of the gradient (column vector)
hess_f -- value of the hessian matrix (square matrix)
radius -- radius of the confidence region
"""
# Cauchy point
# 1) Computation of the curvature in steepest descent direction
beta = float(grad_f.T * hess_f * grad_f)
# 2) If beta <=0, the model is not locally convex.
if beta <= 0:
return -radius / np.linalg.norm(grad_f) * grad_f
# 3) else, compute the Cauchy point
alpha = float(grad_f.T * grad_f)
d_c = - alpha / beta * grad_f
# 4) make sure we're still in the confidence interval
step_len_c = np.linalg.norm(d_c)
if step_len_c > radius:
return radius / step_len_c * d_c
# Newton point
# 1) compute directly
d_n = np.linalg.solve(hess_f, -grad_f)
# 2) If not convex, stop with Cauchy point
if float(d_n.T * hess_f * d_n) <= 0:
return d_c
# 3) if d_n in region, return with it
if np.linalg.norm(d_n) <= radius:
return d_n
# Dogleg point
# 1)
eta = 0.2 + (0.8 * alpha**2) / (beta * float(abs( grad_f.T * d_n)))
d_d = eta * d_n
# 2)
if np.linalg.norm(d_d) <= radius:
return radius / np.linalg.norm(d_n) * d_n
# between Cauchy and dogleg
# 1) compute the intersection
step_len = intersection_confidence_region(d_c, d_d - d_c, radius)
return d_c + step_len * (d_d - d_c)
```
Test dogleg with example 12.3 p. 294
```
def fun(x):
return .5*x[0]**2 + 4.5 * x[1]**2
def grad_f(x):
return np.matrix([x[0, 0], 9*x[1, 0]]).T
def hess_f(x):
return np.matrix([[1, 0], [0, 9]])
x0 = np.matrix([9, 1]).T
f = fun(x0)
grad = grad_f(x0)
hess = hess_f(x0)
nb_steps = 16
radii = np.linspace(0, 10, nb_steps+1)[1:]
iterates = np.zeros((nb_steps, 2))
for i, radius in enumerate(radii):
iterates[i, :] = (dogleg(grad, hess, radius) + x0).T
print(iterates)
x, y = np.linspace(-1, 10, 51), np.linspace(-3, 3, 51)
X, Y = np.meshgrid(x, y)
Z = fun([X, Y])
plt.contour(X, Y, Z)
x, y = iterates[:, 0], iterates[:, 1]
plt.plot(x, y)
# Algorithm 12.3 (p.297) Steihaug-Toint method
def steihaug_toint(grad_f, hess_f, radius, tol = 1e-14):
"""
Finds an approximation to the solution of the confidence region sub-problem
Keyword Arguments:
grad_f -- current value of the gradient (column vector)
hess_f -- value of the hessian matrix (square matrix)
radius -- radius of the confidence region
"""
# initialisation
direction = -grad_f.copy()
xk = np.zeros_like(direction)
xkp1 = np.zeros_like(direction)
    for i in range(grad_f.size + 1):
# 1) Computation of the curvature in steepest descent direction
if float(direction.T * hess_f * direction) <= 0:
step_len = intersection_confidence_region(xk, direction, radius)
return xk + step_len * direction
# 2) Compute step length
alpha = (- float(direction.T*(hess_f * xk + grad_f)) /
float(direction.T * hess_f * direction))
# 3) compute next iterate
xkp1 = xk + alpha * direction
# 4)
if np.linalg.norm(xkp1) > radius:
step_len = intersection_confidence_region(xk,
direction, radius)
return xk + step_len * direction
# 5) compute beta
grad_k = hess_f * xk + grad_f
grad_kp1 = hess_f * xkp1 + grad_f
beta = float((grad_kp1.T * grad_kp1) / (grad_k.T * grad_k))
# 6) compute new direction
direction = - grad_kp1 + beta * direction
if np.linalg.norm(grad_kp1) < tol:
return xkp1
# 7) cleanup
xk[:] = xkp1[:]
return xk
x0 = np.matrix([9., 1.]).T
f = fun(x0)
grad = grad_f(x0)
hess = hess_f(x0)
nb_steps = 5
radii = np.linspace(0, 10, nb_steps+1)[1:]
iterates = np.zeros((nb_steps, 2))
for i, radius in enumerate(radii):
iterates[i, :] = (steihaug_toint(grad, hess, radius) + x0).T
print(iterates)
x, y = np.linspace(-1, 10, 51), np.linspace(-3, 3, 51)
X, Y = np.meshgrid(x, y)
Z = fun([X, Y])
plt.contour(X, Y, Z)
x, y = iterates[:, 0], iterates[:, 1]
plt.plot(x, y)
# Algorithm 12.4 (p.301) Newton method with confidence interval
def newton_confidence_region(fun, x0, jac, hess, tol, store_iterates=None,
radius0=10., eta1=0.01, eta2=.9,
method=steihaug_toint, **options):
"""
Keyword Arguments:
fun -- objective function to minimize
x0 -- initial guess for solution
jac -- Jacobian (gradient) of objective function
hess -- Hessian (matrix of second-order derivatives) of objective
function
tol -- Tolerance for termination
store_iterates -- (default None) if set to 'iterate' the full iterates are
stored in module-level constant iterates
    radius0 -- (default 10) size of the initial confidence region
eta1/eta2 -- (default 0.01, 0.9) heuristics for step length
modifications. Defaults from Bierlaire (2006)
method -- (default 'steihaug_toint') solver for confidence region
sub-problem. can be either steihaug_toint or dogleg
**options -- none of those will be used
"""
# initialisation
maxiter_key = 'maxiter'
if maxiter_key not in options.keys():
options[maxiter_key] = 20
class ReachedTolerance(StopIteration): pass
class ReachedMaxiter(StopIteration): pass
counter = 0
x = np.matrix(x0.copy()).reshape((-1, 1))
radius = radius0
iterates = list()
state = ''
if store_iterates == 'iterate':
iterate = scipy.optimize.OptimizeResult(
{'x': x.copy(),
'fun': fun(x),
'jac': jac(x),
'hess': hess(x),
'radius': radius,
'rho': float('nan'),
'state': state})
iterates.append(iterate)
try:
while True:
b = np.matrix(jac(x))
norm_grad = np.linalg.norm(b)
if norm_grad < tol:
raise ReachedTolerance(
"||grad f(x)|| = {} < {} = tol".format(
norm_grad, tol))
if counter == options['maxiter']:
raise ReachedMaxiter("reached maxiter ({})".format(
options['maxiter']))
# 1) solve sub-problem
Q = np.matrix(hess(x))
direction = method(b, Q, radius)
# 2)
xpd = x + direction
rho = ((fun(x)-fun(xpd)) /
(-float(.5*direction.T*Q*direction + direction.T*b)))
# 3)
if rho < eta1:
# fail
state = '-'
radius = .5 * np.linalg.norm(direction)
else:
x += direction
state = '+'
if rho > eta2:
# very good progress
state = '++'
radius *= 2
counter += 1
if store_iterates == 'iterate':
iterate = scipy.optimize.OptimizeResult(
{'x': x.copy(),
'fun': fun(x),
'jac': jac(x),
'hess': hess(x),
'radius': radius,
'rho': rho,
'state': state})
iterates.append(iterate)
except ReachedMaxiter as err:
message = str(err)
success = False
except(ReachedTolerance) as err:
message = str(err)
success = True
result = scipy.optimize.OptimizeResult({'message': message,
'success': success,
'x': x,
'fun': fun(x),
'jac': jac(x),
'hess': hess(x),
'nit': counter})
if iterates:
result['iterates'] = iterates
return result
def fun(x):
return .5*x[0, 0]**2 + x[0, 0]* np.cos(x[1, 0])
def jac(x):
return np.array([[x[0, 0] + np.cos(x[1, 0])],
[-x[0, 0] * np.sin(x[1, 0])]])
def hess(x):
return np.array([[ 1., -np.sin(x[1, 0])],
[-np.sin(x[1, 0]), -x[0, 0] * np.cos(x[1, 0])]])
x0 = np.array([1.,1.])
result = scipy.optimize.minimize(fun, x0=x0, method=newton_confidence_region,
jac=jac, hess=hess, tol=1.e-8,
options={'store_iterates': 'iterate',
'method': dogleg,
'radius0': 1})
print(" k x_k f(x_k) |βf(x_k))| Ξ_k Ο_k state")
for (i, it) in enumerate(result.iterates):
print("{0:>2} ({6:+.4e}, {7:+.4e}) {1:+.4e} {2:.4e} {3:>.4e} {4:+.4e} {5:>4}".format(i, it.fun, np.linalg.norm(it.jac), it.radius, it.rho, it.state, it.x[0,0], it.x[1,0]))
print(np.array([it.x.reshape(-1) for it in result.iterates]).reshape((-1, 2)))
print(result.x)
```
Compare to Fig 12.4, p. 307
```
iterates = np.array([iterate.x for iterate in result.iterates])
x, y = iterates[:, 0], iterates[:, 1]
xg, yg = np.linspace(-2, 2, 51), np.linspace(-6, 6, 51)
X, Y = np.meshgrid(xg,yg)
def scalar_fun(x):
return .5*x[0]**2 + x[0]* np.cos(x[1])
Z = scalar_fun([X, Y])
plt.contour(X, Y, Z)
plt.plot(x, y, c='r')
plt.figure()
x0 = np.array([1.,1.])
result = scipy.optimize.minimize(fun, x0=x0, method=newton_confidence_region,
jac=jac, hess=hess, tol=1.e-8,
options={'store_iterates': 'iterate',
'method': steihaug_toint,
'radius0': 10})
print(" k x_k f(x_k) |βf(x_k))| Ξ_k Ο_k state")
for (i, it) in enumerate(result.iterates):
print(("{0:>2} ({6:+.4e}, {7:+.4e}) {1:+.4e} {2:.4e} {3:>.4e} {4:+.4e} {5:>4}"
"").format(i, float(it.fun), np.linalg.norm(it.jac), it.radius, float(it.rho), it.state, it.x[0,0], it.x[1,0]))
print(np.array([it.x.reshape(-1) for it in result.iterates]).reshape((-1, 2)))
print(result.x)
```
Compare to Fig. 12.5, p.307
```
iterates = np.array([iterate.x for iterate in result.iterates])
x, y = iterates[:, 0], iterates[:, 1]
plt.contour(X, Y, Z)
plt.plot(x, y)
%lsmagic
%%writefile?
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/Testing_NewtonConfidenceRegion.ipynb | Testing_NewtonConfidenceRegion.ipynb |
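Demo notebook: constrained conjugate-gradient solver with a Dugdale (constant adhesive stress) interaction on a periodic cosine topography, run inside a generator so the figure can be updated iteration by iteration.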
```
import numpy as np
import matplotlib.pyplot as plt
from PyCo.SolidMechanics import PeriodicFFTElasticHalfSpace
from PyCo.Topography import Topography
from IPython.display import HTML
pnp = np
nx = 32
ny = 32
sx = 2 * np.pi
sy = 2 * np.pi
x = np.arange(nx).reshape(-1,1) /nx *sx
y = np.arange(ny).reshape(1,-1) /ny *sy
heights = np.cos(x) * np.cos(y)
heights -= np.max(heights)
topography = Topography(heights,(sx, sy))
plt.colorbar(plt.imshow(heights))
```
Original Bazrafshan scheme
```
external_force= 2
sigma0 =1
h0 = 0.2
sigma0 *= topography.area_per_pt
nb_surface_pts = np.prod(topography.resolution)
substrate = PeriodicFFTElasticHalfSpace((nx,ny), young= 10, size=(sx,sy))
fig, (axpc, axg, axp) = plt.subplots(1,3, figsize=(12,4))
MAXIT =200
def yield_iter(fig, sigma0, h0,external_force):
# initial guess for p_r
p_r = - np.ones_like(heights) * external_force / nb_surface_pts
u_r = substrate.evaluate_disp(p_r)
# initialisations
delta = 0
G_old = 1.0
tau = 0.0
t_r = np.zeros_like(u_r)
g_r = u_r - heights
axpc.set_title("pressure")
axpc.axhline(0)
axpc.axhline(sigma0)
axpc.set_ylabel("pressure")
lpc, = axpc.plot((p_r * (p_r <= sigma0))[:, ny//2] / topography.area_per_pt, "+-r", label = "before balance")
axgc = axpc.twinx()
axgc.axhline(0)
axgc.axhline(h0)
axgc.set_ylabel("gap")
lgc, = axgc.plot(g_r[:, ny//2], "--")
#axgc.grid()
caxp = plt.colorbar(axp.pcolormesh(p_r * (p_r <= sigma0) / topography.area_per_pt, rasterized = True), ax = axp).ax
caxg = plt.colorbar(axg.pcolormesh(g_r , rasterized = True), ax = axg).ax
axg.set_aspect(1)
axp.set_aspect(1)
axg.set_title("gap")
it = 1
max_pen = 1
while max_pen > 1e-8 and it < MAXIT:
axp.clear()
axg.clear()
c_r = p_r < sigma0 # = Ic in Bazrafshan
A_cg = pnp.sum(c_r*1)
print("A_cg {}".format(A_cg))
# Compute deformation
u_r = substrate.evaluate_disp((p_r<=sigma0) * p_r)
# Compute gap
g_r = u_r - heights
if external_force is not None:
offset = 0
if A_cg > 0:
offset = pnp.sum(g_r[c_r]) / A_cg
g_r -= offset
print("offset: {}".format(offset))
lgc.set_ydata(g_r[:, ny//2])
plt.colorbar(axg.pcolormesh(g_r , rasterized = True), cax = caxg)
########### Search direction
# Compute G = sum(g*g) (over contact area only)
G = pnp.sum(c_r*g_r*g_r)
if delta > 0 and G_old > 0: # CG step
t_r = c_r*(g_r + delta*(G/G_old)*t_r)
else: # steepest descend step (CG restart)
t_r = c_r*g_r
r_r = substrate.evaluate_disp(t_r)
#bazrafshan
#r_r -= pnp.sum(r_r[c_r]) / A_cg
########## Step size
tau = 0.0
if A_cg > 0:
# tau = -sum(g*t)/sum(r*t) where sum is only over contact region
x = -pnp.sum(c_r*r_r*t_r)
if x > 0.0:
tau = pnp.sum(c_r*g_r*t_r)/x
else:
print("x < 0")
G = 0.0
# Compute root-mean square penetration, max penetration and max force
# difference between the steps
if A_cg > 0:
rms_pen = np.sqrt(G/A_cg)
else:
rms_pen = np.sqrt(G)
max_pen = max(0.0, pnp.max(-g_r))
print("rms_pen {}".format(rms_pen))
print("max_pen {}".format(max_pen))
########## Do step
print("tau {}".format(tau))
p_r += tau*c_r*t_r
######### Projection on feasible set
p_r[p_r>sigma0] = sigma0
#p_r[np.logical_and(g_r>0, p_r>=0)] = sigma0 # not in bas, but I suggest to add new points to sigma0
######### Remove points with gap greater then h0 from interacting points
outside_mask = np.logical_and(g_r > h0, p_r >= 0) # bas
#outside_mask = g_r > h0
p_r[outside_mask] = 1000 * sigma0
######### Overlap Area: points to be added to the part where the gap is minimized
overlap_mask = np.logical_and(g_r < 0, p_r > 0) # bazrafshan
#overlap_mask = np.logical_and(g_r < 0, p_r >=sigma0)
# points with p_r < sigma 0 are already in the contact area
N_overlap = pnp.sum(overlap_mask*1.)
print("N_overlap {}".format(N_overlap))
if N_overlap > 0:
delta = 0. # this will restart the conjugate gradient with a steepest descent
p_r[overlap_mask] += tau * g_r[overlap_mask]
else:
delta = 1.
lpc.set_ydata((p_r * (p_r <= sigma0))[:, ny//2] / topography.area_per_pt)
######### Impose force balance
print("computed_force before balance {}".format(- pnp.sum(p_r * (p_r <= sigma0) )))
if external_force is not None:
contact_mask = p_r < sigma0 # not including the Dugdale zone, because there the pressure should not change
N_contact= pnp.sum(contact_mask)
contact_psum = pnp.sum(p_r[contact_mask])
print(contact_psum)
N_Dugdale = pnp.sum(p_r == sigma0)
print("N_Dugdale: {}".format(N_Dugdale))
if contact_psum != 0:
fact = ( ( - external_force - sigma0 * N_Dugdale) + N_contact * sigma0) \
/ (contact_psum + N_contact * sigma0)
p_r[contact_mask] = fact * (p_r[contact_mask] + sigma0) - sigma0
else:
# add constant pressure everywhere
p_r += (-external_force - sigma0 * N_Dugdale)/nb_surface_pts*np.ones_like(p_r)
#p_r[pad_mask] = 0.0
print("computed_force {}".format(- pnp.sum(p_r * (p_r <= sigma0) )))
plt.colorbar(axp.pcolormesh(p_r * (p_r <= sigma0) / topography.area_per_pt, rasterized = True), cax = caxp)
#axpc.plot((p_r * (p_r <= sigma0))[:, ny//2] / topography.area_per_pt, "+", label = "after balance")
#axpc.legend()
print("max_pen {}".format(max_pen))
#fig, ax = plt.subplots()
#plt.colorbar(ax.pcolormesh((np.ma.masked_array(p_r, mask = 1 - (g_r > 0 )* (g_r<=h0) * (p_r != sigma0) ))))
#ax.set_aspect(1)
#ax.set_title("regions where 0 < g_r <= h0 but pressure is not sigma0")
#assert (p_r[(g_r > 0 )* (g_r<=h0)] == sigma0).all()
it +=1
fig.canvas.draw()
yield it
#
#ani = FuncAnimation(fig, lambda i : gen.__next__(), frames=range(MAXIT), init_func= gen.__next__)
#HTML(ani.to_html5_video())
fig, (axpc, axg, axp) = plt.subplots(1,3, figsize=(12,4))
gen=yield_iter(fig, sigma0, h0,external_force)
next(gen)
fig
plt.colorbar(plt.imshow(c_r*1.))
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/CGDugdale_Demo.ipynb | CGDugdale_Demo.ipynb |
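# Convergence comparison of scipy's L-BFGS-B and NuMPI's LBFGS on an adhesive
# contact with a self-affine randomly rough surface; plots energy and
# maximum-gradient histories. See the TODO below: the script uses a deprecated
# API and is currently broken.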
import time
import os
import scipy.optimize
import numpy as np
from ContactMechanics import PeriodicFFTElasticHalfSpace
from SurfaceTopography.Generation import fourier_synthesis
# TODO: this file uses deprecated API and is broken
from FFTEngine import PFFTEngine  # needed for PFFTEngine below (deprecated API)
from NuMPI.Optimization import LBFGS
from NuMPI.Tools.Reduction import Reduction
from Adhesion import VDW82smoothMin
from ContactMechanics.System import SmoothContactSystem
from NuMPI import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
pnp = Reduction(comm=comm)
class iter_inspector():
def __init__(self):
self.neval = 0
self.energies = []
self.maxgradients =[]
def __call__(self, system):
self.neval += 1
self.energies.append(system.energy)
self.maxgradients.append(pnp.max(system.force))
class decorated_objective:
def __init__(self, system, objective):
self.system = system
self.objective = objective
self.neval = 0
self.energies = []
self.maxgradients = []
def __call__(self, *args, **kwargs):
val = self.objective(*args, **kwargs)
self.neval += 1
self.energies.append(system.energy)
self.maxgradients.append(pnp.max(abs(system.force)))
return val
import matplotlib.pyplot as plt
fig, (axEn, axgrad) = plt.subplots(2,1, sharex = True)
n = 512
axEn.set_xlabel("# of objective evaluations")
axEn.set_ylabel("E(i)-E(last) / (E(0)-E(last))")
axEn.set_yscale("log")
axgrad.set_yscale("log")
axgrad.set_ylabel(r"$|grad|_{\infty}$")
for a in (axEn, axgrad):
a.set_xlabel("# of objective evaluations")
a.label_outer()
for method, name in zip(["L-BFGS-B", LBFGS],
["Scipy", "NuMPI"]):
print("#################")
print(name)
# equivalent Young's modulus
E_s = 1000000. # 102.
# work of adhesion
nx, ny = n, n
dx = 1.0
dy = 1.0 # in units of z0
sx = nx * dx
sy = ny * dy
z0 = 4.0 # needed to get small tolerance, but very very slow
w = 0.01 * E_s * z0
fftengine = PFFTEngine((nx, ny), comm=comm)
# the "Min" part of the potential (linear for small z) is needed for the LBFGS without bounds
inter = VDW82smoothMin(w * z0 ** 8 / 3, 16 * np.pi * w * z0 ** 2,
gamma=w, pnp=pnp)
# Parallel SurfaceTopography Patch
substrate = PeriodicFFTElasticHalfSpace((nx, ny), young=E_s,
physical_sizes=(sx, sx), pnp=pnp)
# print(substrate._comp_nb_grid_pts)
# print(fftengine.nb_domain_grid_pts)
surface = fourier_synthesis((nx, ny), (sx, sy), hurst=0.8, rms_height=1, short_cutoff=8, long_cutoff=sx / 2)
system = SmoothContactSystem(substrate, inter, surface)
penetration = -0.1
disp0 = surface.heights() + penetration
disp0 = np.where(disp0 > 0, disp0, 0)
# disp0 = system.shape_minimisation_input(disp0)
maxcor = 10
starttime = time.time()
objective_monitor = decorated_objective(system, system.objective(
penetration, gradient=True))
try:
result = scipy.optimize.minimize(objective_monitor,
disp0, method=method, jac=True,
options=dict(
gtol=1e-6 * abs(w / z0),
ftol=1e-30,
maxcor=maxcor))
converged = result.success
assert converged
except Exception as err:
print("went wrong")
print(err)
print(method)
print(result.message)
print("nevals: {}".format(objective_monitor.neval))
print(result.nit)
axgrad.plot(range(objective_monitor.neval), objective_monitor.maxgradients, label="{}".format(name))
axEn.plot(range(objective_monitor.neval), (objective_monitor.energies - objective_monitor.energies[-1] )/ (objective_monitor.energies[0] - objective_monitor.energies[-1]), label="{}".format(name))
axgrad.legend()
fig.suptitle("n={}".format(n))
fig.savefig("{}.png".format(os.path.basename(__file__))) | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/LBFGS_convergence_randomly_rough_scipy_vs_NuMPI.py | LBFGS_convergence_randomly_rough_scipy_vs_NuMPI.py |
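# Adhesion hysteresis sweep using the VDW82smoothMin potential: the offset of
# the spherical indenter is ramped down and back up for several grid
# resolutions while normal force and contact area are recorded and plotted
# live.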
import numpy as np
import matplotlib.pyplot as plt
from Adhesion.System import make_system
from SurfaceTopography import make_sphere
from Adhesion import VDW82smoothMin as VdwPot
from ContactMechanics import FreeFFTElasticHalfSpace as Substrate
plt.ion()
E_silicon = 166e9
nu_silicon = .17
E_diamond = 1220e9
nu_diamond = .2
young = 1./((1-nu_silicon**2)/E_silicon+(1-nu_diamond**2)/E_diamond)
radius = 40e-9
base_size = 2*radius
size = (base_size, base_size)
c_sr = 2.1e-78
hamaker = 68.1e-21
pot = VdwPot(c_sr, hamaker)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
for base_res in (32, 64, 128, 256, 512):
res = (base_res, base_res)
substrate = Substrate(res, young, size)
surface = make_sphere(radius, res, size, standoff=float('inf'))
system = make_system(substrate, pot, surface)
offset = pot.cutoff_radius
step = pot.r_min*.01
pullof_forces = list()
offsets = list()
contact_area = list()
disp = np.zeros(substrate.nb_domain_grid_pts)
force = -1.
def iterator(initial_offset):
loc_offset = float(initial_offset)
yield loc_offset
while force <= 0.:
loc_offset -= step
yield loc_offset
for i in range(3):
loc_offset += step
yield loc_offset
while force < 0.:
loc_offset += step
yield loc_offset
ax1.set_ylabel("normal force")
ax2.set_ylabel("contact area", color='r')
line_force, = ax1.plot(offsets, pullof_forces,
label="nb_grid_pts = {}".format(res))
line_area, = ax2.plot(offsets, contact_area, color=line_force.get_color(),
linestyle='--')
ax1.legend(loc='center right')
marker, = ax1.plot((), (), marker='+', color='b', markersize=20)
for offset in iterator(offset):
result = system.minimize_proxy(offset, disp, deproxify_everytime=False)
disp = system.disp
force = system.babushka.compute_normal_force()
contact_area.append(system.babushka.compute_contact_area())
pullof_forces.append(force)
offsets.append(offset)
line_force.set_xdata(offsets)
line_force.set_ydata(pullof_forces)
marker.set_ydata((force,))
marker.set_xdata((offset,))
line_area.set_xdata(offsets)
line_area.set_ydata(contact_area)
ax1.relim()
ax1.autoscale_view()
ax2.relim()
ax2.autoscale_view()
fig.canvas.draw()
marker.set_ydata(())
marker.set_xdata(())
fig.savefig("fig_{:0>5}.png".format(res[0]))
plt.ioff()
plt.show() | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/hysteresis.py | hysteresis.py |
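# Studies the influence of the LBFGS history size (maxcor) on run time and
# iteration count for the adhesive smooth-sphere contact at several grid
# sizes, and plots the results to influence_maxcor.png.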
import time
starttime=time.time()
import numpy as np
from ContactMechanics import FreeFFTElasticHalfSpace
from SurfaceTopography import make_sphere
from FFTEngine import PFFTEngine
from NuMPI.Optimization import LBFGS
from NuMPI.Tools.Reduction import Reduction
from Adhesion import VDW82smoothMin
from System import SmoothContactSystem
from NuMPI import MPI
import sys
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
try:
n = int(sys.argv[1])
except Exception:
n = 128
import matplotlib.pyplot as plt
fig, (axt, axit) = plt.subplots(2, 1, sharex=True)
for n in [128,256,512]:
# sphere radius:
r_s = 10.0
# contact radius
r_c = .2
# peak pressure
p_0 = 2.5
# equivalent Young's modulus
E_s = 102.#102.
# work of adhesion
w = 1.0
# tolerance for optimizer
tol = 1e-12
# tolerance for contact area
gap_tol = 1e-6
nx, ny = n, n
sx = 21.0
z0 = 0.05 # needed to get small tolerance, but very very slow
fftengine = PFFTEngine((2*nx, 2*ny), comm=comm)
pnp = Reduction(comm=comm)
# the "Min" part of the potential (linear for small z) is needed for the LBFGS without bounds
inter = VDW82smoothMin(w * z0 ** 8 / 3, 16 * np.pi * w * z0 ** 2, gamma=w, pnp = pnp)
# Parallel SurfaceTopography Patch
substrate = FreeFFTElasticHalfSpace((nx,ny), young=E_s, physical_sizes=(sx, sx), fft=fftengine, pnp=pnp)
#print(substrate._comp_nb_grid_pts)
#print(fftengine.nb_domain_grid_pts)
surface = make_sphere(radius=r_s, nb_grid_pts=(nx, ny), physical_sizes=(sx, sx),
subdomain_locations=substrate.topography_subdomain_locations,
nb_subdomain_grid_pts=substrate.topography_nb_subdomain_grid_pts,
pnp=pnp,
standoff=float('inf'))
ext_surface = make_sphere(r_s, (2 * nx, 2 * ny), (2 * sx, 2 * sx),
centre=(sx / 2, sx / 2),
subdomain_locations=substrate.subdomain_locations,
nb_subdomain_grid_pts=substrate.nb_subdomain_grid_pts,
pnp=pnp,
standoff=float('inf'))
system = SmoothContactSystem(substrate, inter, surface)
penetration = 0
disp0 = ext_surface.heights() + penetration
disp0 = np.where(disp0 > 0, disp0, 0)
disp0 = system.shape_minimisation_input(disp0)
maxcors = [5, 10, 20]
times = [None] * len(maxcors)
nits = [None] * len(maxcors)
for i, maxcor in enumerate(maxcors):
starttime =time.time()
result = LBFGS(system.objective(penetration, gradient=True), disp0, jac=True, pnp=pnp, maxcor=maxcor, gtol=1e-6 * abs(w/z0))
times[i] = time.time() - starttime
nits[i]=result.nit
#result = system.minimize_proxy(offsets[i], disp0=None,method = LBFGS,options=dict(gtol = 1e-3, maxiter =100,maxls=10))
print(result.nit)
print(times[i])
converged = result.success
assert converged
if rank == 0:
axt.plot(maxcors, times, label="n={}".format(n))
axit.plot(maxcors, nits, label="n={}".format(n))
axit.set_xlabel("# gradients stored (-)")
axt.set_ylabel("execution time (s)")
axit.set_ylabel("# of iterations")
axt.legend()
fig.savefig("influence_maxcor.png") | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/influence_maxcor.py | influence_maxcor.py |
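Prototype notebook for a Newton method with Wolfe line search: the two Wolfe conditions, a modified Cholesky factorization, the line search itself and a Newton driver; algorithm and page numbers refer to Bierlaire (2006), as cited in the code comments.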
```
import sys
import os
import numpy as np
import scipy.optimize
def test_fun(x):
x.shape=(-1, 1)
return .5*x[0, 0]**2 + x[0, 0]* np.cos(x[1, 0])
def test_jac(x):
x.shape=(-1, 1)
return np.matrix([[x[0, 0] + np.cos(x[1, 0])],
[-x[0, 0] * np.sin(x[1, 0])]])
def test_hess(x):
x.shape=(-1, 1)
return np.matrix([[ 1., -np.sin(x[1, 0])],
[-np.sin(x[1, 0]), -x[0, 0] * np.cos(x[1, 0])]])
# fname = "../PyPyContact/Tools/Optimisation/NewtonLineSearch.py"
# with open(fname) as filehandle:
# content = ''.join((line for line in filehandle))
# exec(content)
def first_wolfe_condition(fun, x0, fprime, direction, alpha, beta1):
"""
p. 268, 11.19
Keyword Arguments:
fun -- objective function to minimize
x0 -- initial guess for solution
fprime -- Jacobian (gradient)
direction -- search direction (column vec)
    alpha -- step size
beta1 -- lower wolfe bound
"""
return fun(x0+alpha*direction) <= fun(x0) + \
alpha * beta1 * float(fprime(x0).T * direction)
def second_wolfe_condition(x0, fprime, direction, alpha, beta2):
"""
p. 270, 11.21
Keyword Arguments:
x0 -- initial guess for solution
fprime -- Jacobian (gradient) of objective function
direction -- search direction
    alpha -- step size
beta2 -- upper wolfe bound
"""
return (float(fprime(x0 + alpha*direction).T * direction) >=
beta2*float(fprime(x0).T * direction))
def modified_cholesky(symmat, maxiter = 20):
"""
Modify a symmetric matrix A in order to make it positive definite. Returns
a lower triangular matrix L and a scalar Ο > 0 so that
A + ΟI = LL^T
Keyword Arguments:
symmat -- symmetric matrix
"""
fronorm = np.linalg.norm(symmat, ord='fro')
if np.diag(symmat).min() > 0:
tau = 0
else:
tau = .5*fronorm
success = False
I = np.eye(symmat.shape[0])
for i in range(maxiter):
try:
L = np.linalg.cholesky(symmat + tau*I)
return L, tau
except np.linalg.LinAlgError:
tau = max(2*tau, .5*fronorm)
raise Exception("Couldn't factor")
import warnings


class ReachedMaxiterWarning(RuntimeWarning):
    # warning category used by line_search below when maxiter is reached
    pass
# implements the line search, p. 273, algo 11.2
def line_search(fun, x0, fprime, direction, alpha0, beta1=1e-4, beta2=0.99,
step_factor=3., store_iterates=None, maxiter=40):
"""
    find a step size alpha that satisfies both conditions of Wolfe
Keyword Arguments:
fun -- objective function to minimize
x0 -- initial guess for solution
fprime -- Jacobian (gradient) of objective function
direction -- search direction
    alpha0 -- Initial guess for the step size
beta1 -- (default 1e-4)
beta2 -- (default 0.99)
step_factor -- (default 3.) step increase when too short
store_iterates -- (default None) if set to 'iterate' the full iterates are
stored in module-level constant iterates
maxiter -- (default 20) abort and raise Exception after maxiter is
reached
"""
alpha_l = 0
alpha_r = float('inf')
alpha = alpha0
wolfe1 = first_wolfe_condition(fun, x0, fprime, direction, alpha, beta1)
wolfe2 = second_wolfe_condition(x0, fprime, direction, alpha, beta2)
iterates = list()
counter = 0
violation = 0
if store_iterates == 'iterate':
iterate = scipy.optimize.OptimizeResult(
{'x': x0.copy(),
'fun': fun(x0),
'jac': fprime(x0),
'alpha_i': alpha,
'alpha_r': alpha_r,
'alpha_l': alpha_l,
'violation': 0})
iterates.append(iterate)
while not (wolfe1 and wolfe2):
if counter == maxiter:
warnings.warn(
("Line search did not converge. Are your jacobians correct? "
"wolfe1 = {}, wolfe2 = {}, alpha = {}, nit = {}.\n"
"If they are, machine precision has been reached. Currently,"
" progress regarding funval would be {}").format(
wolfe1, wolfe2, alpha, counter, float(alpha * fprime(x0).T*direction)),
ReachedMaxiterWarning)
break
if not wolfe1: # step too long
alpha_r = alpha
alpha = .5*(alpha_l + alpha_r)
violation = 1
elif wolfe1 and not wolfe2:
alpha_l = alpha
violation = 2
if np.isfinite(alpha_r):
alpha = .5*(alpha_l + alpha_r)
else:
alpha *= step_factor
wolfe1 = first_wolfe_condition(fun, x0, fprime, direction, alpha, beta1)
wolfe2 = second_wolfe_condition(x0, fprime, direction, alpha, beta2)
if store_iterates == 'iterate':
iterate = scipy.optimize.OptimizeResult(
{'x': x0.copy(),
'fun': fun(x0),
'jac': fprime(x0),
'alpha_i': alpha,
'alpha_r': alpha_r,
'alpha_l': alpha_l,
'violation': violation})
iterates.append(iterate)
counter += 1
result = scipy.optimize.OptimizeResult({'success': True,
'x': alpha,
'nit': counter,
'violation':violation})
if iterates:
result['iterates'] = iterates
return result
# verification of line search with example 11.2, p. 259, results listed in table 11.4
Q = np.matrix([[1., 0.],
[0., 9.]])
obj = lambda x: float(0.5*x.T*Q*x)
grad = lambda x: Q*x
hess_st = lambda x: Q
x = np.matrix([10, 1.]).T
d = np.matrix([-2., 1,]).T/np.sqrt(5)
alpha0 = 1e-3
beta1, beta2 = 0.3, 0.7
step_factor = 20
result = line_search(obj, x, grad, d, alpha0, beta1, beta2, step_factor, store_iterates='iterate')
print(" k Ξ±_i Ξ±_l Ξ±_r violation ")
for (i, it) in enumerate(result.iterates):
print("{:>2} {:+.4e} {:>+.4e} {:+.4e} {}".format(i, it.alpha_i, it.alpha_l, it.alpha_r, it.violation))
print("α_i = {}".format(result.x))
from PyPyContact.Tools.Optimisation import ReachedTolerance, ReachedMaxiter, FailedIterate
def newton_linesearch(fun, x0, jac, hess, tol, args=(), store_iterates=None, **options):
"""
see Bierlaire (2006), p. 278
Keyword Arguments:
fun -- objective function to minimize
x0 -- initial guess for solution
jac -- Jacobian (gradient) of objective function
hess -- Hessian (matrix of second-order derivatives) of objective function
tol -- Tolerance for termination
store_iterates -- (default None) if set to 'iterate' the full iterates are
stored in module-level constant iterates
**options -- none of those will be used
"""
x = np.matrix(x0.copy()).reshape((-1, 1))
try:
fprime = jac(x, *args)
except Exception:
print(jac, type(jac))
raise
maxiter_key = 'maxiter'
if maxiter_key not in options.keys():
options[maxiter_key] = 20
linesearch_maxiter_key = 'linesearch_maxiter'
if linesearch_maxiter_key not in options.keys():
options[linesearch_maxiter_key] = 20
counter = 0
iterates = list()
if store_iterates == 'iterate':
iterate = scipy.optimize.OptimizeResult(
{'x': x.copy(),
'fun': fun(x, *args),
'jac': jac(x, *args),
'hess': hess(x, *args),
'tau': float('nan'),
'alpha': float('nan')})
iterates.append(iterate)
if args:
use_fun = lambda x: fun(x, *args)
use_jac = lambda x: jac(x, *args)
use_hess = lambda x: hess(x, *args)
else:
use_fun = fun
use_jac = jac
use_hess = hess
try:
while True:
try:
norm_grad = np.linalg.norm(fprime)
except Exception:
print(fprime)
print(type(fprime))
print(fprime.dtype)
print(fprime.shape)
raise
if norm_grad < tol:
raise ReachedTolerance(
"||grad f(x)|| = {} < {} = tol".format(
norm_grad, tol))
if counter == options['maxiter']:
raise ReachedMaxiter("reached maxiter ({})".format(
options['maxiter']))
# 1)
L, tau = modified_cholesky(hess(x, *args))
# 2)
fprime = use_jac(x)
z = np.linalg.solve(L, fprime)
# 3)
d = np.linalg.solve(L.T, -z)
# 4)
result = line_search(use_fun, x, use_jac, d, alpha0=1, maxiter=options[linesearch_maxiter_key])
alpha = result.x
violation = result.violation
# 5)
x += alpha * d
counter += 1
if store_iterates == 'iterate':
iterate = scipy.optimize.OptimizeResult(
{'x': x.copy(),
'fun': use_fun(x),
'jac': use_jac(x),
'hess': use_hess(x),
'tau': tau,
'alpha': alpha})
iterates.append(iterate)
except ReachedMaxiter as err:
message = str(err)
success = False
except(ReachedTolerance) as err:
message = str(err)
success = True
result = scipy.optimize.OptimizeResult({'message': message,
'success': success,
'x': np.asarray(x).ravel(),
'fun': use_fun(x),
'jac': use_jac(x),
'hess': use_hess(x),
'nit': counter})
if iterates:
result['iterates'] = iterates
return result
x0 = np.array([1.,1.])
result = scipy.optimize.minimize(test_fun, x0=x0, method=newton_linesearch,
jac=test_jac, hess=test_hess, tol=1.e-14,
options={'store_iterates': 'iterate'})
print(" k x_k f(x_k) |βf(x_k))| Ξ±_k Ο_k")
for (i, it) in enumerate(result.iterates):
print("{0:>2} ({5:+.7e}, {6:+.7e}) {1:+.4e} {2:.4e} {3: >.4e} {4:.4e}".format(i, it.fun, np.linalg.norm(it.jac), it.alpha, it.tau, it.x[0, 0], it.x[1, 0]))
%matplotlib inline
import matplotlib.pyplot as plt
iterates = np.array([iterate.x for iterate in result.iterates])
x, y = iterates[:, 0], iterates[:, 1]
xg, yg = np.linspace(-2, 2, 51), np.linspace(-6, 6, 51)
def mat_fun(xg, yg):
Z = np.zeros((xg.size, yg.size))
for i in range(Z.shape[0]):
for j in range(Z.shape[1]):
Z[j, i] = test_fun(np.array([xg[i], yg[j]]))
return Z
X, Y = np.meshgrid(xg,yg)
plt.contour(X, Y, mat_fun(xg, yg))
plt.plot(x, y, c='r', marker='+')
plt.grid(True)
plt.figure()
xg, yg = np.linspace(-.3, 1.1, 51), np.linspace(-.95, 3.5, 51)
X, Y = np.meshgrid(xg,yg)
plt.contour(xg, yg, mat_fun(xg, yg))
plt.plot(x, y, c='r')
plt.grid(True)
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/Testing_Newton_linesearch.ipynb | Testing_Newton_linesearch.ipynb |
```
import sympy
from sympy import Symbol, diff, Matrix, pprint, MatrixSymbol, symbols
def grad(fun, coords):
return Matrix([diff(fun, coord) for coord in coords])
def hess(fun, coords):
dfun = grad(fun, coords)
return Matrix([ [diff(dfun[i], coord) for coord in coords]
for i in range(len(dfun))])
def Lc(fun, hfuns, lams, c):
retfun = fun
for i in range(len(lams)):
retfun = retfun + lams[i]*hfuns[i] + c/2*hfuns[i]**2
return retfun
def dLc(fun, hfuns, lams, c, coords):
retfun = grad(fun, coords)
for i in range(len(lams)):
retfun += lams[i] * grad(hfuns[i], coords) + c*grad(hfuns[i], coords)*hfuns[i]
return retfun
def ddLc(fun, hfuns, lams, c, coords):
dfun = dLc(fun, hfuns, lams, c, coords)
return Matrix([ [diff(dfun[i], coord) for coord in coords]
for i in range(len(dfun))])
b = Symbol('b', real = True, positive = True)
a = Symbol('a', real = True)
lam = Symbol('λ', real = True)
c = Symbol('c', real = True, positive=True)
x = Symbol('x', real = True)
y = Symbol('y', real = True)
def print_fun(fun, hfuns, lams, coords):
print('f(x, y):')
pprint(fun)
print('grad(f(x, y)):')
pprint(grad(fun, coords))
print('hess(f(x, y)):')
pprint(hess(fun, coords))
for i in range(len(hfuns)):
print('')
print('h_{}(x, y):'.format(i))
pprint(hfuns[i])
print('grad(h_{}(x, y)):'.format(i))
pprint(grad(hfuns[i], coords))
print('hess(h_{}(x, y)):'.format(i))
pprint(hess(hfuns[i], coords))
print('')
print('L(x, y, lam, c):')
pprint(Lc(f, hfuns, lams, c))
print('dL(x, y, lam, c):')
pprint(dLc(f, hfuns, lams, c, coords))
print('ddL(x, y, lam, c):')
pprint(ddLc(f, hfuns, lams, c, coords))
f = b*x
h = x+y*y -a
print_fun(f, [h], [lam], [x, y])
# example 20.5
f = (2*(x**2 + y**2 - 1) - x)/1e4
h = [(x**2 + y**2 - 1)]
print_fun(f, h, symbols('λ:1'), [x, y])
sympy.pretty(f)
from sympy import pretty
import numpy as np
import scipy.optimize
class augmented_algo(object):
def __init__(self, fun, h_list, coords, coords0):
self.fun = fun
self.hfuns = h_list
self.h_size = len(self.hfuns)
self.coords = coords
self.coords0 = coords0
self.lams = symbols('λ:{}'.format(len(self.hfuns)))
self.c = symbols('c', positive=True)
self.Lc = Lc(self.fun, self.hfuns, self.lams, self.c)
self.dLc = dLc(self.fun, self.hfuns, self.lams, self.c, self.coords)
self.ddLc = ddLc(self.fun, self.hfuns, self.lams, self.c, self.coords)
self.counter = 0
self.x_val = self.coords0
self.c_val = 10.
self.lam_val = [0. for _ in range(self.h_size)]
self.tau_val = 10
self.alpha_val = .1
self.beta_val = .9
self.eta0_val = .1258925
self.eta_val = 1/self.c_val**self.alpha_val
self.eps_val = 1e-8
self.eps_0_val = 1/self.c_val
self.eps_k_val = self.eps_0_val
self.iterates = list()
def __repr__(self):
fun_str = pretty(self.fun)
hfun_strs = [pretty(h) for h in self.hfuns]
lag_str = pretty(self.Lc)
outstr = []
coord_str = ', '.join((pretty(c) for c in self.coords))
outstr.append('f({}) ='.format(coord_str))
outstr.append(fun_str)
outstr.append('')
outstr.append('h({}) ='.format(coord_str))
for hf in hfun_strs:
outstr.append(hf)
outstr.append('')
outstr.append('L_c(({}), ({})) = '.format(
coord_str, ', '.join((pretty(c) for c in self.lams))))
outstr.append(lag_str)
return '\n'.join(outstr)
def numeric_Lc(self):
subs = {lam: lam_val for lam, lam_val in zip(self.lams, self.lam_val)}
subs[self.c] = self.c_val
fun_val = sympy.utilities.lambdify(
self.coords,
self.Lc.subs(subs),
modules='numpy')
grad_val = sympy.utilities.lambdify(
self.coords,
self.dLc.subs(subs),
modules='numpy')
hess_val = sympy.utilities.lambdify(
self.coords,
self.ddLc.subs(subs),
modules='numpy')
h_vals = [sympy.utilities.lambdify(self.coords, self.hfuns[i], modules='numpy')
for i in range(self.h_size)]
return fun_val, grad_val, hess_val, h_vals
def iteration(self):
self.counter += 1
print('\nIteration {}:'.format(self.counter))
fun_val, grad_val, hess_val, h_vals = self.numeric_Lc()
# 1 solve local prob
result = scipy.optimize.minimize(
lambda x: fun_val(*x), self.x_val, tol = self.eps_k_val,
method='Newton-CG',# 'trust-ncg',#
jac=lambda x: np.asarray(grad_val(*x)).flatten(),
hess=lambda x: np.asarray(hess_val(*x)).squeeze())
print('success = {}'.format(result.success))
print('message = {}'.format(result.message))
print('solution = {}'.format(result.x))
if result.success:
self.x_val = result.x
else:
raise Exception(result.message)
# 2 test convergence
gv = grad_val(*self.x_val)
gv = np.sqrt(float(gv.T*gv))
grad_convergence = gv < self.eps_val
h_val_evals = [h(*self.x_val)**2 for h in h_vals]
hv = np.sqrt(sum(h_val_evals))
constraints_convergence = hv < self.eps_val
print('\nConvergence:')
print(('grad_convergence: {} ({:.4e} >= {}),\n'
'constraints_convergence: {} ({:.4e} >= {})').format(
grad_convergence, gv, self.eps_val, constraints_convergence, hv, self.eps_val))
print('overall convergence: {}, current tol = {:.4e}'.format(
grad_convergence and constraints_convergence, self.eps_k_val))
overall_convergence = grad_convergence and constraints_convergence
if hv < self.eta_val:
self.lam_val = [lam + self.c_val*h_eval
for lam, h_eval in zip(self.lam_val, h_val_evals)]
self.eps_k_val /= self.c_val
self.eta_val /= self.c_val**self.beta_val
print(('\nWeak constraint violation: {:.4e} < {:.4e}; '
'updated multipliers').format(
hv, self.eta_val))
print('λ = {}, tol_k = {:.4e}, update_tol = {:.4e}'.format(
['{:.4e}'.format(l) for l in self.lam_val], self.eps_k_val, self.eta_val))
else:
self.c_val *= self.tau_val
self.eps_k_val = self.eps_0_val/self.c_val
self.eta_val = self.eta0_val/self.c_val**self.beta_val
print(('\nBad constraint violation: {:.4e} > {:.4e}; '
'increased penalty').format(
hv, self.eta_val))
print('c = {:.4e}, tol_k = {:.4e}, update_tol = {:.4e}'.format(
self.c_val, self.eps_k_val, self.eta_val))
self.iterates.append(scipy.optimize.OptimizeResult(
{'x': self.x_val.copy(),
'success': result.success,
'message': result.message,
'fun': result.fun,
'jac': result.jac,
'hess': hess_val(*self.x_val)}))
return overall_convergence
aa = augmented_algo(f, h, [x, y], (-.1, 1.))
fun_val, grad_val, hess_val, h_vals = aa.numeric_Lc()
print(hess_val(0, 0))
converged = False
while not converged:
try:
converged = aa.iteration()
except Exception:
converged = True
%matplotlib inline
import matplotlib.pyplot as plt
coords = np.array([it.x for it in aa.iterates])
xs = np.linspace(-1.1, 1.1, 51)
ys = np.linspace(-1.1, 1.1, 51)
X, Y = np.meshgrid(xs, ys)
Z = sympy.utilities.lambdify([x, y], f)(X,Y)
CS = plt.contourf(X, Y, Z)
phi = np.linspace(0, 2*np.pi, 97)
xs, ys = np.cos(phi), np.sin(phi)
plt.plot(xs, ys, c='k', lw=2)
plt.plot(coords[:, 0], coords[:, 1], c = 'r', lw = 2)
a = Symbol('a')
b = Symbol('b')
f = a
f
f += b
f
a.shape
d = np.array((5., 5))
B = np.matrix(np.eye(2)*1.2)
B
print(np.dot(B, d))
np.dot(d, np.dot(B, d))
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/Symbolic_Augmented_Lagrangian.ipynb | Symbolic_Augmented_Lagrangian.ipynb |
from SurfaceTopography import make_sphere
from SurfaceTopography.Generation import fourier_synthesis
import Adhesion.Interactions as Inter
from Adhesion.System import SmoothContactSystem
import ContactMechanics as Solid
import numpy as np
nx = 32
sx = sy = 2
R = 10.
Es = 50.
interaction = Inter.RepulsiveExponential(0, 0.5, 0, 1.)
substrate = Solid.PeriodicFFTElasticHalfSpace((nx,), young=Es,
physical_sizes=(sx,))
topography = make_sphere(R, (nx,), (sx,), kind="paraboloid")
system = SmoothContactSystem(substrate=substrate, surface=topography,
interaction=interaction)
def check_fun_grad_consistency(fun,
x0 , dx=None,
hs=np.array([1e1, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5,1e-6, 1e-7])):
"""
Tests the consistency between the function and its gradient values.
Parameters
----------
fun: callable returning the function value and its gradient at x.
x0: point at which to test.
dx: perturbation direction (a random direction is used if None).
hs: step sizes used for the Taylor-remainder test.
"""
obj = fun
x = x0
if dx is None:
dx = 1 * (0.5 + np.random.random(size=(x0.shape)))
dx /= np.linalg.norm(dx)  # make it a unit vector
en, grad = obj(x)
taylor = []
for h in hs:
_en, _grad = obj(x + h * dx)
_taylor = _en - en - np.sum(grad * h * dx)
_taylor = _taylor/h**2
if not taylor :
_max_taylor = _taylor
lower_bnd = _max_taylor/10
upper_bnd = _max_taylor*10
taylor.append(_taylor)
np.testing.assert_array_less(lower_bnd,_taylor,err_msg='lower bound not met.')
np.testing.assert_array_less(_taylor,upper_bnd,err_msg='upper bound not met.')
if True :
# Visualize the quadratic convergence of the taylor expansion
# What to expect:
# Taylor expansion: g(x + h Δx) - g(x) = Hessian * h * Δx + O(h^2)
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(hs, taylor, "+-")
ax.set_xscale("log")
ax.set_xlabel('hs')
ax.set_yscale("log")
ax.set_ylabel('taylor diff')
ax.grid(True)
plt.show()
obj_float = system.objective_k_float(0, True, True)
obj_real = system.objective(0, True, True)
x = np.random.uniform(size=nx)
# dx = np.zeros(nx)
# dx[8]=1
check_fun_grad_consistency(obj_real, x)
check_fun_grad_consistency(obj_float, x) | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/helpers/check_gradient_energy.py | check_gradient_energy.py |
import os
import sys
from collections import defaultdict
from datetime import datetime
from subprocess import Popen, PIPE
root = os.path.dirname(sys.argv[0])
def read_authors(fn):
return {email.strip('<>'): name for name, email in
[line.rsplit(maxsplit=1) for line in open(fn, 'r')]}
def parse_git_log(log, authors):
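# Build a {author: set of years} map from `git log` output; commits whose
# message mentions 'copyright' or 'license' are not counted.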
committers = defaultdict(set)
author = None
date = None
for line in log.decode('utf-8').split('\n'):
if line.startswith('commit'):
if date is not None and author is not None:
committers[author].add(date.year)
elif line.startswith('Author:'):
email = line.rsplit('<', maxsplit=1)[1][:-1]
elif line.startswith('Date:'):
date = datetime.strptime(line[5:].rsplit(maxsplit=1)[0].strip(),
'%a %b %d %H:%M:%S %Y')
try:
author = authors[email]
except KeyError:
author = email
elif 'copyright' in line.lower() or 'license' in line.lower():
date = None
if date is not None:
committers[author].add(date.year)
return committers
def pretty_years(years):
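# Collapse a set of years into a compact range string,
# e.g. {2016, 2017, 2019} -> '2016-2017, 2019'.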
years = sorted(years)
prev_year = prev_out = years[0]
s = '{}'.format(prev_year)
for year in years[1:]:
if year - prev_year > 1:
if year - prev_out > 1:
if prev_year == prev_out:
s = '{}, {}'.format(s, year)
else:
s = '{}-{}, {}'.format(s, prev_year, year)
else:
s = '{}, {}'.format(s, prev_year)
prev_out = year
prev_year = year
if prev_year - prev_out == 1:
s = '{}-{}'.format(s, prev_year)
elif prev_year - prev_out > 1:
s = '{}, {}'.format(s, prev_year)
return s
authors = read_authors('{}/../AUTHORS'.format(root))
process = Popen(['git', 'log', '--follow', sys.argv[1]], stdout=PIPE,
stderr=PIPE)
stdout, stderr = process.communicate()
committers = parse_git_log(stdout, authors)
prefix = 'Copyright'
for name, years in committers.items():
print('{} {} {}'.format(prefix, pretty_years(years), name))
prefix = ' ' * len(prefix)
print() | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/maintenance/copyright.py | copyright.py |
.. _contributing:
Contributing to Adhesion
========================
Code style
----------
Always follow PEP-8_, with the following exception: "One big exception to PEP 8 is our preference of longer line lengths. We're well into the 21st Century, and we have high-resolution computer screens that can fit way more than 79 characters on a screen. Don't limit lines of code to 79 characters if it means the code looks significantly uglier or is harder to read." (Taken from Django's contributing guidelines_.)
Development branches
--------------------
New features should always be developed in their own branch. When creating your own branch,
please start the branch name with the year of creation, followed by a short description of what it contains.
For example, if you are working on an implementation for line scans and you started that
work in 2018, the branch could be called "18_line_scans".
Commits
-------
Prepend your commit messages with a shortcut indicating the type of changes they contain:
- ENH: Enhancement (e.g. a new feature)
- MAINT: Maintenance (e.g. fixing a typo)
- DOC: Changes to documentation strings
- BUG: Bug fix
- TST: Changes to the unit test environment
- CI: Changes to the CI configuration
.. _PEP-8: https://www.python.org/dev/peps/pep-0008/
.. _guidelines: https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/coding-style/
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/docs/contributing.rst | contributing.rst |
Usage
=====
The code is documented via Python's documentation strings, which can be
accessed via the `help` command
or by appending a question mark `?` in IPython/Jupyter.
There are two command line tools available that may be a good starting point.
They are in the `commandline` subdirectory:
- `soft_wall.py`: Command line front end for calculations with soft (possibly adhesive) interactions between a rigid and an elastic flat. This is a stub rather than a fully featured command line tool and can be used as a starting point for modified scripts. The present implementation is set up for a solution of Martin Müser's contact mechanics challenge.
Have a look in the `examples` folder as well as in the tests.
Conventions
-----------
Coordinate system
+++++++++++++++++
Definition of gap, heights and displacements
.. image:: ./Figures/geometry_pdf_tex.svg
:math:`h_0(x)` is the content of the topography.
:math:`\delta`: rigid body penetration
:math:`h(x) = \delta + h_0(x)` is the height of the indenter with respect to the surface of the undeformed halfspace
:math:`u(x)` displacement of the halfspace
:math:`g(x) = u(x) - h(x) = u(x) - (\delta + h_0(x))`: gap
The simulation models the indentation of an elastic halfspace (flat) with a rigid indenter whose geometry is given by the topography.
In the picture above, the maximum value of the topography :math:`h_0(x)` is 0. First contact occurs at :math:`\delta = 0` and the load increases as :math:`\delta` increases.
If :math:`h_0(x)` contains positive values, the first contact will occur at :math:`\delta < 0`.
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/docs/usage.rst | usage.rst |
.. Adhesion documentation master file, created by
sphinx-quickstart on Tue Nov 27 17:14:58 2018.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Adhesion's documentation!
====================================
Adhesive Contact mechanics with Python.
This code implements computation of contact geometry and pressure
of a rigid object on a flat elastic half-space.
All calculations assume small deformations;
in that limit, the contact of any two objects of arbitrary
geometry and elastic moduli can be mapped on that of a
rigid on an elastic flat.
.. toctree::
:maxdepth: 2
:caption: Notes
installation
usage
development
Testing
contributing
.. toctree::
:maxdepth: 1
:caption: Package Reference
source/Adhesion.Interactions
source/Adhesion.System
source/Adhesion.ReferenceSolutions
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/docs/index.rst | index.rst |
Development
===========
To use the code without installing it, e.g. for development purposes, use the `env.sh` script to set the environment:
``source /path/to/Adhesion/env.sh [python3]``
Note that the parameter to `env.sh` specifies the Python interpreter for which the environment is set up. Adhesion contains portions that need to be compiled, make sure to run
``python setup.py build``
whenever any of the Cython (.pyx) sources are modified.
Please read :ref:`contributing` if you plan to contribute to this code.
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/docs/development.rst | development.rst |
# %%
import scipy.optimize
import matplotlib.pyplot as plt
import numpy as np
from ContactMechanics import PeriodicFFTElasticHalfSpace
# %%
from Adhesion.Interactions import Exponential
from NuMPI.IO.NetCDF import NCStructuredGrid
# %%
from ContactMechanics.Tools.Logger import Logger
from Adhesion.System import BoundedSmoothContactSystem
import time
import datetime
from SurfaceTopography import make_sphere
# %% [markdown]
# These Parameters are one because of nondimensionalisation,
# this way the typical displacements, forces and contact areas are of order 1
# %%
maugis_K = 1.
Es = 3 / 4 # maugis K = 1.
R = 1.
work_of_adhesion = 1 / np.pi
# %%
# the shorter-ranged the potential, the finer the discretisation needed
# and the more difficult the minimisation
length_parameter = 0.2
# %%
# discretisation: too high dx leads to artificial hysteresis
dx = .025
# %%
# how much to increment the indentation depth at each simulation step
delta_d = 0.01
# %%
nx, ny = 256, 256  # should be chosen so that the system size
# is bigger then the jump in radius
sx, sy = (nx * dx, ny * dx)
topography = make_sphere(R, (nx, ny), (sx, sy), kind="paraboloid")
interaction = Exponential(work_of_adhesion, length_parameter)
substrate = PeriodicFFTElasticHalfSpace((nx, ny), Es, (sx, sy), )
system = BoundedSmoothContactSystem(substrate, interaction, topography)
# %%
starting_penetration = - 2 * length_parameter # rigid body penetration
max_stress = abs(interaction.max_tensile)
# %%
# demo how to plot things. You can import this from outside !
# demo of how to plot the results; you can also import this function from elsewhere
def plot_result(filename="data.nc"):
nc = NCStructuredGrid(filename)
fig, ax = plt.subplots()
ax.plot(nc.penetration, nc.normal_force)
ax.set_xlabel(r"Penetration $(\pi^2 w_m^2 R / K^2)^{1/3}$")
ax.set_ylabel(r"Force ($\pi w_m R$)")
plt.show()
fig, ax = plt.subplots()
x, y = topography.positions()
for i in range(len(nc)):
ax.plot(x[:, 0], nc.displacements[i][:, ny // 2],
label="penetration={:.2f}".format(nc.penetration[i]))
ax.set_ylabel(r"displacement $(\pi^2 w_m^2 R / K^2)^{1/3}$")
ax.set_xlabel(r"x ($\left(\pi w R^2 /K\right)^{1/3}$)")
ax.legend()
plt.show()
nc.close()
# %%
gtol = 1e-4
if __name__ == '__main__':
pulloff_force = 0
monitor = None
disp0 = None
print("create nc file")
# binary file to store all the data
ncfile = NCStructuredGrid("data.nc", mode="w",
nb_domain_grid_pts=system.surface.nb_grid_pts)
# size of the simulation domain
# (relevant when storing fields)
starttime = time.time()
try:
counter = 1
i = 0
j = 0
penetration = starting_penetration
mean_deformation = 0
main_logger = Logger("main.log")
absstarttime = time.time()
for penetration in np.linspace(starting_penetration, 1., 10):
# this needs to be tweaked for each system
# printp("#######################################################")
print("penetration = {}".format(penetration))
# printp("#######################################################")
if disp0 is None:
disp0 = np.zeros(system.substrate.nb_subdomain_grid_pts)
starttime = time.time()
# This is typically the scope of minimize_proxy
lbounds = system._lbounds_from_heights(penetration)
# sol = scipy.optimize.fmin_l_bfgs_b(
# # mandatory interface
# system.objective(penetration, gradient=True), disp0,
# bounds=system._reshape_bounds(lbounds=lbounds, ),
# # starting from now you are free to adapt
# pgtol=gtol * abs(max_stress) * topography.area_per_pt, factr=0,
# m=3,
# maxls=20)
# this function has an output that doesn't
# match the scipy.optimize.minimize standard, and that is annoying
# REPLACE THIS WITH CUSTOM MINIMIZER
sol = scipy.optimize.minimize(
system.primal_objective(penetration, gradient=True),
x0=disp0,
method="L-BFGS-B",
jac=True,
bounds=system._reshape_bounds(lbounds=lbounds, ),
callback=system.callback(True),
options=dict(gtol=gtol * abs(
interaction.max_tensile) * topography.area_per_pt,
# typical force on one pixel
ftol=0, maxcor=3),
)
# REPLACE THIS WITH CUSTOM MINIMIZER
elapsed_time = time.time() - starttime
assert sol.success, sol.message
# update internal state of system so we can use its
# utility functions to compute some physical quantities
system._update_state(penetration, result=sol)
u = disp0 = sol.x
#
ncfile[i].displacements = u
force = - substrate.evaluate_force(u)
#
contacting_points = np.where(system.gap == 0., 1., 0.)
ncfile[i].contact_area = system.compute_contact_area()
ncfile[i].repulsive_area = repulsive_area = \
system.compute_repulsive_contact_area()
ncfile[i].normal_force = normal_force = \
system.compute_normal_force()
ncfile[i].penetration = penetration
ncfile[i].repulsive_force = system.compute_repulsive_force()
ncfile[i].attractive_force = system.compute_attractive_force()
ncfile[i].mean_deformation = mean_deformation
ncfile[i].elastic_energy = elastic_energy = system.substrate.energy
ncfile[i].interaction_energy = interaction_energy = \
system.interaction.energy
ncfile[i].energy = energy = system.energy
rel_rep_area = repulsive_area / np.prod(topography.physical_sizes)
pulloff_force = min(normal_force, pulloff_force)
# logfile you can open in gnuplot
main_logger_headers = ["step", "nit", "nfev", "walltime",
"penetration", "mean deformation", "force",
"frac. rep. area", "energy"]
main_logger.st(main_logger_headers,
[i, sol.nit, -1, elapsed_time, penetration,
mean_deformation, normal_force, rel_rep_area,
energy, ]
)
i += 1
finally:
ncfile.close()
endtime = time.time()
elapsed_time = endtime - absstarttime
print(
"elapsed time: {} \n= {}"
"".format(elapsed_time,
datetime.timedelta(seconds=elapsed_time)))
# %% | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/examples/hardwall_adhesion_custom.py | hardwall_adhesion_custom.py |
```
import matplotlib.pyplot as plt
import numpy as np
from Adhesion.Interactions import PowerLaw, Exponential
pot = PowerLaw(0.5, 3 * 0.2, 3)
exp = Exponential(0.5, 0.2)
fig, (axpot, axf, axcurv) = plt.subplots(3,1)
r = np.linspace(-0.001, 2)
v, dv, ddv = pot.evaluate(r, True, True, True)
axpot.plot(r, v, label="PowerLaw")
axf.plot(r, dv, label="PowerLaw")
axcurv.plot(r, ddv, label="PowerLaw")
v, dv, ddv = exp.evaluate(r, True, True, True)
axpot.plot(r, v, label="exponential")
axf.plot(r, dv, label="exponential")
axcurv.plot(r, ddv, label="exponential")
axpot.set_ylabel("Potential")
axf.set_ylabel("interaction stress")
axcurv.set_ylabel("curvature")
for a in (axpot, axf, axcurv):
a.grid()
axcurv.set_xlabel("gap")
axpot.legend()
fig.savefig("PowerLawPotential.png")
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/examples/power_law_potential.ipynb | power_law_potential.ipynb |
import numpy as np
from ContactMechanics import PeriodicFFTElasticHalfSpace
from SurfaceTopography import UniformLineScan
import matplotlib.pyplot as plt
from Adhesion.Interactions import VDW82, Exponential
from Adhesion.System import SmoothContactSystem, BoundedSmoothContactSystem
from Adhesion.ReferenceSolutions.sinewave import JKR
# %% [markdown]
#
# # Smooth against JKR
#
# %%
s = 1.
Es = 1. / np.pi
alpha = 0.2 # nondimensionalized stress intensity factor
w = alpha ** 2 / (2 * Es)
fig, ax = plt.subplots()
ax.set_title("comparison against analytical")
a = np.linspace(0, 0.4)
ax.plot(np.sin(np.pi * a) ** 2 - alpha * np.sqrt(np.tan(np.pi * a)), a * 2,
"--k", label="JKR limit")
ax.set_xlabel(R"mean pressure ($\pi E^* h / \lambda$)")
ax.set_ylabel("frac. contact area (-)")
for p in [6, 8, 10, 12]:
n = 2 ** p
dx = s / n
z0 = 2 * np.sqrt(dx)
inter = VDW82(w * z0 ** 8 / 3, 16 * np.pi * w * z0 ** 2
).spline_cutoff(gamma=w
).linearize_core()
substrate = PeriodicFFTElasticHalfSpace((n,), young=Es,
physical_sizes=(s,),
fft='serial')
surface = UniformLineScan(
np.cos(np.arange(0, n) * np.pi * 2. / n),
physical_sizes=(s,))
system = SmoothContactSystem(substrate, inter, surface)
offsets = np.linspace(-2, 0.35, 20)
offsets = np.concatenate((offsets, offsets[-2::-1]))
contact_areas = np.zeros_like(offsets)
mean_pressures = np.zeros_like(offsets)
nsteps = len(offsets)
disp0 = None
gtol = 1e-5
i = 0
for offset in offsets:
if disp0 is not None:
disp0 += offset - offset_prev # noqa: F821
sol = system.minimize_proxy(
initial_displacements=disp0,
options=dict(gtol=gtol * max(Es * surface.rms_slope_from_profile(), abs(
inter.max_tensile)) * surface.area_per_pt,
# max absolute value of the gradient
# of the objective for convergence
),
# logger=Logger("laststep.log"),
method="L-BFGS-B",
offset=offsets[i],
callback=None,
lbounds="auto"
)
assert sol.success, sol.message
disp0 = sol.x
mean_pressures[i] = system.compute_normal_force() / s
contact_areas[i] = np.count_nonzero(system.gap < inter.r_infl) / n
# print("step {}".format(i))
offset_prev = offset
i += 1
abserror = np.max(abs(
mean_pressures - JKR.mean_pressure(contact_areas / 2, alpha)))
ax.plot(mean_pressures, contact_areas,
label="n={}, error={:.1e}".format(n, abserror))
plt.pause(0.0001)
ax.grid()
ax.legend()
plt.show()
# %% [markdown]
#
# # Hardwall against JKR
#
# %%
s = 1.
Es = 1. / np.pi
alpha = 0.2 # nondimensionalized stress intensity factor
w = alpha ** 2 / (2 * Es) # = alpha^2 np.pi / 2
fig, ax = plt.subplots()
ax.set_title("comparison against analytical")
a = np.linspace(0, 0.4)
ax.plot(JKR.mean_pressure(a, alpha), a * 2, "--k", label="JKR limit")
ax.set_xlabel(R"mean pressure ($\pi E^* h / \lambda$)")
ax.set_ylabel("frac. contact area (-)")
for p in [6, 8, 10, 12]:
n = 2 ** p
dx = s / n
rho = np.sqrt(dx)
inter = Exponential(w, rho)
substrate = PeriodicFFTElasticHalfSpace((n,), young=Es,
physical_sizes=(s,),
fft='serial')
surface = UniformLineScan(
np.cos(np.arange(0, n) * np.pi * 2. / n),
physical_sizes=(s,), periodic=True)
system = BoundedSmoothContactSystem(substrate, inter, surface)
offsets = np.linspace(-2, 0.35, 20)
offsets = np.concatenate((offsets, offsets[-2::-1]))
contact_areas = np.zeros_like(offsets)
mean_pressures = np.zeros_like(offsets)
nsteps = len(offsets)
disp0 = None
gtol = 1e-5
i = 0
for offset in offsets:
if disp0 is not None:
disp0 += offset - offset_prev
sol = system.minimize_proxy(
initial_displacements=disp0,
options=dict(gtol=gtol * max(Es * surface.rms_slope_from_profile(), abs(
inter.max_tensile)) * surface.area_per_pt,
# max absolute value of the gradient
# of the objective for convergence
),
# logger=Logger("laststep.log"),
method="L-BFGS-B",
offset=offset,
callback=None,
lbounds="auto"
)
assert sol.success, sol.message
disp0 = sol.x
mean_pressures[i] = system.compute_normal_force() / s
contact_areas[i] = system.compute_contact_area() / s
# print("step {}".format(i))
offset_prev = offset
i += 1
abserror = np.max(abs(
mean_pressures - JKR.mean_pressure(contact_areas / 2, alpha)))
ax.plot(mean_pressures, contact_areas,
label="n={}, error={:.1e}".format(n, abserror))
plt.pause(0.0001)
ax.grid()
ax.legend()
plt.show()
# %% | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/examples/sinewave_bem_vs_jkr.py | sinewave_bem_vs_jkr.py |
# taken as reference from pycontact (https://github.com/pastewka/pycontact)
import numpy as np
def C1(epsilon, sigma, rc1, rc2):
return epsilon / sigma * (
6. / 5 * (sigma / rc1) ** 10 - 3 * (sigma / rc1) ** 4)
def C2(epsilon, sigma, rc1, rc2):
return -12 * epsilon / sigma ** 2 * (
(sigma / rc1) ** 11 - (sigma / rc1) ** 5)
def C3(epsilon, sigma, rc1, rc2):
return -(3 * C1(epsilon, sigma, rc1, rc2) +
2 * C2(epsilon, sigma, rc1, rc2) * (rc2 - rc1)) \
/ ((rc2 - rc1) ** 2)
def C4(epsilon, sigma, rc1, rc2):
return (2 * C1(epsilon, sigma, rc1, rc2) +
C2(epsilon, sigma, rc1, rc2) * (rc2 - rc1)) \
/ ((rc2 - rc1) ** 3)
def C0(epsilon, sigma, rc1, rc2):
return C1(epsilon, sigma, rc1, rc2) * (rc2 - rc1) + \
C2(epsilon, sigma, rc1, rc2) * (rc2 - rc1) ** 2 / 2. + \
C3(epsilon, sigma, rc1, rc2) * (rc2 - rc1) ** 3 / 3. + \
C4(epsilon, sigma, rc1, rc2) * (rc2 - rc1) ** 4 / 4.
###
def V(x, epsilon, sigma, rc1, rc2):
return np.where(x < rc1,
epsilon*(2./15*(sigma/x)**9 - (sigma/x)**3) # noqa: E122, E127, E501
- epsilon*(2./15*(sigma/rc1)**9 - (sigma/rc1)**3) # noqa: E122, E127, E501
+ C0(epsilon, sigma, rc1, rc2), # noqa: E122, E127
np.where(x < rc2, # noqa: E122, E127, E128
C0(epsilon, sigma, rc1, rc2) # noqa: E122, E128
- C1(epsilon, sigma, rc1, rc2)*(x - rc1) # noqa: E122
- C2(epsilon, sigma, rc1, rc2)*(x - rc1)**2/2 # noqa: E122
- C3(epsilon, sigma, rc1, rc2)*(x - rc1)**3/3 # noqa: E122
- C4(epsilon, sigma, rc1, rc2)*(x - rc1)**4/4, # noqa: E122
np.zeros_like(x) # noqa: E128, E122
)) # noqa: E122, E127, E128
def dV(x, epsilon, sigma, rc1, rc2):
return np.where(x < rc1,
- epsilon*(6./5*(sigma/x)**6 - 3)*(sigma/x)**3/x,
np.where(x < rc2,
- C1(epsilon, sigma, rc1, rc2)
- C2(epsilon, sigma, rc1, rc2) * (x - rc1)
- C3(epsilon, sigma, rc1, rc2) * (x - rc1) ** 2
- C4(epsilon, sigma, rc1, rc2) * (x - rc1) ** 3,
np.zeros_like(x)
)) # noqa: E122
def d2V_lj(x, epsilon, sigma, rc1, rc2):
return 12 * epsilon * ((sigma / x) ** 6 - 1) * (sigma / x) ** 3 / (x * x)
def d2V_smooth(x, epsilon, sigma, rc1, rc2):
return - C2(epsilon, sigma, rc1, rc2) \
- 2 * C3(epsilon, sigma, rc1, rc2) * (x - rc1) \
- 3 * C4(epsilon, sigma, rc1, rc2) * (x - rc1) ** 2
def d2V(x, epsilon, sigma, rc1, rc2):
return np.where(x < rc1,
d2V_lj(x, epsilon, sigma, rc1, rc2),
np.where(x < rc2,
d2V_smooth(x, epsilon, sigma, rc1, rc2),
np.zeros_like(x)
))
###
def find_epsilon_from_gamma(gamma_target, sigma, rc1, rc2):
return -gamma_target / V(rc1, 1.0, sigma, rc1, rc2) | Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/test/Interactions/lj93smooth_ref_potential.py | lj93smooth_ref_potential.py |
# Singularity images with Adhesion
The process is split into two steps.
Build an image containing the dependencies
```bash
sudo singularity build dep_serial.sif dep_serial.def
```
From this image you should also be able to run Adhesion without installing it (but don't forget to run `python3 setup.py build` from inside the container)
Based on this image, you can create an image with Adhesion "pip installed":
```bash
sudo singularity build adhesion_serial.sif adhesion_serial.def
```
Similarly, you can build the Adhesion image with mpi support.
```bash
sudo singularity build dep_mpi.sif dep_mpi.def
sudo singularity build adhesion_mpi.sif adhesion_mpi.def
```
## Running tests
In the Adhesion main directory, create a file `testjob.sh` with the following content:
```bash
source env.sh
pytest
# only for mpi
python3 run-tests.py --mpirun="mpirun -np 4 --oversubscribe" --verbose $@
```
run it:
```
singularity exec dep_mpi.sif bash testjob.sh
```
| Adhesion | /Adhesion-0.91.0.tar.gz/Adhesion-0.91.0/singularity/README.md | README.md |
# Adifpy
## Table of Contents
- [Introduction](#Introduction)
- [Background](#Background)
- [How to Use](#How-to-Use)
- [Software Organization](#Software-Organization)
- [Directory Structure](#Directory-Structure)
- [Subpackages](#Subpackages)
- [Implementation](#Implementation)
- [Libraries](#Libraries)
- [Modules and Classes](#Modules-and-Classes)
  - [Elementary Functions](#Elementary-Functions)
- [Future Features](#Future-Features)
- [Reverse Mode](#Reverse-Mode)
- [Higher Dimensions](#Higher-Dimensions)
- [Visualization](#Visualization)
- [Implementation Schedule](#Implementation-Schedule)
## Introduction
This software allows users to evaluate and differentiate their functions. It is a powerful tool for computing derivatives of complex functions and visualizing the results, and it serves as an efficient way to take derivatives compared with other approaches such as symbolic differentiation.
Applications are widespread, ranging from graphing simple functions to taking derivatives of complex, high-dimensional functions, with uses in optimization problems, machine learning, and data analysis.
## Background
Traditional methods for differentiation include symbolic differentiation and numerical differentiation. Each of these techniques brings its own challenges when used for computational science - symbolic differentiation requires converting complex computer programs into simple components and often results in complex and cryptic expressions, while numerical differentiation is susceptible to floating point and rounding errors.
Automatic differentiation (AD) solves these problems: any mathematical function (for which a derivative is needed) can be broken down into a series of constituent elementary (binary and unary) operations, executed in a specific order on a predetermined set of inputs. A technique for visualizing the sequence of operations corresponding to the function is the computational graph, with nodes representing intermediate variables and lines leaving from nodes representing operations used on intermediate variables. AD combines the known derivatives of the constituent elementary operations (e.g. arithmetic and transcendental functions) via the chain rule to find the derivative of the overall composition.
For example, for the hypothetical function $y = h(g(f(x)))$, where $f$, $g$, and $h$ all represent elementary operations, we can pose $v_0 = x$, $v_1 = f(v_0)$, $v_2 = g(v_1)$, $y = v_3 = h(v_2)$. The desired output is $\frac{dy}{dx}$, and by the chain rule and simple derivatives, we obtain:
<p align="center">
<img src="https://latex.codecogs.com/gif.image?%5Cbg_white%20%5Cdpi%7B110%7D%5Cfrac%7Bdy%7D%7Bdx%7D%20=%20%5Cfrac%7Bdv_3%7D%7Bdv_2%7D%20%5Ccdot%20%5Cfrac%7Bdv_2%7D%7Bdv_1%7D%20%5Ccdot%20%5Cfrac%7Bdv_1%7D%7Bdv_0%7D">
</p>
Our implementation of AD uses dual numbers to calculate derivatives of individual components. Dual numbers have real and dual components, taking the form $a + b\epsilon$, where $\epsilon^2 = 0$ and $\epsilon \neq 0$, and where `a` and `b` are real. By the Taylor series expansion of a function around a point, notice that evaluating a function at $a + \epsilon$ yields:
<p align="center">
<img src="https://latex.codecogs.com/gif.image?%5Cbg_white%20%5Cdpi%7B110%7Df(a%20+%20%5Cepsilon)%20=%20f(a)%20+%20%5Cfrac%7Bf'(a)%7D%7B1!%7D%20%5Cepsilon%20+%20%5Cfrac%7Bf''(a)%7D%7B2!%7D%20%5Cepsilon%5E2%20+%20...%20=%20f(a)%20+%20f'(a)%20%5Cepsilon">
</p>
Hence, by evaluating the function at the desired point $a$ (i.e., computing $f(a + \epsilon)$), the outputted real and dual components are the function evaluated at `a` and the derivative of the function evaluated at `a`, respectively. This is an efficient way of calculating the requisite derivatives.
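As a quick worked example: for $f(x) = x^2$, evaluating at $a + \epsilon$ gives $(a + \epsilon)^2 = a^2 + 2a\epsilon + \epsilon^2 = a^2 + 2a\epsilon$, so the dual component $2a$ is exactly $f'(a)$.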
## How to Use
First, ensure that you are using Python 3.10 or newer. All future steps can/should be completed in a virtual environment so as not to pollute your base Python installation. To create and activate a new virtual environment, use the following:
```
python3 -m venv [desired/path/to/venv]
source [desired/path/to/venv]/bin/activate
```
Next, clone the package from this GitHub repository and install the needed dependencies and the package:
```
git clone https://code.harvard.edu/CS107/team33.git
python3 -m pip install -r requirements.txt
python3 -m pip install .
```
Now, you're ready to use the package. Continue to the [Example](#Example) to test our the package!
### Example
First, import the package in your python code:
```
import Adifpy
```
and create an `Evaluator` object, which takes a callable function as an argument:
```
evaluator = Adifpy.Evaluator(lambda x : x**2)
```
Next, we want to find the value and derivative of the function at a point (currently, only scalar functions with 1 input and 1 output are supported). We can use the `Evaluator`'s `eval` function, passing in the point at which you want to evaluate (and optionally, a scalar seed vector):
```
output = evaluator.eval(3)
```
This function returns a tuple, in the form `(value, derivative)`, where the value is the evaluation of the function at that point (in this case, 9) and the derivative is the derivative of the function at that point (in this case, 6).
Additionally a seed vector (for now, only scalars such as type `int` or `float` are supported) can be passed to take the derivative with respect to a different seed vector. For example, if you want to take the derivative with respect to a seed vector of `2` you could call the following:
```
output2 = evaluator.eval(3, seed_vector=2)
```
which would return `(9,12)` (since the directional derivative is in the same direction, with twice the magnitude).
## Software Organization
The following section outlines our plans for organizing the package directory, sub-packages, modules, classes, and deployment.
### Directory Structure
<pre>
adifpy/
βββ docs
β βββ milestone1
β βββ milestone2
β βββ milestone2_progress
βββ LICENSE
βββ README.md
βββ requirements.txt
βββ pyproject.toml
βββ Adifpy
β βββ differentiate
β β βββ <a href="#dual_numberpy">dual_number.py</a>
β β βββ <a href="#elementary_functionspy">elementary_functions.py</a>
β β βββ <a href="#evaluatorpy">evaluator.py</a>
β β βββ <a href="#forward_modepy">forward_mode.py</a>
β β βββ <a href="#function_treepy">function_tree.py</a>
β β βββ <a href="#reverse_modepy">reverse_mode.py</a>
β βββ visualize
β β βββ <a href="#graph_treepy">graph_tree.py</a>
β β βββ <a href="#graph_functionpy">graph_function.py</a>
β βββ test
β β βββ README.md
β β βββ run_tests.sh
β β βββ test_dual_number.py
β β βββ ... (unit and integration tests)
β βββ __init__.py
β βββ config.py
βββ .github
βββ workflows
βββ coverage.yaml
βββ test.yaml
</pre>
### Subpackages
The `Adifpy` directory contains the source code of our package, which contains 3 subpackages: `differentiate`, `visualize`, and `test`, described below.
#### Differentiate
The differentiate subpackage currently contains modules required to perform forward mode AD on functions from R to R. Contained in this subpackage are the modules `dual_number.py`, `elementary_functions.py`, `evaluator.py`, `forward_mode.py`, `function_tree.py`, and `reverse_mode.py`. For more information on each module, see [Modules and Classes](#Modules-and-Classes).
#### Visualize
This subpackage has not been implemented yet. Check out our implementation plan [below](#Visualization).
#### Test
The test suite is contained in the test sub-package, as shown above in the [Directory Structure](#Directory-Structure). The test directory contains a `run_tests.sh`, which installs the package and runs the relevant `pytest` commands to display data on the testing suite (similar to the CI workflows).
The individual test files, each of which are named in the `test_*.py` format, test a different aspect of the package. Within each file, each function (also named `test_*`) tests a smaller detail of that aspect. For example, the `test_dual_number.py` test module tests the implementation of the `DualNumber` class. Each function in that module tests one of the overloaded operators. Thus, error messaging will be comprehensive, should one of these operators be changed and fail to work.
The easiest way to run the test suite is to go to the `test` directory and run `./run_tests.sh`.
### Distribution
Distributing Our Package: We will use PyPI to distribute our software package under the name `Adifpy`. Users will then be able to install the package from the Python Package Index instead of needing to install from source (by cloning or downloading the code). For now, follow the installation method described in the [How to Use](#How-to-Use) section.
## Implementation
Major data structures, including descriptions on how dual numbers are implemented, are described in the [Modules and Classes](#Modules-and-Classes) section below.
### Libraries
The `differentiate` sub-package requires the `NumPy` library. Additionally, the `visualization` sub-package will require `MatPlotLib` for displaying graphs. Additional libraries may be required later for additional ease of computation or visualization.
These requirements are specified in the `requirements.txt` for easy installation.
### Modules and Classes
#### `dual_number.py`
the `DualNumber` class, stored in this module, contains the functionality for dual numbers for automatic differentiation. When a forward pass (in forward mode) is performed on a user function, a `DualNumber` object is passed to mimic the function's numeric or vector input. All of `DualNumber`'s major mathematical dunder methods are overloaded so that the `DualNumber` is updated for each of the function's elementary operations.
Each of the binary dunder methods (addition, division, etc.) work with both other numeric types (integers and floats) as well as other `DualNumber`s.
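For illustration, here is a minimal sketch of the idea (the names and exact behavior are illustrative, not necessarily the package's final implementation):
```
class DualNumber:
    """Minimal dual number: the real part carries the value, the dual part the derivative."""

    def __init__(self, real, dual=1.0):
        self.real = real
        self.dual = dual

    def __add__(self, other):
        if isinstance(other, DualNumber):
            return DualNumber(self.real + other.real, self.dual + other.dual)
        return DualNumber(self.real + other, self.dual)  # int/float operand

    __radd__ = __add__

    def __mul__(self, other):
        if isinstance(other, DualNumber):
            # product rule: (uv)' = u'v + uv'
            return DualNumber(self.real * other.real,
                              self.real * other.dual + self.dual * other.real)
        return DualNumber(self.real * other, self.dual * other)

    __rmul__ = __mul__

    def __pow__(self, n):
        # power rule for a constant exponent: (u**n)' = n * u**(n-1) * u'
        return DualNumber(self.real ** n, n * self.real ** (n - 1) * self.dual)
```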
#### `elementary_functions.py`
The implementation for elementary functions is described [below](#Elementary-Functions).
#### `evaluator.py`
The `Evaluator` class, stored in this module, is the user's main communication with the package. An `Evaluator` object is defined by its function, which is provided by the user on creation. A derivative can be calculated at any point, with any seed vector, by calling an `Evaluator`'s `eval` function. The `Evaluator` class ensures that a user's function is valid, decides whether to use forward or reverse mode (based on performance), and returns the derivative on `eval` calls.
*When reverse mode is implemented, the `Evaluator` class may also contain optimizations for making future `eval` calls faster by storing a computational graph.*
#### `forward_mode.py`
This module contains only the `forward_mode` method, which takes a user function, evaluation point, and seed vector. Its implementation is incredibly simple: a `DualNumber` is created with the real part as the evaluation point and the dual part as the seed vector. This `DualNumber` is then passed through the user's function, and the resulting real and dual components of the output `DualNumber` are the function output and derivative.
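That logic, using the minimal `DualNumber` sketched above, is roughly the following (illustrative only):
```
def forward_mode(fn, eval_pt, seed_vector=1.0):
    # one forward pass: the dual part of the output is the directional derivative
    out = fn(DualNumber(eval_pt, seed_vector))
    return out.real, out.dual
```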
#### `function_tree.py`
The `FunctionTree` class, stored in this module, is a representation of a computational graph in the form of a tree, where intermediate variables are stored as nodes. The parent-child relationship between these nodes represents the elementary operations for these intermediate variables. This class contains optimizations like ensuring duplicate nodes are avoided.
*This module is currently unused (and un-implemented). When reverse mode is implemented, a given `Evaluator` object will build up and store a `FunctionTree` for optimization.*
#### `reverse_mode.py`
This module contains only the `reverse_mode` method, which takes the same arguments as `forward_pass`. This function is not yet implemented.
#### `graph_tree.py`
This module will contain functionality for displaying a presentable representation of a computation graph in an image. Using a `FunctionTree` object, the resulting image will be a tree-like structure with nodes and connections representing intermediate variables and elementary operations. This functionality is not yet implemented.
#### `graph_function.py`
This module will contain functionality for graphing a function and its derivative. It will create an `Evaluator` object and make the necessary `eval` calls to fill a graph for display. This functionality is not yet implemented.
### Elementary Functions
Many elementary functions like trigonometric, inverse trigonometric and exponential cannot be overloaded by Python's dunder methods (like addition and subtraction can). However, a user must still be able to use these operators in their functions, but cannot use the standard `math` or `np` versions, since a `DualNumber` object is passed to the function for forward passes.
Thus, we define a module `elementary_functions.py` that contains methods which take a `DualNumber`, and return a `DualNumber`, with the real part equal to the elementary operation applied to the real part, and the derivative of the operation applied to the dual part. Thus, these functions are essentially our package's **storage** for the common derivatives (cosine is the derivative of sine, etc.), where the storage of the derivative is the assignment of the dual part of the output of these elementary operations.
These operations will be automatically imported in the package's `__init__.py` so that users can simply call `Adifpy.sin()` or `Adifpy.cos()` (for this milestone our implementation requires users to call `ef.sin()` and `ef.cos()`, not `Adifpy.sin()` or `Adifpy.cos()`), as they would with `np.sin()` and `np.cos()`.
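As an illustration of the pattern, a sketch assuming the `DualNumber` above (not the package's exact code):
```
import numpy as np

def sin(x):
    # chain rule: d/dt sin(u(t)) = cos(u) * u'
    if isinstance(x, DualNumber):
        return DualNumber(np.sin(x.real), np.cos(x.real) * x.dual)
    return np.sin(x)  # plain numbers fall back to NumPy
```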
## Future Features
Now that our forward mode implementation is complete, we will move on to implement additional features and conveniences for the user.
### Reverse Mode
We will implement reverse mode AD in the differentiate subpackage. Given that we have already been quizzed on the background math, encoding this process should not be too onerous. One of the biggest challenges that we foresee is determining when it is best to use Reverse Mode and when it is best to use Forward Mode. Obviously, it is better to use forward mode when there are far more outputs than inputs and vice-versa for reverse mode, but in the cases where number of inputs and outputs are similar it is not so simple. To address this we will do a series of practical tests on functions of different dimensions, and manually encode the most efficient results into `evaluator.py`.
### Higher Dimensions
We have yet to make our AD implementation compatible with functions that have more than one input or output. This is one of the most important next steps for our project. Most of the existing modules, classes, data structures, etc. have already been implemented, but we need to expand their compatibility to functions of higher dimension. We will need to overload certain dunder methods (that we haven't touched yet) that are used on list-like objects, such as `__getitem__`, `__setitem__`, and `__delitem__` (the latter could, for example, be used in a function that projects an object onto another object of smaller dimension).
### Visualization
We are planning on creating a visualization tool with `MatPlotLib` that can plot the computational graph (calculated in Reverse Mode) of simple functions that are being differentiated. Obviously, the computational graph of very complex functions with many different inputs and outputs can be impractical to represent on a screen, so one of the biggest challenges that we will face is to have our program able determine when it can produce a visual tool that can be easily rendered, and when it cannot.
### Implementation Schedule
We will first expand our current implementation to be compatible with functions in higher dimensions. This will help us to better understand how to work with functions in greater dimensions before we implement Reverse Mode. In addition, by adding higher dimension compatibility we will not have to redo much of the code that we produce for our Reverse Mode implementation. We then plan on implementing reverse mode as this will serve as the foundation for the visualization tool, since the visualization tool requires the building of a computational graph (which does not happen in forward mode).
| Adifpy | /adifpy-0.0.3.tar.gz/adifpy-0.0.3/docs/milestone2.md | milestone2.md |
# Adifpy
## Table of Contents
- [Introduction](#Introduction)
- [Background](#Background)
- [How to Use](#How-to-Use)
- [Software Organization](#Software-Organization)
- [Directory Structure](#Directory-Structure)
- [Test Suite](Test-Suite)
- [Distribution](#Distribution)
- [Implementation](#Implementation)
- [Libraries](#Libraries)
- [Modules and Classes](#Modules-and-Classes)
- [Overloading](#Overloading)
- [Order of Implementation](#Order-of-Implementation)
- [Higher Dimensions](#Higher-Dimensions)
- [Feedback](#Feedback)
- [Milestone1](#Milestone1)
- [Licensing](#Licensing)
## Introduction
This software is aimed at allowing users to evaluate and differentiate their function. This is a powerful tool that will allow users to find solutions to complex derivations and visualize their results. It serves as an efficient way to take derivatives when compared to other solutions such as symbolic differentiation.
Applications are widespread, ranging from graphing simple functions to taking the derivative of complex, high dimension functions that can have widespread uses such as in optimization problems, machine learning, and data analysis.
## Background
Traditional methods for differentiation include symbolic differentiation and numerical differentiation. Each of these techniques brings its own challenges when used for computational science - symbolic differentiation requires converting complex computer programs into simple components and often results in complex and cryptic expressions, while numerical differentiation is susceptible to floating point and rounding errors.
Automatic differentiation (AD) solves these problems: any mathematical function (for which a derivative is needed) can be broken down into a series of constituent elementary (binary and unary) operations, executed in a specific order on a predetermined set of inputs. A technique for visualizing the sequence of operations corresponding to the function is the computational graph, with nodes representing intermediate variables and lines leaving from nodes representing operations used on intermediate variables. AD combines the known derivatives of the constituent elementary operations (e.g. arithmetic and transcendental functions) via the chain rule to find the derivative of the overall composition.
For example, for the hypothetical function $y = h(g(f(x)))$, where $f$, $g$, and $h$ all represent elementary operations, we can pose $v_0 = x$, $v_1 = f(v_0)$, $v_2 = g(v_1)$, $y = v_3 = h(v_2)$. The desired output is $\frac{dy}{dx}$, and by the chain rule and simple derivatives, we obtain:
<p align="center">
<img src="https://latex.codecogs.com/gif.image?%5Cbg_white%20%5Cdpi%7B110%7D%5Cfrac%7Bdy%7D%7Bdx%7D%20=%20%5Cfrac%7Bdv_3%7D%7Bdv_2%7D%20%5Ccdot%20%5Cfrac%7Bdv_2%7D%7Bdv_1%7D%20%5Ccdot%20%5Cfrac%7Bdv_1%7D%7Bdv_0%7D">
</p>
Our implementation of AD uses dual numbers to calculate derivatives of individual components. Dual numbers have real and dual components, taking the form $a + b\epsilon$, where $\epsilon^2 = 0$ and $\epsilon \neq 0$, and where `a` and `b` are real. By the Taylor series expansion of a function around a point, notice that evaluating a function at $a + \epsilon$ yields:
<p align="center">
<img src="https://latex.codecogs.com/gif.image?%5Cbg_white%20%5Cdpi%7B110%7Df(a%20+%20%5Cepsilon)%20=%20f(a)%20+%20%5Cfrac%7Bf'(a)%7D%7B1!%7D%20%5Cepsilon%20+%20%5Cfrac%7Bf''(a)%7D%7B2!%7D%20%5Cepsilon%5E2%20+%20...%20=%20f(a)%20+%20f'(a)%20%5Cepsilon">
</p>
Hence, by evaluating the function at the desired point $a$ (i.e., computing $f(a + \epsilon)$), the outputted real and dual components are the function evaluated at `a` and the derivative of the function evaluated at `a`, respectively. This is an efficient way of calculating the requisite derivatives.
## How to Use
To use the package, the user will be able to call the entire package directly (since the functionality for construction and differentiation will be in the `__main__.py` file).
A user will need to provide:
1. The function `fn` in an acceptable format (these format options will be outlined by the documentation in the package `README.md`).
2. The point `eval_pt` at which to evaluate the derivative.
3. The seed vector `p` upon which to perform the differentiation.
With these arguments, the user will be able to make a call which looks like the following:
```
import adifpy as ad
...
derivative = ad.run(fn, eval_pt, p)
```
Note that `eval_pt` and `p` will need to be of the same dimension as the input to `fn`.
The user may wish to also generate a computational graph when computing the derivative. In this case, they can call the package with
```
derivative = ad.run(fn, eval_pt, p, viz_file=[FILENAME])
```
which will return the derivative and save an image at the desired `FILENAME`. Note that this runs the same function but passes an optional argument to indicate that the package should use the computational graph to construct an image and save it as well.
## Software Organization
The following section outlines our plans for organizing the package directory, sub-packages, modules, classes, and deployment.
### Directory Structure
<pre>
adifpy/
├── docs
│   └── milestone1
├── LICENSE
├── README.md
├── requirements.txt
├── <a href="#mainpy">main.py</a>
├── src
│   ├── construct
│   │   ├── <a href="#function_treepy">function_tree.py</a>
│   │   └── <a href="#nodepy">node.py</a>
│   ├── differentiate
│   │   ├── <a href="#dual_numberpy">dual_number.py</a>
│   │   ├── <a href="#forward_passpy">forward_pass.py</a>
│   │   └── <a href="#reverse_passpy">reverse_pass.py</a>
│   └── visualize
│       └── <a href="#graph_treepy">graph_tree.py</a>
└── test
    ├── run_tests.sh
    ├── test_1.py
    ├── test_2.py
    └── …
</pre>
Sub-packages: The package will contain 4 sub-packages: `construct`, `differentiate`, `visualize`, and `test`. Construct will allow for the creation of a computational graph from an inputted function. Differentiate will allow a user to perform either forward or reverse automatic differentiation (AD). Visualize will allow the user to create visualizations for either their computational graph or their AD. Test will provide a suite of tests to ensure the package is working as intended.
### Test Suite
The test suite will be contained in the test sub-package, as shown above in the [Directory Structure](#Directory-Structure).
### Distribution
Distributing Our Package: We will use PyPI to distribute our software package under the name `adifpy` (a name that is not currently taken on PyPI).
## Implementation
Major data structures, including descriptions on how dual numbers are implemented, are described in the [Modules and Classes](#Modules-and-Classes) section below.
### Libraries
All sub-packages will require the `NumPy` package. Additionally, the `visualize` sub-package will require `MatPlotLib`. Further libraries may be added later for ease of computation or visualization.
### Modules and Classes
#### `main.py`
This module will be the main point of communication with the user. Users will have the ability to provide functions in different formats (other than as a callable Python object). For example, this module may contain functionality for passing a function as a string representing a LaTeX expression. Specific implementation will be decided depending on future usage and tests.
First, input will be processed and the function's computational graph will be generated: this process is described below. Next, this module will decide whether to call forward or reverse mode, depending on which will be more efficient (or, if the user explicitly requests one or the other, the requested mode). The corresponding AD will be performed and the results returned to the user. Note that the computational graph will be generated fully before the AD is performed, so there is no overhead from building the graph during the AD pass itself. Methods in this module will likely include `latex_to_fn`, `compute_deriv`, etc.
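As a rough illustration of that decision, a helper along these lines could compare input and output dimensionality (the name `choose_mode` and the exact heuristic are assumptions, not the final API):
```
def choose_mode(n_inputs, n_outputs, requested=None):
    """Hypothetical helper: pick forward or reverse mode.

    Forward mode costs roughly one pass per input dimension, while reverse
    mode costs roughly one pass per output dimension, so reverse mode is
    preferred when a function has many more inputs than outputs.
    """
    if requested in ("forward", "reverse"):
        return requested                 # honor an explicit user request
    return "forward" if n_inputs <= n_outputs else "reverse"
```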
#### `function_tree.py`
The `FunctionTree` class, stored in this module, is a representation of a computational graph in the form of a tree, where intermediate variables are stored as nodes. The parent-child relationship between these nodes represents the elementary operations for these intermediate variables. This class contains optimizations like ensuring duplicate nodes are avoided. Methods included in this class will execute modifications to the function tree.
#### `node.py`
The `Node` class, stored in this module, mimics input to the user's function. All of its relevant mathematical operators are overloaded so that when a function performs an operation on the node, this elementary operation can be "registered" with the relevant `FunctionTree` instance. `Node` will also have to mimic vector input, so any relevant list-like operator (like `__getitem__`) will be overloaded as well. Methods in this class will conduct the various elementary operations needed to build the function tree, like add, multiply, and trigonometric functions, for example.
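A heavily simplified sketch of how this registration might look is below; the attribute names and the `FunctionTree.add_node` call are assumptions for illustration only, not the final design:
```
class Node:
    """Hypothetical sketch: stands in for the user's input and records every
    elementary operation applied to it in the associated FunctionTree."""

    def __init__(self, tree, parents=(), op=None):
        self.tree = tree        # FunctionTree this node is registered with
        self.parents = parents  # nodes this node was computed from
        self.op = op            # elementary operation that produced this node

    def _register(self, op, *parents):
        child = Node(self.tree, parents=parents, op=op)
        self.tree.add_node(child)   # assumed FunctionTree method
        return child

    def __add__(self, other):
        return self._register("add", self, other)

    def __mul__(self, other):
        return self._register("mul", self, other)

    def __getitem__(self, index):
        # Vector input: accessing the n-th component registers an extra input
        return self._register(("getitem", index), self)
```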
#### `dual_number.py`
The `DualNumber` class, stored in this module, contains the dual-number functionality used for automatic differentiation. Its operators are overloaded so that elementary operations in the forward and reverse passes work. Methods in this class will compute the various elementary operations used with dual numbers, like addition and multiplication.
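For illustration, a minimal version of dual-number arithmetic (a sketch, not the final implementation) might look like the following, where the product rule determines the dual part of a multiplication:
```
import math

class DualNumber:
    """Minimal sketch: real part carries the value, dual part the derivative."""

    def __init__(self, real, dual=0.0):
        self.real = real
        self.dual = dual

    def __add__(self, other):
        other = other if isinstance(other, DualNumber) else DualNumber(other)
        return DualNumber(self.real + other.real, self.dual + other.dual)

    __radd__ = __add__

    def __mul__(self, other):
        other = other if isinstance(other, DualNumber) else DualNumber(other)
        # (a + b*eps)(c + d*eps) = ac + (ad + bc)*eps, since eps^2 = 0
        return DualNumber(self.real * other.real,
                          self.real * other.dual + self.dual * other.real)

    __rmul__ = __mul__

    def sin(self):
        # chain rule: d/dx sin(u) = cos(u) * u'
        return DualNumber(math.sin(self.real), math.cos(self.real) * self.dual)

# f(x) = x*x + 3*x at x = 2 with dual part 1: y.real == 10.0, y.dual == 7.0
x = DualNumber(2.0, 1.0)
y = x * x + 3 * x
```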
#### `forward_pass.py`
The functionality for the forward pass in AD is implemented in this module. Methods in this module will execute the forward pass.
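Reusing the `DualNumber` sketch above, a single-variable simplification of the forward pass might just seed the dual part with the seed vector and evaluate the function once (the signature here is an assumption, not the eventual API):
```
def forward_pass(fn, eval_pt, p):
    # Seed the dual part with p and evaluate fn once; the dual part of the
    # output is the directional derivative at eval_pt in direction p.
    out = fn(DualNumber(eval_pt, p))
    return out.real, out.dual
```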
#### `reverse_pass.py`
The functionality for the reverse pass in AD is implemented in this module. Methods in this module will execute the reverse pass.
#### `graph_tree.py`
This module will contain the functionality for producing a presentable representation of a computational graph as an image. Using a `FunctionTree` instance, the resulting image will be a tree-like structure whose nodes and connections represent intermediate variables and elementary operations. Methods in this module will render the computational graph as an image.
### Overloading
Some of the classes above use overloaded operators (the `Node` and `DualNumber` classes). For the major mathematical operators, they will be overloaded simply using the Python built-in dunder methods (`__add__` and `__mul__`, etc).
For elementary functions like `sin`, `cos`, `sqrt`, and `exp`, we can still overload these operators in the needed classes. For convenience in reverse mode, each of the classes will store a boolean indicating whether these functions should be evaluated for the derivative instead of the value. This essentially provides the "storage" for common derivatives: the derivative of the `sin` function will be encoded in the overloaded `sin` method, and will take effect when the class attribute indicating that the derivative is needed is `True` (which will occur in reverse mode).
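One common way to make such elementary functions work on both plain numbers and the custom classes is a thin module-level wrapper that defers to an overloaded method when one exists; this is a sketch under that assumption, and the reverse-mode flag described above is deliberately not modeled here:
```
import math

def sin(x):
    """Hypothetical wrapper: use the object's own sin overload if it has one
    (e.g. Node or DualNumber), otherwise fall back to math.sin."""
    if hasattr(x, "sin"):
        return x.sin()
    return math.sin(x)
```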
### Order of Implementation
We will build the modules in the following order:
black box tests (if applicable) → `node.py` → `__main__.py` → `function_tree.py` → `dual_number.py` → `forward_pass.py` → `reverse_pass.py` → `graph_tree.py` → additional tests.
### Higher Dimensions
In order to handle cases with higher dimensions, `Node` will also have to mimic vector input. By overloading list-like operators like `__getitem__`, when the function accesses the `n`th input, the `Node` class can simply register these additional inputs with the relevant `FunctionTree` instance.
## Feedback
### Milestone1
1. *Thinking about how users interact with reverse mode and computational graphs:*
Users will not directly interact with reverse mode or forward mode. By communicating with `main.py`, the user provides a function and our library automatically calls forward or reverse mode, depending on which will be faster. This can be easily determined from the dimensionality of the input and output of the function. This is described further in issue 3 below and in [`main.py`](#mainpy) above.
2. *Adding a `src` directory and `requirements.txt` file:*
We have updated the [directory structure](#Directory-Structure) above to reflect adding these components, and have updated the repository.
3. *Being clear about forward and reverse modes:*
We have updated the [`main.py`](#mainpy) description above to reflect how the package will call forward and reverse modes.
## Licensing
The package will be released under the MIT License. We chose this license for two reasons:
1. We wanted the simplest possible licensing.
2. Since our project is mainly meant as a utility, we want to allow people to do whatever they would like with it, in hopes of maximizing how useful it is.
While we use the `NumPy` and `MatPlotLib` libraries, we do not need to attribute or reference them because we are not redistributing any of their source code.