response | instruction |
---|---|
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings). | def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs |
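A minimal usage sketch of `get_pairs` (the word tuple below is illustrative and assumes the function above is in scope):

```python
# Illustrative BPE-style word: a tuple of variable-length symbols.
word = ("lo", "w", "er", "</w>")

# Every pair of adjacent symbols, returned as an (unordered) set.
assert get_pairs(word) == {("lo", "w"), ("w", "er"), ("er", "</w>")}
```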
Encode whitespaces to extra tokens.
>>> encode_whitespaces('a\n  b\n   c', 10, 10)
'a\n<|extratoken_10|>b\n<|extratoken_11|>c' | def encode_whitespaces(text: str, start_extra_id: int=10, max_len: int=10):
"""Encode whitespaces to extra tokens.
>>> encode_whitespaces('a\\n  b\\n   c', 10, 10)
'a\\n<|extratoken_10|>b\\n<|extratoken_11|>c'
"""
for i in np.arange(max_len, 1, -1):
text = text.replace(" " * i, f"<|extratoken_{start_extra_id + i - 2}|>")
return text |
Decode the whitespace-encoded strings produced by encode_whitespaces.
>>> text = 'a\n b\n c'
>>> s, l = 10, 10
>>> text == decode_whitespaces(encode_whitespaces(text, s, l), s, l)
True | def decode_whitespaces(text: str, start_extra_id: int=10, max_len: int=10):
"""Decode the whitespace-encoded strings produced by encode_whitespace.
>>> text = 'a\\n b\\n c'
>>> s, l = 10, 10
>>> text == decode_whitespaces(encode_whitespaces(text, s, l), s, l)
True
"""
for l in range(2, max_len + 1):
token_id = start_extra_id - 2 + l
token = f"<|extratoken_{token_id}|>"
text = text.replace(token, " " * l)
return text |
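A round-trip sketch for the two whitespace helpers (it assumes both functions above are in scope; `numpy` is imported here because `encode_whitespaces` uses `np.arange`):

```python
import numpy as np  # needed by encode_whitespaces

text = "def f():\n    return 1\n"             # a four-space indent
encoded = encode_whitespaces(text, 10, 10)    # 4 spaces -> <|extratoken_12|>
decoded = decode_whitespaces(encoded, 10, 10)

assert "<|extratoken_12|>" in encoded
assert decoded == text                        # lossless round trip
```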
Initialize tokenizer. | def build_hgf_tokenizer(args):
"""Initialize tokenizer."""
tokenizer_path = args.tokenizer_path
if args.rank == 0:
print(f"> building huggingface tokenizer from {tokenizer_path} ...", flush=True)
assert tokenizer_path is not None, "Tokenizer path must be provided."
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
if args.rank == 0:
print(f" > eos_token = {tokenizer.eos_token}", flush=True)
ws_start_id = args.ws_encoding_start_id if "ws_encoding_start_id" in args else None
ws_len = args.ws_encoding_length if "ws_encoding_length" in args else None
return HgfTokenizerWrapper(
tokenizer, ws_start=ws_start_id, ws_len=ws_len
) |
Initialize tokenizer. | def build_tokenizer(args):
"""Initialize tokenizer."""
if "tokenizer_path" in args and args.tokenizer_path is not None:
# build huggingface tokenizer
tokenizer = build_hgf_tokenizer(args)
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer
if args.rank == 0:
print("> building {} tokenizer ...".format(args.tokenizer_type), flush=True)
# Select and instantiate the tokenizer.
assert args.vocab_file is not None
if args.tokenizer_type == "GPT2BPETokenizer":
assert args.merge_file is not None
tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
else:
raise NotImplementedError(
"{} tokenizer is not " "implemented.".format(args.tokenizer_type)
)
# Add vocab size.
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer |
Pad vocab size so it is divisible by model parallel size and
still has a GPU-friendly size. | def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still has a GPU-friendly size."""
after = orig_vocab_size
if args.make_vocab_size_divisible_by > orig_vocab_size:
multiple = args.make_vocab_size_divisible_by
else:
multiple = args.make_vocab_size_divisible_by * args.tensor_model_parallel_size
while (after % multiple) != 0:
after += 1
if args.rank == 0:
print(
" > padded vocab (size: {}) with {} dummy tokens "
"(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after),
flush=True,
)
return after |
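A worked example of the padding arithmetic (the numbers are illustrative, e.g. a GPT-2-sized vocab with `make_vocab_size_divisible_by=128` and tensor-parallel size 8, not values read from this config):

```python
# Illustrative values only.
orig_vocab_size = 50257
make_vocab_size_divisible_by = 128
tensor_model_parallel_size = 8

multiple = make_vocab_size_divisible_by * tensor_model_parallel_size  # 1024
after = orig_vocab_size
while after % multiple != 0:
    after += 1

print(after, after - orig_vocab_size)  # 51200 padded size, 943 dummy tokens
```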
Build the model. | def model_provider(pre_process=True, post_process=True):
"""Build the model."""
print_rank_0("building GPT model ...")
see_memory_usage(f"Before Building Model", force=True)
args = get_args()
with deepspeed.zero.Init(
data_parallel_group=mpu.get_data_parallel_group(),
remote_device=None if args.remote_device == "none" else args.remote_device,
config_dict_or_path=args.deepspeed_config,
enabled=args.zero_stage == 3,
mpu=mpu,
):
if args.deepspeed and not args.no_pipeline_parallel:
model = CodeGeeXModelPipe(num_tokentypes=0, parallel_output=True)
# This is a hack to give us a reference to get_batch_pipe from within training.py
# We need to call model.set_batch_fn after deepspeed.initialize
model._megatron_batch_fn = get_batch_pipe
# Precompute the attention mask and store it in args. This avoids having to
# pipeline it as an activation during training. The mask is constant, and thus
# we can reuse it.
attention_mask = torch.tril(
torch.ones(
(1, args.seq_length, args.seq_length),
device=torch.cuda.current_device(),
)
).view(1, 1, args.seq_length, args.seq_length)
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
if args.fp16:
attention_mask = attention_mask.half()
elif args.bf16:
attention_mask = attention_mask.bfloat16()
# Attention mask must be bool.
args.attn_mask = attention_mask.to(torch.bool)
else:
model = CodeGeeXModel(
num_tokentypes=0,
parallel_output=True,
)
if args.load_state is not None:
timers = get_timers()
print_rank_0("Loading warmstarting model states ...")
timers("load-model-states").start()
mp_rank = mpu.get_tensor_model_parallel_rank()
if os.path.isdir(args.load_state):
model_path = os.path.join(
args.load_state, "mp_rank_{:02d}_model_states.pt".format(mp_rank)
)
else:
model_path = args.load_state
print_rank_0(f"Loading model from {model_path} ...")
state_dict = torch.load(model_path, map_location="cpu")
if "module" in state_dict:
state_dict = state_dict["module"] # strip other client states
model.load_state_dict(state_dict)
timers("load-model-states").stop()
timers.log(["load-model-states"])
see_memory_usage(f"After Building Model", force=True)
return model |
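A toy view of the causal mask precomputed in the pipeline branch (a `seq_length` of 4 is illustrative): `torch.tril` produces the lower-triangular matrix and the `< 0.5` comparison flips it, so `True` marks future positions to be masked out.

```python
import torch

seq_length = 4  # illustrative
mask = torch.tril(torch.ones((1, seq_length, seq_length)))
mask = mask.view(1, 1, seq_length, seq_length) < 0.5  # True above the diagonal

print(mask[0, 0].int())
# tensor([[0, 1, 1, 1],
#         [0, 0, 1, 1],
#         [0, 0, 0, 1],
#         [0, 0, 0, 0]], dtype=torch.int32)
```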
Generate a batch | def get_batch(data_iterator):
"""Generate a batch"""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ["input_ids", "attention_mask", "labels"]
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b["input_ids"].contiguous()
# attn_mask_ = data_b["attention_mask"].contiguous()
labels_ = data_b["labels"].contiguous()
tokens = tokens_[:, :-1]
labels = labels_[:, 1:]
# Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss,
)
# Mask the loss to avoid computing loss on prompt and padding tokens
prompt_loss_mask = labels >= 0
loss_mask = prompt_loss_mask * loss_mask
return tokens, labels, loss_mask, attention_mask, position_ids |
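The one-token shift and the prompt mask in `get_batch` can be seen on a toy batch; this is a sketch only, assuming (as is common for fine-tuning data) that prompt positions in `labels` carry a negative id such as -100:

```python
import torch

# Toy batch: one sequence of five token ids; the first two label positions
# are prompt tokens marked with a negative id.
input_ids = torch.tensor([[11, 22, 33, 44, 55]])
labels_   = torch.tensor([[-100, -100, 33, 44, 55]])

tokens = input_ids[:, :-1]           # model input  : [[11, 22, 33, 44]]
labels = labels_[:, 1:]              # shifted target: [[-100, 33, 44, 55]]
prompt_loss_mask = (labels >= 0)     # [[False, True, True, True]]

print(prompt_loss_mask.float())      # tensor([[0., 1., 1., 1.]])
```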
Modification of `get_batch` to work on `next(data_iterator)` instead of `data_iterator` | def get_batch_pipe(data):
"""Modification of `get_batch` to work on `next(data_iterator)` instead of `data_iterator`"""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ["input_ids"]
datatype = torch.int64
# Broadcast data.
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b["input_ids"].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
# Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss,
)
return (tokens, position_ids, attention_mask), (labels, loss_mask) |
Forward step. | def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers("batch-generator").start()
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(data_iterator)
timers("batch-generator").stop()
output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
return output_tensor, partial(loss_func, loss_mask) |
Forward step for validation. | def valid_forward_step(data_iterator, model):
"""Forward step for validation."""
args = get_args()
timers = get_timers()
# Get the batch.
timers("batch-generator").start()
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(data_iterator)
timers("batch-generator").stop()
output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
return output_tensor, partial(valid_loss_func, loss_mask) |
Build train, valid, and test datasets. | def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0("> building train, validation, and test datasets " "for GPT ...")
if args.co_evaluation:
def dataset_partition_path_parsing(data_path):
dataset_path = {}
for index in range(len(data_path)):
dataset_path[data_path[index]] = data_path[index]
return dataset_path
assert args.valid_data_path is not None, "Valid data path must be given when --co-evaluation is turned on."
valid_data_path = dataset_partition_path_parsing(args.valid_data_path)
if args.test_data_path is not None:
test_data_path = dataset_partition_path_parsing(args.test_data_path)
else:
test_data_path = None
train_ds, _, _ = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string="1,0,0",
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
valid_ds = {}
for key, value in valid_data_path.items():
_, valid_ds_item, _ = build_train_valid_test_datasets(
data_prefix=[value],
data_impl=args.data_impl,
splits_string="0,1,0",
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
valid_ds[key] = valid_ds_item
if test_data_path is not None:
test_ds = {}
for key, value in test_data_path.items():
_, _, test_ds_item = build_train_valid_test_datasets(
data_prefix=[value],
data_impl=args.data_impl,
splits_string="0,0,1",
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
test_ds[key] = test_ds_item
else:
test_ds = None
elif args.valid_data_path is None:
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
else:
train_ds, _, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string="100,0,0",
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
_, valid_ds, _ = build_train_valid_test_datasets(
data_prefix=args.valid_data_path,
data_impl=args.data_impl,
splits_string="0,100,0",
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
print_rank_0("> finished creating GPT datasets ...")
return train_ds, valid_ds, test_ds |
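A small sketch of what the inner `dataset_partition_path_parsing` helper yields when `--co-evaluation` is on (the paths are placeholders):

```python
# Placeholder paths standing in for args.valid_data_path.
valid_data_path = ["data/python_valid", "data/cpp_valid"]

dataset_path = {p: p for p in valid_data_path}
# {'data/python_valid': 'data/python_valid', 'data/cpp_valid': 'data/cpp_valid'}
# Each key then gets its own validation dataset, so losses can be tracked
# per partition instead of over one merged validation set.
```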
Build the model. | def model_provider(pre_process=True, post_process=True):
"""Build the model."""
print_rank_0("building GPT model ...")
see_memory_usage(f"Before Building Model", force=True)
args = get_args()
with deepspeed.zero.Init(
data_parallel_group=mpu.get_data_parallel_group(),
remote_device=None if args.remote_device == "none" else args.remote_device,
config_dict_or_path=args.deepspeed_config,
enabled=args.zero_stage == 3,
mpu=mpu,
):
if args.deepspeed and not args.no_pipeline_parallel:
model = CodeGeeXModelPipe(num_tokentypes=0, parallel_output=True)
# This is a hack to give us a reference to get_batch_pipe from within training.py
# We need to call model.set_batch_fn after deepspeed.initialize
model._megatron_batch_fn = get_batch_pipe
# Precompute the attention mask and store it in args. This avoids having to
# pipeline it as an activation during training. The mask is constant, and thus
# we can reuse it.
attention_mask = torch.tril(
torch.ones(
(1, args.seq_length, args.seq_length),
device=torch.cuda.current_device(),
)
).view(1, 1, args.seq_length, args.seq_length)
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
if args.fp16:
attention_mask = attention_mask.half()
elif args.bf16:
attention_mask = attention_mask.bfloat16()
# Attention mask must be bool.
args.attn_mask = attention_mask.to(torch.bool)
else:
model = CodeGeeXModel(
num_tokentypes=0,
parallel_output=True,
)
if args.load_state is not None:
timers = get_timers()
print_rank_0("Loading warmstarting model states ...")
timers("load-model-states").start()
mp_rank = mpu.get_tensor_model_parallel_rank()
if os.path.isdir(args.load_state):
model_path = os.path.join(
args.load_state, "mp_rank_{:02d}_model_states.pt".format(mp_rank)
)
else:
model_path = args.load_state
print_rank_0(f"Loading model from {model_path} ...")
state_dict = torch.load(model_path, map_location="cpu")
if "module" in state_dict:
state_dict = state_dict["module"] # strip other client states
model.load_state_dict(state_dict)
timers("load-model-states").stop()
timers.log(["load-model-states"])
see_memory_usage(f"After Building Model", force=True)
return model |
Generate a batch | def get_batch(data_iterator):
"""Generate a batch"""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ["input_ids"]
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b["input_ids"].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
# Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss,
)
return tokens, labels, loss_mask, attention_mask, position_ids |
Modification of `get_batch` to work on `next(data_iterator)` instead of `data_iterator` | def get_batch_pipe(data):
"""Modification of `get_batch` to work on `next(data_iterator)` instead of `data_iterator`"""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
keys = ["input_ids"]
datatype = torch.int64
# Broadcast data.
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
tokens_ = data_b["input_ids"].long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
# Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eod,
args.reset_position_ids,
args.reset_attention_mask,
args.eod_mask_loss,
)
return (tokens, position_ids, attention_mask), (labels, loss_mask) |
Forward step. | def forward_step(data_iterator, model):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers("batch-generator").start()
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(data_iterator)
timers("batch-generator").stop()
output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
return output_tensor, partial(loss_func, loss_mask) |
Build train, valid, and test datasets. | def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0("> building train, validation, and test datasets " "for GPT ...")
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
seed=args.seed,
skip_warmup=(not args.mmap_warmup),
)
print_rank_0("> finished creating GPT datasets ...")
return train_ds, valid_ds, test_ds |
The main function for loading the model. | def load_model(args_opt):
r"""
The main function for loading the model.
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
print("===args_opt: ", args_opt, flush=True)
print("===device_num is: ", device_num, flush=True)
args_opt.op_level_model_parallel_num = 1
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
print("===data_parallel_num is: ", data_parallel_num, flush=True)
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
# Currently only a single batch size is supported for predict
if args_opt.run_type == "predict":
batch_size = 1
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = EvalNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, current_index, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
print("======start load_distributed checkpoint", flush=True)
time.sleep(rank * 0.5)
if not os.path.exists(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}")):
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B0-50.ckpt" # TODO: set to current ckpt name
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, "rank_0", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, "rank_0", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
# TODO: add them back if not for the 1st run!
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
if not os.path.exists(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}'):
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
if not os.path.exists("/home/work/sfs/cache/ckpts_npy/"):
os.mkdir("/home/work/sfs/cache/ckpts_npy/")
for k, weight in pangu_alpha.parameters_dict().items():
print(k)
np.save(os.path.join("/home/work/sfs/cache/ckpts_npy/", f"{k}.npy"), weight.asnumpy())
rank_obs_save_path = "./" # TODO: set to current obs path for saving
if not mox.file.exists(rank_obs_save_path):
mox.file.make_dirs(rank_obs_save_path)
mox.file.copy_parallel("/home/work/sfs/cache/ckpts_npy/", rank_obs_save_path)
print("======= npy saved")
return model_predict, config, rank |
Export mindir model | def export_mindir(model_predict, config):
"""Export mindir model"""
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
export(model_predict.predict_network, inputs_np, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1024', file_format='MINDIR')
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
export(model_predict.predict_network, inputs_np_1, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1', file_format='MINDIR')
print("Export finished and now exit.") |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
from src.generate import generate, generate_increment
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
samples = [
"# language: Python\ndef add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"def add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"# language: Python\ndef optimization():\n '''\n Find the maximum of P=E**2*R/(R + r)**2 if E and r are fixed but R varies. Import sympy. Use sympy. Find where the derivative is equal to zero. Substitute the value of R into P.\n '''\n",
"from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n given threshold.\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n False\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n True\n \"\"\"\n",
"// language: C++\nint add(int a, int b) {\n /* Find the sum of a and b. */\n",
"int add(int a, int b) {\n /* Find the sum of a and b. */\n",
"bool prime(int n) {\n // Find whether n is a prime number\n",
"// language: JavaScript\nfunction add(a, b) {\n // Find the sum of a and b.\n",
"# language: R\nadd<-function(a, b) {\n # Find the sum of a and b.\n",
]
verbose = False
for i, sample in enumerate(samples):
for _ in range(1):
tokenized_token = tokenizer.encode_code(sample)
input_ids = np.array(tokenized_token).reshape(1, -1)
# Call inference
generate_func = generate_increment if config.use_past else generate
t0 = time.perf_counter()
output_ids = generate_func(model_predict, input_ids, args_opt, verbose)
# Decode output ids to sentence
t1 = time.perf_counter()
output_samples = tokenizer.decode_code(output_ids.tolist())
output_samples_str = "".join(output_samples)
if rank % 8 == 0:
print(f"=================== prompt {i} ====================")
print(sample, flush=True)
print(f"=================== generation {i} ====================")
print(output_samples_str, flush=True)
print(
f"=== Total time (s): {t1 - t0}, {output_ids.shape[-1] - input_ids.shape[-1]} tokens, {(output_ids.shape[-1] - input_ids.shape[-1]) / (t1 - t0)} token/s") |
Main process for predict or export model | def main():
"""Main process for predict or export model"""
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
if opt.export:
export_mindir(model_predict, config)
else:
run_predict(model_predict, config, opt, rank) |
Set weight decay coefficient, zero for bias and layernorm, 1e-1 for rest | def set_weight_decay(params):
"""
Set weight decay coefficient, zero for bias and layernorm, 1e-1 for rest
"""
decay_filter = lambda x: 'layernorm' not in x.name.lower() and "bias" not in x.name.lower()
decay_params = list(filter(decay_filter, params))
other_params = list(filter(lambda x: not decay_filter(x), params))
group_params = [
{"params": decay_params, "weight_decay": 1e-1},
{"params": other_params, "weight_decay": 0.0},
{"order_params": params},
]
return group_params |
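A usage sketch of the grouping rule in `set_weight_decay` (the parameter names are made up; only the `.name` attribute is inspected, so plain namespaces stand in for MindSpore parameters, and the function above is assumed to be in scope):

```python
from types import SimpleNamespace

params = [
    SimpleNamespace(name="layers.0.attention.dense.weight"),
    SimpleNamespace(name="layers.0.attention.dense.bias"),
    SimpleNamespace(name="layers.0.layernorm.gamma"),
]

groups = set_weight_decay(params)
print([p.name for p in groups[0]["params"]])  # weight only -> decay 1e-1
print([p.name for p in groups[1]["params"]])  # bias + layernorm -> decay 0.0
```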
Add checkpoint policy to callback. | def add_checkpoint_callback_policy(args_param, callback, rank_id):
r"""
Add checkpoint policy to callback.
"""
if args_param.save_checkpoint:
# checkpoint store epoch_num and step_num info
ckpt_append_info = [{"epoch_num": args_param.has_trained_epoches, "step_num": args_param.has_trained_steps}]
ckpt_config = CheckpointConfig(
save_checkpoint_steps=args_param.save_checkpoint_steps,
keep_checkpoint_max=args_param.keep_checkpoint_max,
integrated_save=False,
append_info=ckpt_append_info,
)
# save checkpoint into rank directory
ckpoint_cb = ModelCheckpoint(prefix=args_param.ckpt_name_prefix + str(rank_id),
directory=os.path.join(args_param.save_checkpoint_path, f"rank_{rank_id}"),
config=ckpt_config)
callback.append(ckpoint_cb)
saveckpt_cb = SaveCheckpointCallback(cache_dir=args_param.save_checkpoint_path,
bucket=args_param.save_checkpoint_obs_path,
local_rank=rank_id,
has_trained_epoch=args_param.has_trained_epoches,
has_trained_step=args_param.has_trained_steps,
syn_times=args_param.save_checkpoint_steps)
callback.append(saveckpt_cb) |
Set parallel context | def set_parallel_context(args_opt):
r"""Set parallel context"""
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
if device_num < 128:
args_opt.optimizer_shard = 0
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, gradients_mean=False,
full_batch=bool(args_opt.full_batch), strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path,
enable_parallel_optimizer=bool(args_opt.optimizer_shard), strategy_ckpt_save_file='strategy.ckpt',
)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
return rank, device_num |
The main training process. | def run_train(args_opt):
r"""The main training process."""
os.environ["HCCL_CONNECT_TIMEOUT"] = "2000"
# Set execution mode
context.set_context(
mode=context.GRAPH_MODE, device_target=args_opt.device_target
)
if args_opt.profiling:
profiler = Profiler(output_path="/cache/profiler_data")
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
rank = 0
device_num = 1
if args_opt.distribute == "true":
rank, device_num = set_parallel_context(args_opt)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
# copy data from the cloud to the /cache/Data
cache_url = '/cache/Data/'
eval_cache_url = '/cache/EvalData/'
if not args_opt.offline:
download_data(src_data_url=args_opt.data_url, tgt_data_path=cache_url, rank=rank)
download_data(src_data_url=args_opt.eval_data_url, tgt_data_path=eval_cache_url, rank=rank)
# Set model property
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
batch_size = args_opt.per_batch_size * data_parallel_num
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num, model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=bool(args_opt.optimizer_shard),
vocab_emb_dp=bool(args_opt.word_emb_dp), recompute=True,
gradient_aggregation_group=args_opt.gradient_aggregation_group)
micro_interleaved_size = args_opt.micro_interleaved_size
config = PanguAlphaConfig(
batch_size=batch_size // micro_interleaved_size,
num_heads=args_opt.num_heads,
hidden_size=args_opt.embedding_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
num_layers=args_opt.num_layers,
ffn_hidden_size=args_opt.embedding_size * 4,
eod_token=args_opt.eod_id,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == "fp32"
else mstype.float16,
dropout_rate=args_opt.dropout_rate,
enable_offload=bool(args_opt.opt_offload),
use_moe=bool(args_opt.use_moe),
per_dp_dim_expert_num=args_opt.per_dp_dim_expert_num,
hidden_act="fast_gelu" if args_opt.device_target != "GPU" else "gelu",
parallel_config=parallel_config,
)
print("===config is: ", config, flush=True)
# Define network
pangu_alpha = PanguAlphaModel(config=config)
loss = CrossEntropyLoss(config.parallel_config.dp_mp_config)
if micro_interleaved_size > 1:
print("===using MicroBatchInterleaved", flush=True)
pangu_alpha_with_loss_net = MicroBatchInterleaved(PanGUAlphaWithFinetuneLoss(config, pangu_alpha, loss),
micro_interleaved_size)
else:
pangu_alpha_with_loss_net = PanGUAlphaWithFinetuneLoss(config, pangu_alpha, loss)
pangu_alpha_with_loss = _VirtualDatasetCell(pangu_alpha_with_loss_net)
print("=====args_opt is: ", args_opt, flush=True)
# Warm-up and cosine decay learning rate
lr = LearningRate(learning_rate=args_opt.start_lr, end_learning_rate=args_opt.end_lr,
warmup_steps=args_opt.warmup_step, decay_steps=args_opt.decay_steps)
params = pangu_alpha_with_loss.trainable_params()
group_params = set_weight_decay(params)
if args_opt.optimizer == "lamb":
optimizer = nn.Lamb(group_params, learning_rate=lr)
elif args_opt.opt_offload:
optimizer = AdamWeightDecayOp(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95,
param_init_type=config.param_init_type)
else:
optimizer = FP32StateAdamWeightDecay(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95)
# Initial scaling sens
loss_scale_value = math.pow(2, 32)
epoch_num = args_opt.epoch_size
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.05)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B{rank}_21-{args_opt.load_ckpt_epoch}_2.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
# TODO: remove after warming-up!
param_dict.pop('global_step')
# TODO: add them back if not for the 1st run!
# if param_dict.get("epoch_num") and param_dict.get("step_num"):
# args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
# args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
# args_opt.has_trained_steps = 9000
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 64 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
# if args_opt.tb_dir is not None and rank == device_num - 1:
if args_opt.tb_dir is not None and rank == 0:
os.makedirs(args_opt.tb_dir, exist_ok=True)
summary_writer = SummaryWriter(args_opt.tb_dir)
os.system(f'chmod 777 -R {args_opt.tb_dir}')
else:
summary_writer = None
# Dataset loading mindrecord files
ds, ds_eval = create_dataset(config.batch_size * micro_interleaved_size, data_path=args_opt.code_data,
args_opt=args_opt, data_start_index=0,
eod_reset=config.eod_reset, full_batch=bool(args_opt.full_batch),
eod_id=args_opt.eod_id,
device_num=device_num, rank=rank, epoch=epoch_num,
train_and_eval=bool(args_opt.train_and_eval_mode), val_ratio=0.001)
actual_epoch_num = int(ds.get_dataset_size() / args_opt.sink_size)
callback = [
TimeMonitor(args_opt.sink_size),
]
update_cell = DynamicLossScaleUpdateCell(loss_scale_value=loss_scale_value, scale_factor=2, scale_window=1000)
pangu_alpha_with_grads = PanguAlphaTrainOneStepWithLossScaleCell(
pangu_alpha_with_loss, optimizer=optimizer, scale_update_cell=update_cell, enable_global_norm=True,
config=config)
if ds_eval:
ppl_metric = PPLMetric(config.seq_length)
validation_loss = ValidationLoss(config.seq_length)
model = Model(pangu_alpha_with_grads, eval_network=pangu_alpha_with_loss,
metrics={"ppl": ppl_metric, "validation_loss": validation_loss})
callback.append(
EvalCallBack(
model=model,
eval_dataset=ds_eval,
ppl_metric=ppl_metric,
validation_loss=validation_loss,
print_per_step=20,
has_trained_step=args_opt.has_trained_steps,
local_rank=rank,
rank_size=device_num,
tb_writer=summary_writer
)
)
else:
model = Model(pangu_alpha_with_grads)
if args_opt.load_ckpt_epoch > 0:
print("===build model and load ckpt")
time_stamp = datetime.datetime.now()
print(f"time stamp {time_stamp.strftime('%Y.%m.%d-%H:%M:%S')} before building", flush=True)
model.build(train_dataset=ds, sink_size=args_opt.sink_size, epoch=actual_epoch_num)
time_stamp = datetime.datetime.now()
print(f"time stamp {time_stamp.strftime('%Y.%m.%d-%H:%M:%S')} before loading ckpt", flush=True)
load_param_into_net(pangu_alpha_with_loss, param_dict)
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/2/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/2'))
if num == device_num:
break
if rank % 64 == 0:
print("Loaded ckpt in step 2: ", num)
time.sleep(1)
callback.append(
LossCallBack(
name=args_opt.ckpt_name_prefix,
dataset_size=args_opt.sink_size,
local_rank=rank,
rank_size=device_num,
has_trained_epoch=args_opt.has_trained_epoches,
has_trained_step=args_opt.has_trained_steps,
micro_size=args_opt.micro_size * micro_interleaved_size,
tb_writer=summary_writer,
)
)
if not args_opt.profiling:
add_checkpoint_callback_policy(args_opt, callback, rank)
if args_opt.incremental_training:
strategy = model.infer_train_layout(train_dataset=ds, sink_size=args_opt.sink_size)
print("======start load_distributed checkpoint", flush=True)
# For 2.6B and 13B models, the number of ckpt files is 512.
ckpt_file_list = [os.path.join(args_opt.load_ckpt_path, f"filerted_{ckpt_rank}.ckpt") for ckpt_rank in
range(0, 512)]
print(f"Loading from path {ckpt_file_list[0]}", flush=True)
load_distributed_checkpoint(model.train_network, ckpt_file_list, strategy)
print("Dataset size: {}, actual_epoch_num: {}".format(ds.get_dataset_size(), actual_epoch_num), flush=True)
try:
model.train(10 if args_opt.profiling else actual_epoch_num, ds, callbacks=callback,
sink_size=args_opt.sink_size, dataset_sink_mode=True)
finally:
if args_opt.profiling:
jobid = os.environ["BATCH_JOB_ID"]
profiler.analyse()
rank_id = rank
if context.get_context("save_graphs"):
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
if rank_id % 8 == 0:
mox.file.make_dirs("s3://wudao-1/yyf/profiler_" + jobid)
mox.file.copy_parallel(src_url="/cache/profiler_data",
dst_url="s3://wudao-1/yyf/profiler_" + jobid + "/" + str(rank_id)) |
The main function for loading the model. | def load_model(args_opt):
r"""
The main function for loading the model.
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
if args_opt.run_type == "predict":
batch_size = 1
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = EvalNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, current_index, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
if context.get_context("save_graphs"):
print("==============save_graph", flush=True)
jobid = os.environ["BATCH_JOB_ID"]
rank_id = rank
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
print("======start load_distributed checkpoint", flush=True)
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.1)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B{rank}-{args_opt.load_ckpt_epoch}.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
return model_predict, config, rank |
Export mindir model | def export_mindir(model_predict, config):
"""Export mindir model"""
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
export(model_predict.predict_network, inputs_np, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1024', file_format='MINDIR')
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
export(model_predict.predict_network, inputs_np_1, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1', file_format='MINDIR')
print("Export finished and now exit.") |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
from src.generate import generate, generate_increment
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
samples = [
"Hello there!",
"# language: Python\ndef add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"def add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"# language: Python\ndef optimization():\n '''\n Find the maximum of P=E**2*R/(R + r)**2 if E and r are fixed but R varies. Import sympy. Use sympy. Find where the derivative is equal to zero. Substitute the value of R into P.\n '''\n",
"from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n given threshold.\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n False\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n True\n \"\"\"\n",
"// language: JavaScript\nfunction prime(n) {\n // Find whether n is a prime number.\n",
"string morse_encoder(string text) {\n // Translate text into Morse code\n",
"def morse_encoder(text):\n # Translate text into Morse code separated by spaces\n",
f"% language: MATLAB\nfunction x = solve(A, b)\n % Solve Ax = b\n",
f"% language: MATLAB\nfunction [L, U] = lu(A)\n % Return LU factorization of A\n",
"def TCPState(state):\n # given a state in TCP protocol, return a list of next possible states\n",
"def coordinates(p1, p2, precision=0)\n # p1 is (x1, y1), p2 is (x2, y2), return the distance between p1 and p2 on a cartesian plane, rounded to precision\n",
"double travel(double total_time, double run_time, double rest_time, double speed) {\n // the horse runs for run_time with speed speed and rests for rest_time, return the distance it travels after total_time\n",
"def travel(total_time, run_time, rest_time, speed):\n # the horse runs for run_time with speed speed and rests for rest_time, return the distance it travels after total_time\n",
"// language: C++\nint add(int a, int b) {\n /* Find the sum of a and b. */\n",
"int add(int a, int b) {\n /* Find the sum of a and b. */\n",
"// language: C++\nvoid sort(int *array, int len) {\n // Sort the array with length len\n",
"bool prime(int n) {\n // Find whether n is a prime number\n",
"def prime(n):\n # Find whether n is a prime number\n",
f"% language: MATLAB\nfunction H = hilbert(n)\n % Return Hilbert matrix of size n * n\n",
f"% language: MATLAB\nfunction L = cholesky(A)\n % Return Cholesky factorization of symmetric positive definete matrix A\n",
"// language: JavaScript\nfunction add(a, b) {\n // Find the sum of a and b.\n",
"# language: R\nadd<-function(a, b) {\n # Find the sum of a and b.\n",
]
verbose = False
for i, sample in enumerate(samples):
for _ in range(1):
tokenized_token = tokenizer.encode_code(sample)
input_ids = np.array(tokenized_token).reshape(1, -1)
# Call inference
generate_func = generate_increment if config.use_past else generate
t0 = time.perf_counter()
output_ids = generate_func(model_predict, input_ids, args_opt, verbose)
# Decode output ids to sentence
t1 = time.perf_counter()
output_samples = tokenizer.decode_code(output_ids.tolist())
output_samples_str = "".join(output_samples)
if rank % 8 == 0:
print(f"=================== prompt {i} ====================")
print(sample, flush=True)
print(f"=================== generation {i} ====================")
print(output_samples_str, flush=True)
print(
f"=== Total time (s): {t1 - t0}, {output_ids.shape[-1] - input_ids.shape[-1]} tokens, {(output_ids.shape[-1] - input_ids.shape[-1]) / (t1 - t0)} token/s") |
Main process for predict or export model | def main():
"""Main process for predict or export model"""
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
if opt.export:
export_mindir(model_predict, config)
else:
run_predict(model_predict, config, opt, rank) |
The main function for loading the model. | def load_model(args_opt):
r"""
The main function for loading the model.
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
print("===args_opt: ", args_opt, flush=True)
print("===device_num is: ", device_num, flush=True)
args_opt.op_level_model_parallel_num = 1
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
print("===data_parallel_num is: ", data_parallel_num, flush=True)
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
# Currently only a single batch size is supported for predict
if args_opt.run_type == "predict":
batch_size = 1
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = EvalNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, current_index, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
if context.get_context("save_graphs"):
print("==============save_graph", flush=True)
jobid = os.environ["BATCH_JOB_ID"]
rank_id = rank
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
print("======start load_distributed checkpoint", flush=True)
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.5)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B0-{args_opt.load_ckpt_epoch}.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
# TODO: add them back if not for the 1st run!
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
return model_predict, config, rank |
Export mindir model | def export_mindir(model_predict, config):
"""Export mindir model"""
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
export(model_predict.predict_network, inputs_np, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1024', file_format='MINDIR')
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
export(model_predict.predict_network, inputs_np_1, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1', file_format='MINDIR')
print("Export finished and now exit.") |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
from src.generate import generate, generate_increment
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
samples = [
"# language: Python\ndef add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"def add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"# language: Python\ndef optimization():\n '''\n Find the maximum of P=E**2*R/(R + r)**2 if E and r are fixed but R varies. Import sympy. Use sympy. Find where the derivative is equal to zero. Substitute the value of R into P.\n '''\n",
"from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n given threshold.\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n False\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n True\n \"\"\"\n",
"// language: C++\nint add(int a, int b) {\n /* Find the sum of a and b. */\n",
"int add(int a, int b) {\n /* Find the sum of a and b. */\n",
"bool prime(int n) {\n // Find whether n is a prime number\n",
"// language: JavaScript\nfunction add(a, b) {\n // Find the sum of a and b.\n",
"# language: R\nadd<-function(a, b) {\n # Find the sum of a and b.\n",
]
verbose = False
for i, sample in enumerate(samples):
for _ in range(1):
tokenized_token = tokenizer.encode_code(sample)
input_ids = np.array(tokenized_token).reshape(1, -1)
# Call inference
generate_func = generate_increment if config.use_past else generate
t0 = time.perf_counter()
output_ids = generate_func(model_predict, input_ids, args_opt, verbose)
# Decode output ids to sentence
t1 = time.perf_counter()
output_samples = tokenizer.decode_code(output_ids.tolist())
output_samples_str = "".join(output_samples)
if rank % 8 == 0:
print(f"=================== prompt {i} ====================")
print(sample, flush=True)
print(f"=================== generation {i} ====================")
print(output_samples_str, flush=True)
print(
f"=== Total time (s): {t1 - t0}, {output_ids.shape[-1] - input_ids.shape[-1]} tokens, {(output_ids.shape[-1] - input_ids.shape[-1]) / (t1 - t0)} token/s")
break |
Main process for predict or export model | def main():
"""Main process for predict or export model"""
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
if opt.export:
export_mindir(model_predict, config)
else:
run_predict(model_predict, config, opt, rank) |
The main function for loading the model. | def load_model(args_opt):
r"""
The main function for loading the model.
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = EvalNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, current_index, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
if context.get_context("save_graphs"):
print("==============save_graph", flush=True)
jobid = os.environ["BATCH_JOB_ID"]
rank_id = rank
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
print("======start load_distributed checkpoint", flush=True)
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.1)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B{rank}-{args_opt.load_ckpt_epoch}.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
return model_predict, config, rank |
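# Worked example of the parallel/batch-size arithmetic used in load_model above.
# The numbers are illustrative only; real values come from the command-line options
# (op_level_model_parallel_num, per_batch_size) and from the launched device count.
def _illustrate_parallel_sizes(device_num=8, op_level_model_parallel_num=8, per_batch_size=1):
    # data-parallel groups = total devices / operator-level model-parallel size
    data_parallel_num = device_num // op_level_model_parallel_num
    # global batch compiled into the graph = per-group batch * number of data-parallel groups
    batch_size = per_batch_size * data_parallel_num
    return data_parallel_num, batch_size


assert _illustrate_parallel_sizes() == (1, 1)
assert _illustrate_parallel_sizes(device_num=16, op_level_model_parallel_num=8, per_batch_size=2) == (2, 4)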
Export mindir model | def export_mindir(model_predict, config):
"""Export mindir model"""
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
export(model_predict.predict_network, inputs_np, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1024', file_format='MINDIR')
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
export(model_predict.predict_network, inputs_np_1, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1', file_format='MINDIR')
print("Export finished and now exit.") |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
from src.generate_finetune import generate_increment
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
samples = [
"Hello there!",
"# language: Python\ndef add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"def add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"# language: Python\ndef optimization():\n '''\n Find the maximum of P=E**2*R/(R + r)**2 if E and r are fixed but R varies. Import sympy. Use sympy. Find where the derivative is equal to zero. Substitute the value of R into P.\n '''\n",
"from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n given threshold.\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n False\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n True\n \"\"\"\n",
"// language: JavaScript\nfunction prime(n) {\n // Find whether n is a prime number.\n",
"string morse_encoder(string text) {\n // Translate text into Morse code\n",
"def morse_encoder(text):\n # Translate text into Morse code separated by spaces\n",
f"% language: MATLAB\nfunction x = solve(A, b)\n % Solve Ax = b\n",
f"% language: MATLAB\nfunction [L, U] = lu(A)\n % Return LU factorization of A\n",
"def TCPState(state):\n # given a state in TCP protocol, return a list of next possible states\n",
"def coordinates(p1, p2, precision=0)\n # p1 is (x1, y1), p2 is (x2, y2), return the distance between p1 and p2 on a cartesian plane, rounded to precision\n",
"double travel(double total_time, double run_time, double rest_time, double speed) {\n // the horse runs for run_time with speed speed and rests for rest_time, return the distance it travels after total_time\n",
"def travel(total_time, run_time, rest_time, speed):\n # the horse runs for run_time with speed speed and rests for rest_time, return the distance it travels after total_time\n",
"// language: C++\nint add(int a, int b) {\n /* Find the sum of a and b. */\n",
"int add(int a, int b) {\n /* Find the sum of a and b. */\n",
"// language: C++\nvoid sort(int *array, int len) {\n // Sort the array with length len\n",
"bool prime(int n) {\n // Find whether n is a prime number\n",
"def prime(n):\n # Find whether n is a prime number\n",
f"% language: MATLAB\nfunction H = hilbert(n)\n % Return Hilbert matrix of size n * n\n",
f"% language: MATLAB\nfunction L = cholesky(A)\n % Return Cholesky factorization of symmetric positive definete matrix A\n",
"// language: JavaScript\nfunction add(a, b) {\n // Find the sum of a and b.\n",
"# language: R\nadd<-function(a, b) {\n # Find the sum of a and b.\n",
]
samples = [tokenizer.encode_code(l) for l in samples]
generations = []
batch_size = config.batch_size
verbose = (rank % 8 == 0)
save_path = f'/home/work/sfs/xx/pangu_alpha_code/generation_batch/{args_opt.temperature}.txt' # TODO: set as current save path
save_dir = os.path.split(save_path)[0]
if rank == 0:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if not os.path.exists(save_path):
f = open(save_path, 'w')
f.close()
os.system(f'sudo chmod 777 -R {save_dir}')
batch = []
input_length = []
sample_ids = []
for i, sample in enumerate(samples):
tokenized_token = sample
input_ids = np.array(tokenized_token).reshape(1, -1)
batch.append(input_ids)
input_length.append(input_ids.shape[1])
sample_ids.append(i)
if (i + 1) % batch_size == 0:
valid_length = max(input_length)
for j in range(len(batch)):
batch[j] = np.pad(batch[j], ((0, 0), (0, valid_length - input_length[j])),
'constant', constant_values=(args_opt.end_token, args_opt.end_token))
input_ids = np.concatenate(batch, axis=0)
t0 = time.perf_counter()
output_ids = generate_increment(model_predict, input_ids, input_length, args_opt, tokenizer, verbose)
t1 = time.perf_counter()
batch, input_length = [], []
if rank % 8 == 0:
print(f"=== Batch time: {t1 - t0}s")
for k, out in enumerate(output_ids):
if not out.endswith('\n'):
out = out + '\n'
print(f"=================== generation {sample_ids[k]} ====================")
print(out, flush=True)
generations.append(out)
if rank == 0:
f = open(save_path, 'a')
f.write(generations[-1])
f.close()
sample_ids = []
if len(batch) > 0:
for j in range(batch_size - len(sample_ids)):
batch.append(np.zeros((1, 1)))
input_length.append(-1)
valid_length = max(input_length)
for j in range(len(batch)):
batch[j] = np.pad(batch[j], ((0, 0), (0, valid_length - batch[j].shape[1])),
'constant', constant_values=(args_opt.end_token, args_opt.end_token))
input_ids = np.concatenate(batch, axis=0)
t0 = time.perf_counter()
output_ids = generate_increment(model_predict, input_ids, input_length, args_opt, tokenizer, verbose)
t1 = time.perf_counter()
if rank % 8 == 0:
print(f"=== Batch time: {t1 - t0}s")
for k, out in enumerate(output_ids):
if input_length[k] == -1:
break
if not out.endswith('\n'):
out = out + '\n'
print(f"=================== generation {sample_ids[k]} ====================")
print(out, flush=True)
generations.append(out)
if rank == 0:
f = open(save_path, 'a')
f.write(generations[-1])
f.close() |
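# A self-contained sketch of the right-padding scheme used above to batch prompts of
# unequal length: every prompt is padded with the end-of-text id up to the longest
# prompt in the batch (pad_id=50256 is just an example value for the pad/end token).
import numpy as np

def pad_batch(prompts, pad_id=50256):
    lengths = [len(p) for p in prompts]
    max_len = max(lengths)
    rows = [np.pad(np.array(p, dtype=np.int32).reshape(1, -1),
                   ((0, 0), (0, max_len - len(p))),
                   'constant', constant_values=(pad_id, pad_id))
            for p in prompts]
    return np.concatenate(rows, axis=0), lengths


padded, lengths = pad_batch([[1, 2, 3], [4, 5], [6]])
assert padded.shape == (3, 3) and lengths == [3, 2, 1]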
Main process for predict or export model | def main():
"""Main process for predict or export model"""
print("===Enter main!")
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
if opt.export:
export_mindir(model_predict, config)
else:
run_predict(model_predict, config, opt, rank) |
The main function for loading the model | def load_model(args_opt):
    r"""
    The main function for loading the model
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
# data_parallel_num = 1
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = EvalNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, current_index, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
if context.get_context("save_graphs"):
print("==============save_graph", flush=True)
jobid = os.environ["BATCH_JOB_ID"]
rank_id = rank
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
print("======start load_distributed checkpoint", flush=True)
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.1)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B{rank}-{args_opt.load_ckpt_epoch}.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
return model_predict, config, rank |
Export mindir model | def export_mindir(model_predict, config):
"""Export mindir model"""
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
export(model_predict.predict_network, inputs_np, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1024', file_format='MINDIR')
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
export(model_predict.predict_network, inputs_np_1, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1', file_format='MINDIR')
print("Export finished and now exit.") |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
from src.generate_finetune import generate_increment
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
lang = args_opt.language
data_path = os.path.join(args_opt.code_data, lang, 'test')
dataset = LMDBDataset(data_path)
samples = []
for i in range(len(dataset)):
prompt, length = dataset[i]
samples.append(prompt[:length])
generations = []
batch_size = config.batch_size
verbose = (rank % 8 == 0)
save_path = f'/home/work/sfs/xx/pangu_alpha_code/generation_finetune/code_translation/{lang}/temp_{args_opt.temperature}.txt' # TODO: set as current save path
save_dir = os.path.split(save_path)[0]
if rank == 0:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if not os.path.exists(save_path):
f = open(save_path, 'w')
f.close()
os.system(f'sudo chmod 777 -R {os.path.split(save_dir)[0]}')
batch = []
input_length = []
sample_ids = []
for i, sample in enumerate(samples):
tokenized_token = sample
input_ids = np.array(tokenized_token).reshape(1, -1)
batch.append(input_ids)
input_length.append(input_ids.shape[1])
sample_ids.append(i)
if (i + 1) % batch_size == 0:
valid_length = max(input_length)
for j in range(len(batch)):
batch[j] = np.pad(batch[j], ((0, 0), (0, valid_length - input_length[j])),
'constant', constant_values=(args_opt.end_token, args_opt.end_token))
input_ids = np.concatenate(batch, axis=0)
t0 = time.perf_counter()
output_ids = generate_increment(model_predict, input_ids, input_length, args_opt, tokenizer, verbose)
t1 = time.perf_counter()
batch, input_length = [], []
if rank % 8 == 0:
print(f"=== Batch time: {t1 - t0}s")
for k, out in enumerate(output_ids):
print(f"=================== generation {sample_ids[k]} ====================")
print(out, flush=True)
generations.append(out)
if rank == 0:
f = open(save_path, 'a')
f.write(generations[-1])
if not generations[-1].endswith('\n'):
f.write('\n')
f.close()
sample_ids = []
if len(batch) > 0:
valid_length = max(input_length)
for j in range(batch_size - len(sample_ids)):
batch.append(np.zeros((1, 1)))
input_length.append(-1)
for j in range(len(batch)):
batch[j] = np.pad(batch[j], ((0, 0), (0, valid_length - batch[j].shape[1])),
'constant', constant_values=(args_opt.end_token, args_opt.end_token))
input_ids = np.concatenate(batch, axis=0)
t0 = time.perf_counter()
output_ids = generate_increment(model_predict, input_ids, input_length, args_opt, tokenizer, verbose)
t1 = time.perf_counter()
if rank % 8 == 0:
print(f"=== Batch time: {t1 - t0}s")
print(f"=== Input lengths: {input_length}, length: {len(input_length)}")
print(f"=== Input ids shape: {input_ids.shape}")
print(f"=== Num of outputs: {len(output_ids)}")
for k, out in enumerate(output_ids):
if input_length[k] == -1:
break
print(f"=================== generation {sample_ids[k]} ====================")
print(out, flush=True)
generations.append(out)
if rank == 0:
f = open(save_path, 'a')
f.write(generations[-1])
if not generations[-1].endswith('\n'):
f.write('\n')
f.close() |
Main process for predict or export model | def main():
"""Main process for predict or export model"""
print("===Enter main!")
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
if opt.export:
export_mindir(model_predict, config)
else:
run_predict(model_predict, config, opt, rank) |
The main function for loading the model | def load_model(args_opt):
    r"""
    The main function for loading the model
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = EvalNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, current_index, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
if context.get_context("save_graphs"):
print("==============save_graph", flush=True)
jobid = os.environ["BATCH_JOB_ID"]
rank_id = rank
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
print("======start load_distributed checkpoint", flush=True)
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.1)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B{rank}-{args_opt.load_ckpt_epoch}.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
return model_predict, config, rank |
Export mindir model | def export_mindir(model_predict, config):
"""Export mindir model"""
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
export(model_predict.predict_network, inputs_np, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1024', file_format='MINDIR')
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
export(model_predict.predict_network, inputs_np_1, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1', file_format='MINDIR')
print("Export finished and now exit.") |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
from src.generate_humaneval import generate_increment
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
humaneval_path = '/home/work/sfs/xx/human_eval_x/data/humaneval_cpp.jsonl' # TODO: set as current humaneval path
humaneval = open(humaneval_path, 'r').readlines()
humaneval = [json.loads(task) for task in humaneval if len(task) != 0]
samples = [task['prompt'] for task in humaneval]
generations = []
batch_size = config.batch_size
verbose = (rank % 8 == 0)
part = int(args_opt.part)
gen_times = 12 # TODO: set as generation times of current task
print(f"gen times: {gen_times}, part: {part}")
save_path = f'/home/work/sfs/xx/pangu_alpha_code/generation_humanevalx/cpp/temp_{args_opt.temperature}/samples_{args_opt.load_ckpt_epoch}_part_{part}.jsonl' # TODO: set as current save path
if rank == 0 and not os.path.exists(save_path):
os.makedirs(os.path.split(save_path)[0], exist_ok=True)
f = open(save_path, 'w')
f.close()
os.system(f'sudo chmod 777 {save_path}')
for i, sample in enumerate(samples):
tag = "// language: C++\n"
sample = tag + sample
if rank % 8 == 0:
print(f"=================== prompt {i} ====================")
print(sample, flush=True)
for j in range((gen_times + batch_size - 1) // batch_size):
tokenized_token = tokenizer.encode_code(sample)
input_ids = np.array(tokenized_token).reshape(1, -1).repeat(batch_size, axis=0)
# Call inference
mindspore.set_seed(j + 8 * part)
generate_func = generate_increment
t0 = time.perf_counter()
output_ids = generate_func(model_predict, input_ids, args_opt, tokenizer, verbose)
t1 = time.perf_counter()
if rank % 8 == 0:
print(f"=== Batch time: {t1 - t0}s")
for k, out in enumerate(output_ids):
print(f"=================== generation {j * batch_size + k} ====================")
print(out, flush=True)
generations.append(json.dumps({'task_id': humaneval[i]['task_id'], 'completion': out}))
if rank == 0:
f = open(save_path, 'a')
f.write(generations[-1] + '\n')
f.close() |
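# A small sketch of reading back the file written above: it is JSON-lines, one
# {"task_id", "completion"} record per generated sample. The path here is only an
# example; substitute the actual save_path used by run_predict.
import json
import os

def load_generations(path):
    with open(path, "r") as fin:
        return [json.loads(line) for line in fin if line.strip()]

example_path = "samples_part_0.jsonl"
if os.path.exists(example_path):
    by_task = {}
    for record in load_generations(example_path):
        by_task.setdefault(record["task_id"], []).append(record["completion"])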
Main process for predict or export model | def main():
"""Main process for predict or export model"""
print("===Enter main!")
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
if opt.export:
export_mindir(model_predict, config)
else:
run_predict(model_predict, config, opt, rank) |
The main function for loading the model | def load_model(args_opt):
    r"""
    The main function for loading the model
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = LogitsNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0 for _ in range(batch_size)]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
if context.get_context("save_graphs"):
print("==============save_graph", flush=True)
jobid = os.environ["BATCH_JOB_ID"]
rank_id = rank
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
print("======start load_distributed checkpoint", flush=True)
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.1)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B{rank}-{args_opt.load_ckpt_epoch}.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
return model_predict, config, rank |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
batch_size = config.batch_size
input_ids = np.array(
[8189, 11059, 198, 29584, 25, 198, 11377, 1398, 28186, 1391, 198, 50268, 11377, 9037, 25131, 468, 26125, 36,
3639, 7, 600, 21737, 997, 82, 11, 493, 11387, 8, 1391, 198, 50272, 1640, 357, 600, 1312, 796, 657, 26, 1312,
1279, 997, 82, 13, 13664, 532, 352, 26, 1312, 29577, 1391, 198, 50276, 1640, 357, 600, 474, 796, 1312, 1343,
352, 26, 474, 1279, 997, 82, 13, 13664, 26, 474, 29577, 1391, 198, 50280, 361, 357, 37372, 13, 8937, 7, 77,
5700, 58, 72, 60, 532, 997, 82, 58, 73, 12962, 1279, 11387, 8, 1391, 198, 50284, 7783, 2081, 26, 198, 50280,
92, 198, 50276, 92, 198, 50272, 92, 198, 50272, 7783, 3991, 26, 198, 50268, 92, 198, 92, 198, 5247, 25, 198],
dtype=np.int32)
valid_length = input_ids.shape[0]
input_ids = np.concatenate((input_ids, np.ones(2048 - valid_length, dtype=np.int32) * 50256))
attention_mask = np.tril(np.ones((2048, 2048)))
attention_mask[valid_length:] = 0
input_ids = input_ids.reshape(1, -1).repeat(config.batch_size, axis=0)
current_index = valid_length - 1 if valid_length - 1 > 0 else 0
init = Tensor([False], mstype.bool_)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
batch_valid_length = Tensor(np.array([current_index for _ in range(batch_size)]), mstype.int32)
output_logits = model_predict.predict(Tensor(input_ids, mstype.int32),
init, batch_valid_length)
output = output_logits.asnumpy()
if rank == 0:
np.save("/home/work/sfs/xx/pangu_alpha_code/output_6_7375_8.13.npy", output) # TODO: set as current save path
os.system(
"chmod 777 /home/work/sfs/xx/pangu_alpha_code/output_6_7375_8.13.npy") # TODO: set as current save path
print("== Output shape: ", output.shape) |
Main process for predict or export model | def main():
"""Main process for predict or export model"""
print("===Enter main!")
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
run_predict(model_predict, config, opt, rank) |
The main function for loading the model | def load_model(args_opt):
    r"""
    The main function for loading the model
"""
# Set execution mode
context.set_context(save_graphs=False,
mode=context.GRAPH_MODE,
device_target=args_opt.device_target)
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
if args_opt.distribute == "true":
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
gradients_mean=False,
full_batch=True,
loss_repeated_mean=True,
enable_parallel_optimizer=False,
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
else:
rank = 0
device_num = 1
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
use_past = (args_opt.use_past == "true")
print('local_rank:{}, start to run...'.format(rank), flush=True)
if args_opt.export:
use_past = True
# Set model property
print("===args_opt: ", args_opt, flush=True)
print("===device_num is: ", device_num, flush=True)
args_opt.op_level_model_parallel_num = 1
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
print("===data_parallel_num is: ", data_parallel_num, flush=True)
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=False,
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True)
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num
if args_opt.run_type == "predict":
batch_size = 1
config = PanguAlphaConfig(
batch_size=batch_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
hidden_size=args_opt.embedding_size,
num_layers=args_opt.num_layers,
num_heads=args_opt.num_heads,
post_layernorm_residual=False,
dropout_rate=0.0,
ffn_hidden_size=args_opt.embedding_size * 4,
use_past=use_past,
eod_token=args_opt.eod_id,
eod_reset=False,
parallel_config=parallel_config,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == 'fp32'
else mstype.float16,
)
print("===config is: ", config, flush=True)
print("=====args_opt is: ", args_opt, flush=True)
ckpt_name = args_opt.load_ckpt_name
# Define network
pangu_alpha = PanguAlphaModel(config)
eval_net = EvalNet(pangu_alpha, pad_token=50256)
eval_net.set_train(False)
model_predict = Model(eval_net)
# Compile network and obtain tensor layout for loading ckpt
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
if args_opt.distribute == "false":
predict_layout = None
elif config.use_past:
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
print("Input shape:", inputs_np.shape, flush=True)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
print("is_first_iteration=True", flush=True)
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index, init_true, batch_valid_length)
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
print("is_first_iteration=False", flush=True)
init_false = Tensor([False], mstype.bool_)
_ = model_predict.infer_predict_layout(inputs_np_1, current_index, init_false, batch_valid_length)
else:
predict_layout = model_predict.infer_predict_layout(inputs_np, current_index)
if context.get_context("save_graphs"):
print("==============save_graph", flush=True)
jobid = os.environ["BATCH_JOB_ID"]
rank_id = rank
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
print("======start load_distributed checkpoint", flush=True)
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.5)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B0-{args_opt.load_ckpt_epoch}.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
# TODO: add them back if not for the 1st run!
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 8 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
net_not_load = load_param_into_net(pangu_alpha, param_dict)
print("====== load_distributed checkpoint done, net_not_load: ", net_not_load, flush=True)
return model_predict, config, rank |
Export mindir model | def export_mindir(model_predict, config):
"""Export mindir model"""
inputs_np = Tensor(np.ones(shape=(config.batch_size, config.seq_length)), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
batch_valid_length = Tensor(np.array([0]), mstype.int32)
init_true = Tensor([True], mstype.bool_)
inputs_np_1 = Tensor(np.ones(shape=(config.batch_size, 1)), mstype.int32)
model_predict.predict_network.add_flags_recursive(is_first_iteration=True)
export(model_predict.predict_network, inputs_np, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1024', file_format='MINDIR')
model_predict.predict_network.add_flags_recursive(is_first_iteration=False)
export(model_predict.predict_network, inputs_np_1, current_index,
init_true, batch_valid_length, file_name='pangu_alpha_1', file_format='MINDIR')
print("Export finished and now exit.") |
run predict | def run_predict(model_predict, config, args_opt, rank):
"""run predict"""
from src.generate import generate, generate_increment
# Define tokenizer
tokenizer = CodeTokenizer(mode='6b')
# Tokenize input sentence to ids
samples = [
"# language: Python\ndef add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"def add(a, b):\n '''\n Find the sum of a and b.\n '''\n",
"# language: Python\ndef optimization():\n '''\n Find the maximum of P=E**2*R/(R + r)**2 if E and r are fixed but R varies. Import sympy. Use sympy. Find where the derivative is equal to zero. Substitute the value of R into P.\n '''\n",
"from typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n \"\"\" Check if in given list of numbers, are any two numbers closer to each other than\n given threshold.\n >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n False\n >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n True\n \"\"\"\n",
"// language: C++\nint add(int a, int b) {\n /* Find the sum of a and b. */\n",
"int add(int a, int b) {\n /* Find the sum of a and b. */\n",
"bool prime(int n) {\n // Find whether n is a prime number\n",
"// language: JavaScript\nfunction add(a, b) {\n // Find the sum of a and b.\n",
"# language: R\nadd<-function(a, b) {\n # Find the sum of a and b.\n",
]
# verbose = (rank == 0)
verbose = False
for i, sample in enumerate(samples):
for _ in range(1):
tokenized_token = tokenizer.encode_code(sample)
input_ids = np.array(tokenized_token).reshape(1, -1)
# Call inference
generate_func = generate_increment if config.use_past else generate
t0 = time.perf_counter()
output_ids = generate_func(model_predict, input_ids, args_opt, verbose)
# Decode output ids to sentence
t1 = time.perf_counter()
output_samples = tokenizer.decode_code(output_ids.tolist())
output_samples_str = "".join(output_samples)
if rank % 8 == 0:
print(f"=================== prompt {i} ====================")
print(sample, flush=True)
print(f"=================== generation {i} ====================")
print(output_samples_str, flush=True)
print(
f"=== Total time (s): {t1 - t0}, {output_ids.shape[-1] - input_ids.shape[-1]} tokens, {(output_ids.shape[-1] - input_ids.shape[-1]) / (t1 - t0)} token/s")
break |
Main process for predict or export model | def main():
"""Main process for predict or export model"""
opt = get_args(True)
set_parse(opt)
model_predict, config, rank = load_model(opt)
if opt.export:
export_mindir(model_predict, config)
else:
run_predict(model_predict, config, opt, rank) |
Set weight decay coefficient, zero for bias and layernorm, 1e-1 for rest | def set_weight_decay(params):
"""
Set weight decay coefficient, zero for bias and layernorm, 1e-1 for rest
"""
decay_filter = lambda x: 'layernorm' not in x.name.lower() and "bias" not in x.name.lower()
decay_params = list(filter(decay_filter, params))
other_params = list(filter(lambda x: not decay_filter(x), params))
group_params = [
{"params": decay_params, "weight_decay": 1e-1},
{"params": other_params, "weight_decay": 0.0},
{"order_params": params},
]
return group_params |
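# A minimal sketch of how the grouped parameters returned by set_weight_decay are
# consumed: MindSpore optimizers accept a list of {"params", "weight_decay"} groups
# plus an "order_params" entry. The network and learning rate are placeholders.
from mindspore import nn

net = nn.Dense(8, 8)                                   # placeholder network
group_params = set_weight_decay(net.trainable_params())
optimizer = nn.AdamWeightDecay(group_params, learning_rate=1e-4)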
Add checkpoint policy to callback. | def add_checkpoint_callback_policy(args_param, callback, rank_id):
r"""
Add checkpoint policy to callback.
"""
if args_param.save_checkpoint:
# checkpoint store epoch_num and step_num info
ckpt_append_info = [{"epoch_num": args_param.has_trained_epoches, "step_num": args_param.has_trained_steps}]
ckpt_config = CheckpointConfig(
save_checkpoint_steps=args_param.save_checkpoint_steps,
keep_checkpoint_max=args_param.keep_checkpoint_max,
integrated_save=False,
append_info=ckpt_append_info,
)
# save checkpoint into rank directory
ckpoint_cb = ModelCheckpoint(prefix=args_param.ckpt_name_prefix + str(rank_id),
directory=os.path.join(args_param.save_checkpoint_path, f"rank_{rank_id}"),
config=ckpt_config)
callback.append(ckpoint_cb)
saveckpt_cb = SaveCheckpointCallback(cache_dir=args_param.save_checkpoint_path,
bucket=args_param.save_checkpoint_obs_path,
local_rank=rank_id,
has_trained_epoch=args_param.has_trained_epoches,
has_trained_step=args_param.has_trained_steps,
syn_times=args_param.save_checkpoint_steps)
callback.append(saveckpt_cb) |
Set parallel context | def set_parallel_context(args_opt):
r"""Set parallel context"""
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, gradients_mean=False,
full_batch=bool(args_opt.full_batch), strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path,
enable_parallel_optimizer=bool(args_opt.optimizer_shard), strategy_ckpt_save_file='strategy.ckpt',
optimizer_weight_shard_size=16, optimizer_weight_shard_aggregated_save=True)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
return rank, device_num |
Set parallel context | def set_parallel_context(args_opt):
r"""Set parallel context"""
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, gradients_mean=False,
full_batch=bool(args_opt.full_batch), strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path,
enable_parallel_optimizer=bool(args_opt.optimizer_shard), strategy_ckpt_save_file='strategy.ckpt',
optimizer_weight_shard_size=16, optimizer_weight_shard_aggregated_save=True)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
return rank, device_num |
Set weight decay coefficient, zero for bias and layernorm, 1e-1 for rest | def set_weight_decay(params):
"""
Set weight decay coefficient, zero for bias and layernorm, 1e-1 for rest
"""
decay_filter = lambda x: 'layernorm' not in x.name.lower() and "bias" not in x.name.lower()
decay_params = list(filter(decay_filter, params))
other_params = list(filter(lambda x: not decay_filter(x), params))
group_params = [
{"params": decay_params, "weight_decay": 1e-1},
{"params": other_params, "weight_decay": 0.0},
{"order_params": params},
]
return group_params |
Add checkpoint policy to callback. | def add_checkpoint_callback_policy(args_param, callback, rank_id):
r"""
Add checkpoint policy to callback.
"""
if args_param.save_checkpoint:
# checkpoint store epoch_num and step_num info
ckpt_append_info = [{"epoch_num": args_param.has_trained_epoches, "step_num": args_param.has_trained_steps}]
ckpt_config = CheckpointConfig(
save_checkpoint_steps=args_param.save_checkpoint_steps,
keep_checkpoint_max=args_param.keep_checkpoint_max,
integrated_save=False,
append_info=ckpt_append_info,
)
# save checkpoint into rank directory
ckpoint_cb = ModelCheckpoint(prefix=args_param.ckpt_name_prefix + str(rank_id),
directory=os.path.join(args_param.save_checkpoint_path, f"rank_{rank_id}"),
config=ckpt_config)
callback.append(ckpoint_cb)
saveckpt_cb = SaveCheckpointCallback(cache_dir=args_param.save_checkpoint_path,
bucket=args_param.save_checkpoint_obs_path,
local_rank=rank_id,
has_trained_epoch=args_param.has_trained_epoches,
has_trained_step=args_param.has_trained_steps,
syn_times=args_param.save_checkpoint_steps)
callback.append(saveckpt_cb) |
Set parallel context | def set_parallel_context(args_opt):
r"""Set parallel context"""
D.init()
device_num = D.get_group_size()
rank = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, gradients_mean=False,
full_batch=bool(args_opt.full_batch), strategy_ckpt_load_file=args_opt.strategy_load_ckpt_path,
enable_parallel_optimizer=bool(args_opt.optimizer_shard), strategy_ckpt_save_file='strategy.ckpt',
optimizer_weight_shard_size=16)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
return rank, device_num |
The main training process. | def run_train(args_opt):
r"""The main training process."""
os.environ["HCCL_CONNECT_TIMEOUT"] = "2000"
# Set execution mode
context.set_context(
mode=context.GRAPH_MODE, device_target=args_opt.device_target
)
if args_opt.profiling:
profiler = Profiler(output_path="/cache/profiler_data")
context.set_context(variable_memory_max_size="30GB")
# Set parallel context
rank = 0
device_num = 1
if args_opt.distribute == "true":
rank, device_num = set_parallel_context(args_opt)
context.set_context(
save_graphs=False,
save_graphs_path="/cache/graphs_of_device_id_" + str(rank),
)
cache_url = '/cache/Data/'
eval_cache_url = '/cache/EvalData/'
if not args_opt.offline:
download_data(src_data_url=args_opt.data_url, tgt_data_path=cache_url, rank=rank)
download_data(src_data_url=args_opt.eval_data_url, tgt_data_path=eval_cache_url, rank=rank)
# Set model property
model_parallel_num = args_opt.op_level_model_parallel_num
data_parallel_num = int(device_num / model_parallel_num)
batch_size = args_opt.per_batch_size * data_parallel_num
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num, model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=bool(args_opt.optimizer_shard),
vocab_emb_dp=bool(args_opt.word_emb_dp), recompute=True,
gradient_aggregation_group=args_opt.gradient_aggregation_group)
micro_interleaved_size = args_opt.micro_interleaved_size
config = PanguAlphaConfig(
batch_size=batch_size // micro_interleaved_size,
num_heads=args_opt.num_heads,
hidden_size=args_opt.embedding_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
num_layers=args_opt.num_layers,
ffn_hidden_size=args_opt.embedding_size * 4,
eod_token=args_opt.eod_id,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == "fp32"
else mstype.float16,
dropout_rate=args_opt.dropout_rate,
enable_offload=bool(args_opt.opt_offload),
use_moe=bool(args_opt.use_moe),
per_dp_dim_expert_num=args_opt.per_dp_dim_expert_num,
hidden_act="fast_gelu" if args_opt.device_target != "GPU" else "gelu",
parallel_config=parallel_config,
)
print("===config is: ", config, flush=True)
# Define network
pangu_alpha = PanguAlphaModel(config=config)
loss = CrossEntropyLoss(config.parallel_config.dp_mp_config)
if micro_interleaved_size > 1:
print("===using MicroBatchInterleaved", flush=True)
pangu_alpha_with_loss_net = MicroBatchInterleaved(PanGUAlphaWithLoss(config, pangu_alpha, loss),
micro_interleaved_size)
else:
pangu_alpha_with_loss_net = PanGUAlphaWithLoss(config, pangu_alpha, loss)
pangu_alpha_with_loss = _VirtualDatasetCell(pangu_alpha_with_loss_net)
print("=====args_opt is: ", args_opt, flush=True)
# Warm-up and cosine decay learning rate
lr = LearningRate(learning_rate=args_opt.start_lr, end_learning_rate=args_opt.end_lr,
warmup_steps=args_opt.warmup_step, decay_steps=args_opt.decay_steps)
params = pangu_alpha_with_loss.trainable_params()
group_params = set_weight_decay(params)
if args_opt.optimizer == "lamb":
optimizer = nn.Lamb(group_params, learning_rate=lr)
elif args_opt.opt_offload:
optimizer = AdamWeightDecayOp(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95,
param_init_type=config.param_init_type)
else:
optimizer = FP32StateAdamWeightDecay(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95)
# Initial scaling sens
loss_scale_value = math.pow(2, 32)
epoch_num = args_opt.epoch_size
if args_opt.load_ckpt_epoch > 0:
time.sleep(rank * 0.05)
os.mkdir(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}"))
ckpt_name = f"code-13B{rank}_20-{args_opt.load_ckpt_epoch}_2.ckpt"
if not mox.file.exists(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name)):
print(f"Checkpoint from rank {rank} doesn't exist!")
mox.file.copy(os.path.join(args_opt.load_ckpt_path, f"rank_{rank}", ckpt_name),
os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
param_dict = load_checkpoint(os.path.join(args_opt.save_checkpoint_path, f"rank_{rank}", ckpt_name))
# TODO: remove after warming-up!
# param_dict.pop('global_step')
# TODO: add them back if not for the 1st run!
# if param_dict.get("epoch_num") and param_dict.get("step_num"):
# args_opt.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
# args_opt.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
# args_opt.has_trained_steps = 9000
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/1'))
if num == device_num:
break
if rank % 64 == 0:
print("Loaded ckpt in step 1: ", num)
time.sleep(1)
if args_opt.tb_dir is not None and rank == device_num - 1:
os.makedirs(args_opt.tb_dir, exist_ok=True)
summary_writer = SummaryWriter(args_opt.tb_dir)
        os.system(f'chmod 777 -R {args_opt.tb_dir}')
else:
summary_writer = None
# Dataset loading mindrecord files
ds, ds_eval = create_dataset(config.batch_size * micro_interleaved_size, data_path=args_opt.code_data,
args_opt=args_opt, data_start_index=0,
eod_reset=config.eod_reset, full_batch=bool(args_opt.full_batch),
eod_id=args_opt.eod_id,
device_num=device_num, rank=rank, epoch=epoch_num,
train_and_eval=bool(args_opt.train_and_eval_mode), val_ratio=0.001)
actual_epoch_num = int(ds.get_dataset_size() / args_opt.sink_size)
callback = [
TimeMonitor(args_opt.sink_size),
]
update_cell = DynamicLossScaleUpdateCell(loss_scale_value=loss_scale_value, scale_factor=2, scale_window=1000)
pangu_alpha_with_grads = PanguAlphaTrainOneStepWithLossScaleCell(
pangu_alpha_with_loss, optimizer=optimizer, scale_update_cell=update_cell, enable_global_norm=True,
config=config)
if ds_eval:
ppl_metric = PPLMetric(config.seq_length)
validation_loss = ValidationLoss(eod_token=args_opt.eod_id)
model = Model(pangu_alpha_with_grads, eval_network=pangu_alpha_with_loss,
metrics={"ppl": ppl_metric, "validation_loss": validation_loss})
callback.append(
EvalCallBack(
model=model,
eval_dataset=ds_eval,
ppl_metric=ppl_metric,
validation_loss=validation_loss,
print_per_step=10,
has_trained_step=args_opt.has_trained_steps,
local_rank=rank,
rank_size=device_num,
tb_writer=summary_writer
)
)
else:
model = Model(pangu_alpha_with_grads)
if args_opt.load_ckpt_epoch > 0:
print("===build model and load ckpt")
time_stamp = datetime.datetime.now()
print(f"time stamp {time_stamp.strftime('%Y.%m.%d-%H:%M:%S')} before building", flush=True)
model.build(train_dataset=ds, sink_size=args_opt.sink_size, epoch=actual_epoch_num)
time_stamp = datetime.datetime.now()
print(f"time stamp {time_stamp.strftime('%Y.%m.%d-%H:%M:%S')} before loading ckpt", flush=True)
net_not_load = load_param_into_net(pangu_alpha_with_loss, param_dict)
opt_not_load = load_param_into_net(optimizer, param_dict)
os.mkdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/2/rank_{rank}')
while True:
num = len(os.listdir(f'/home/work/sfs/cache/{os.environ["BATCH_JOB_ID"]}/2'))
if num == device_num:
break
if rank % 64 == 0:
print("Loaded ckpt in step 2: ", num)
time.sleep(1)
callback.append(
LossCallBack(
name=args_opt.ckpt_name_prefix,
dataset_size=args_opt.sink_size,
local_rank=rank,
rank_size=device_num,
has_trained_epoch=args_opt.has_trained_epoches,
has_trained_step=args_opt.has_trained_steps,
micro_size=args_opt.micro_size * micro_interleaved_size,
tb_writer=summary_writer,
)
)
if not args_opt.profiling:
add_checkpoint_callback_policy(args_opt, callback, rank)
if args_opt.incremental_training:
strategy = model.infer_train_layout(train_dataset=ds, sink_size=args_opt.sink_size)
print("======start load_distributed checkpoint", flush=True)
# For 2.6B and 13B models, the number of ckpt files is 512.
ckpt_file_list = [os.path.join(args_opt.load_ckpt_path, f"filerted_{ckpt_rank}.ckpt") for ckpt_rank in
range(0, 512)]
print(f"Loading from path {ckpt_file_list[0]}", flush=True)
load_distributed_checkpoint(model.train_network, ckpt_file_list, strategy)
print("Dataset size: {}, actual_epoch_num: {}".format(ds.get_dataset_size(), actual_epoch_num), flush=True)
try:
model.train(10 if args_opt.profiling else actual_epoch_num, ds, callbacks=callback,
sink_size=args_opt.sink_size, dataset_sink_mode=True)
finally:
if args_opt.profiling:
jobid = os.environ["BATCH_JOB_ID"]
profiler.analyse()
rank_id = rank
if context.get_context("save_graphs"):
mox.file.make_dirs("s3://wudao-1/yyf/graphs_" + jobid)
mox.file.copy_parallel(src_url="/cache/graphs_of_device_id_" + str(rank_id),
dst_url="s3://wudao-1/yyf/graphs_" + jobid + "/" + str(rank_id))
if rank_id % 8 == 0:
mox.file.make_dirs("s3://wudao-1/yyf/profiler_" + jobid)
mox.file.copy_parallel(src_url="/cache/profiler_data",
dst_url="s3://wudao-1/yyf/profiler_" + jobid + "/" + str(rank_id)) |
Load checkpoint process. | def restore_checkpoint(args_param, sink_size, dataset, model, network, epoch):
r"""
Load checkpoint process.
"""
print("======start single checkpoint", flush=True)
ckpt_name = args_param.ckpt_name_prefix
ckpt_pattern = os.path.join(args_param.save_checkpoint_path, "rank_{}".format(D.get_rank()), f"{ckpt_name}*.ckpt")
ckpt_all_files = glob.glob(ckpt_pattern)
if not ckpt_all_files:
print(f"There is no ckpt file in {args_param.save_checkpoint_path}, "
f"current ckpt_files found is {ckpt_all_files} "
f"with pattern {ckpt_pattern}, so skip the loading.")
return
ckpt_exp_pattern = os.path.join(args_param.save_checkpoint_path, "rank_{}".format(D.get_rank()),
f"{ckpt_name}*_breakpoint.ckpt")
ckpt_exp_files = glob.glob(ckpt_exp_pattern)
ckpt_files = []
for file in ckpt_all_files:
if file not in ckpt_exp_files:
ckpt_files.append(file)
if not ckpt_files:
print(f"There is no ckpt file in {args_param.save_checkpoint_path}, "
f"current ckpt_files found is {ckpt_files} "
f"with pattern {ckpt_pattern}, so skip the loading.")
return
ckpt_files.sort(key=os.path.getmtime, reverse=True)
time_stamp = datetime.datetime.now()
print(f"time stamp {time_stamp.strftime('%Y.%m.%d-%H:%M:%S')} pre trained ckpt model {ckpt_files} loading",
flush=True)
# Load checkpoint files latest file
print(f'Start to load from {ckpt_files[0]}')
param_dict = load_checkpoint(ckpt_files[0])
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_param.has_trained_epoches = int(param_dict["epoch_num"].data.asnumpy())
args_param.has_trained_steps = int(param_dict["step_num"].data.asnumpy())
model.build(train_dataset=dataset, sink_size=sink_size, epoch=epoch)
load_param_into_net(network, param_dict) |
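# Illustrative sketch (hypothetical file names): restore_checkpoint() above expects one
# sub-directory per rank under save_checkpoint_path, e.g.
#   {save_checkpoint_path}/rank_0/{ckpt_name_prefix}-10_2.ckpt
#   {save_checkpoint_path}/rank_0/{ckpt_name_prefix}-10_2_breakpoint.ckpt   <- excluded
# and loads the newest non-breakpoint file for the local rank.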
Get exception checkpoint based on restore ranks
Args:
args_param: training model parameters
Returns: exception checkpoint list | def get_exception_checkpoints(args_param):
"""
Get exception checkpoint based on restore ranks
Args:
args_param: training model parameters
Returns: exception checkpoint list
"""
print("======start exception checkpoint", flush=True)
restore_ranks = os.getenv("RESTORE_RANKS")
if not restore_ranks:
return None
restore_rank_list = list(map(int, restore_ranks.split(",")))
ckpt_file_list = []
ckpt_name = args_param.ckpt_name_prefix
for ckpt_rank in restore_rank_list:
ckpt_pattern = os.path.join(args_param.save_checkpoint_path,
f"rank_{ckpt_rank}",
f"{ckpt_name}*_breakpoint.ckpt")
ckpt_files = glob.glob(ckpt_pattern)
if not ckpt_files:
print(
f"There is no ckpt file in {args_param.save_checkpoint_path}, "
f"current ckpt_files found is {ckpt_files} "
f"with pattern {ckpt_pattern}, so skip the loading.")
return None
ckpt_files.sort(key=os.path.getmtime, reverse=True)
ckpt_file_list.append(ckpt_files[0])
print(f"checkpoint file {ckpt_file_list}")
return ckpt_file_list |
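# Minimal usage sketch (hypothetical values): RESTORE_RANKS is read above as a
# comma-separated list of ranks whose breakpoint checkpoints should be recovered,
# e.g. set before launching the job:
#   export RESTORE_RANKS=0,8,16
# or from Python:
# os.environ["RESTORE_RANKS"] = "0,8,16"
# ckpt_list = get_exception_checkpoints(args_opt)   # one *_breakpoint.ckpt path per listed rank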
Check that all exception checkpoints have the same size.
Args:
ckpt_file_list: exception checkpoints
Returns: result of exception checkpoints size check. | def check_exception_checkpoints(ckpt_file_list):
"""
Check that all exception checkpoints have the same size.
Args:
ckpt_file_list: exception checkpoints
Returns: result of exception checkpoints size check.
"""
ckpt_size_list = []
for ckpt_file in ckpt_file_list:
ckpt_size_list.append(os.path.getsize(ckpt_file))
if len(set(ckpt_size_list)) > 1:
return False
return True |
Restore exception checkpoint.
Args:
args_param: training job params
sink_size: training job sink size
dataset: dataset for training
model: model
network: pangu_alpha network
epoch: training epoch
Returns: load exception checkpoint success or not. | def restore_exception_checkpoint(args_param, sink_size, dataset, model, network, epoch):
"""
Restore exception checkpoint.
Args:
args_param: training job params
sink_size: training job sink size
dataset: dataset for training
model: model
network: pangu_alpha network
epoch: training epoch
Returns: load exception checkpoint success or not.
"""
if os.getenv("RESTORE_RANKS") == "-1":
return False
ckpt_file_list = get_exception_checkpoints(args_param)
restore_flag = False
if ckpt_file_list:
restore_flag = check_exception_checkpoints(ckpt_file_list)
if not restore_flag:
return False
ckpt_name = args_param.ckpt_name_prefix
restore_ranks_map = os.getenv("RESTORE_RANKS_MAP")
if not restore_ranks_map:
return False
try:
print("whether run into load process")
restore_ranks_map_json = json.loads(restore_ranks_map)
map_rank_id = D.get_rank()
for key in restore_ranks_map_json.keys():
key_list = list(key.split(","))
if str(D.get_rank()) in key_list:
map_rank_id = restore_ranks_map_json.get(key)
print(f"loading map rank id {map_rank_id}")
ckpt_pattern = os.path.join(args_param.save_checkpoint_path,
f"rank_{map_rank_id}",
f"{ckpt_name}*breakpoint.ckpt")
ckpt_files = glob.glob(ckpt_pattern)
ckpt_files.sort(key=os.path.getmtime, reverse=True)
print(f" checkpoint files {ckpt_files[0]}")
param_dict = load_checkpoint(ckpt_files[0])
print(f" checkpoint param dict epoch num {param_dict.get('epoch_num')}")
if param_dict.get("epoch_num") and param_dict.get("step_num"):
args_param.has_trained_epoches = int(
param_dict["epoch_num"].data.asnumpy())
args_param.has_trained_steps = int(
param_dict["step_num"].data.asnumpy())
# Load checkpoint files
model.build(train_dataset=dataset, sink_size=sink_size, epoch=epoch)
load_param_into_net(network, param_dict)
except TypeError:
return False
else:
return True |
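# Illustrative sketch (hypothetical mapping): RESTORE_RANKS_MAP is parsed above with
# json.loads; each key is a comma-separated group of ranks and the value is the rank whose
# breakpoint checkpoint that whole group should load, e.g.
#   export RESTORE_RANKS_MAP='{"0,1,2,3": 0, "4,5,6,7": 4}'
# With this mapping, ranks 0-3 load rank_0's *breakpoint.ckpt and ranks 4-7 load rank_4's.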
Set parallel context in pipeline training process | def set_pipeline_parallel_context(args_opt):
"""
Set parallel context in pipeline training process
"""
D.init()
device_num = D.get_group_size()
rank_id = D.get_rank()
print("rank_id is {}, device_num is {}".format(rank_id, device_num))
context.reset_auto_parallel_context()
context.set_auto_parallel_context(
parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, gradients_mean=False,
full_batch=bool(args_opt.full_batch), loss_repeated_mean=True,
device_num=device_num, enable_parallel_optimizer=bool(args_opt.optimizer_shard),
pipeline_stages=args_opt.stage_num)
set_algo_parameters(elementwise_op_strategy_follow=True)
_set_multi_subgraphs()
return rank_id, device_num |
The main training process in pipeline. | def run_train_pipeline(args_opt):
r"""The main training process in pipeline."""
# Set hccl connect time
os.environ['HCCL_CONNECT_TIMEOUT'] = "6000"
context.set_context(save_graphs=False, mode=context.GRAPH_MODE, device_target=args_opt.device_target)
if args_opt.profiling:
profiler = Profiler(output_path="./profiler_data")
context.set_context(variable_memory_max_size="30GB")
rank_id = 0
device_num = 1
if args_opt.distribute == "true":
rank_id, device_num = set_pipeline_parallel_context(args_opt)
# copy data from the cloud to the /cache/Data
cache_url = '/cache/Data/'
eval_cache_url = '/cache/EvalData/'
if not args_opt.offline:
download_data(src_data_url=args_opt.data_url, tgt_data_path=cache_url, rank=rank_id)
download_data(src_data_url=args_opt.eval_data_url, tgt_data_path=eval_cache_url, rank=rank_id)
model_parallel_num = args_opt.op_level_model_parallel_num
stage_device_num = int(device_num / args_opt.stage_num)
data_parallel_num = int(stage_device_num / model_parallel_num)
print("Topology:", model_parallel_num, data_parallel_num, stage_device_num)
if data_parallel_num <= 1 and args_opt.optimizer_shard == 1:
raise ValueError("The dp must large than 1 when applying optimizer shard.")
per_batch_size = args_opt.per_batch_size
batch_size = per_batch_size * data_parallel_num * args_opt.micro_size
parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,
model_parallel=model_parallel_num,
pipeline_stage=args_opt.stage_num,
micro_batch_num=args_opt.micro_size,
optimizer_shard=bool(args_opt.optimizer_shard),
vocab_emb_dp=bool(args_opt.word_emb_dp),
recompute=True,
)
config = PanguAlphaConfig(
batch_size=batch_size // parallel_config.micro_batch_num,
num_heads=args_opt.num_heads,
hidden_size=args_opt.embedding_size,
seq_length=args_opt.seq_length,
vocab_size=args_opt.vocab_size,
num_layers=args_opt.num_layers,
ffn_hidden_size=args_opt.embedding_size * 4,
eod_token=args_opt.eod_id,
load_ckpt_path=args_opt.load_ckpt_path,
param_init_type=mstype.float32
if args_opt.param_init_type == "fp32"
else mstype.float16,
enable_offload=bool(args_opt.opt_offload),
parallel_config=parallel_config,
apply_scale_normalization=args_opt.apply_scale_normalization,
)
print("===config is: ", config, flush=True)
pangu_alpha = PanguAlphaModel(config=config)
loss = CrossEntropyLoss(config.parallel_config.dp_mp_config)
pangu_alpha_with_loss_net = PipelineCell(PanGUAlphaWithLoss(config, pangu_alpha, loss),
config.parallel_config.micro_batch_num)
pangu_alpha_with_loss = _VirtualDatasetCell(pangu_alpha_with_loss_net)
print("=====args_opt is: ", args_opt, flush=True)
lr = LearningRate(learning_rate=args_opt.start_lr, end_learning_rate=args_opt.end_lr,
warmup_steps=args_opt.warmup_step, decay_steps=args_opt.decay_steps)
params = pangu_alpha.infer_param_pipeline_stage()
group_params = set_weight_decay(params)
if args_opt.optimizer == "lamb":
optimizer = nn.Lamb(group_params, learning_rate=lr)
elif args_opt.opt_offload:
optimizer = AdamWeightDecayOp(group_params, learning_rate=lr, eps=1e-8, beta1=0.9, beta2=0.95,
param_init_type=config.param_init_type)
else:
optimizer = nn.AdamWeightDecay(group_params, learning_rate=lr, beta1=0.9, beta2=0.95, eps=1e-8)
ds = create_dataset(config.batch_size * parallel_config.micro_batch_num, data_path=args_opt.code_data,
device_num=stage_device_num, args_opt=args_opt,
rank=rank_id % stage_device_num, eod_reset=True, data_start_index=0,
full_batch=context.get_auto_parallel_context("full_batch"),
column_name=args_opt.data_column_name)
epoch_num = args_opt.epoch_size
step_per_epoch = ds.get_dataset_size()
callback_size = args_opt.sink_size
actual_epoch_num = int(epoch_num * step_per_epoch / callback_size)
callback = [
TimeMonitor(callback_size),
LossCallBack(
args_opt.ckpt_name_prefix,
callback_size,
rank_id,
device_num,
micro_size=parallel_config.micro_batch_num,
tb_dir=args_opt.tb_dir,
),
]
loss_scale_value = math.pow(2, 32)
update_cell = DynamicLossScaleUpdateCell(loss_scale_value=loss_scale_value, scale_factor=2, scale_window=1000)
pangu_alpha_with_grads = PanguAlphaTrainPipelineWithLossScaleCell(
pangu_alpha_with_loss, optimizer=optimizer, config=config, scale_update_cell=update_cell)
if args_opt.train_and_eval_mode:
ds_eval = create_dataset(config.batch_size * parallel_config.micro_batch_num, data_path=eval_cache_url,
args_opt=args_opt,
device_num=stage_device_num, rank=rank_id % stage_device_num, eod_reset=True,
data_start_index=0, full_batch=bool(args_opt.full_batch),
column_name=args_opt.data_column_name,
num_samples=args_opt.eval_steps * config.batch_size)
ppl_metric = PPLMetric(config.seq_length)
pangu_alpha_with_loss_eval_net = _VirtualDatasetCell(PanGUAlphaWithLoss(config, pangu_alpha, loss))
model = Model(pangu_alpha_with_grads, eval_network=pangu_alpha_with_loss_eval_net, metrics={"ppl": ppl_metric})
model.build(ds, ds_eval, sink_size=callback_size)
eval_callback = EvalCallBack(model, ds_eval, ppl_metric)
callback.append(eval_callback)
else:
model = Model(pangu_alpha_with_grads)
if args_opt.pre_trained:
flag = restore_exception_checkpoint(args_opt, callback_size, ds, model,
pangu_alpha_with_grads, epoch=actual_epoch_num)
if not flag:
restore_checkpoint(args_opt, callback_size, ds, model, pangu_alpha_with_grads, epoch=actual_epoch_num)
callback = [
TimeMonitor(callback_size),
LossCallBack(
args_opt.ckpt_name_prefix,
callback_size,
rank_id,
device_num,
args_opt.has_trained_epoches,
args_opt.has_trained_steps,
tb_dir=args_opt.tb_dir,
),
]
# add_checkpoint_callback_policy(args_opt, callback, rank_id)
print("------ train start -------")
model.train(10 if args_opt.profiling else actual_epoch_num, ds, callbacks=callback,
sink_size=callback_size, dataset_sink_mode=True)
if args_opt.profiling:
profiler.analyse() |
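# Worked example of the topology arithmetic above (hypothetical numbers, for illustration):
# with device_num = 32, stage_num = 4 and op_level_model_parallel_num = 4:
#   stage_device_num  = 32 // 4 = 8        # devices per pipeline stage
#   data_parallel_num = 8 // 4  = 2        # data-parallel copies inside a stage
# and with per_batch_size = 2, micro_size = 16:
#   batch_size        = 2 * 2 * 16 = 64    # global batch fed to the pipeline
#   config.batch_size = 64 // 16   = 4     # samples processed per micro-batch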
get_op_support_info | def get_op_support_info(input_x, input_gamma, input_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
epsilon=1e-12, kernel_name="layer_norm",
impl_mode="high_performance"):
"""
get_op_support_info
"""
format_x = input_x.get("format").upper()
shape_x = input_x.get("shape")
ori_shape_x = input_x.get("ori_shape")
begin_norm_axis = shape_util.axis_check(len(shape_x), begin_norm_axis)
begin_params_axis = shape_util.axis_check(len(shape_x), begin_params_axis)
axis_split_matrix = []
if format_x in ("ND", "NCHW", "NHWC", "NC1HWC0"):
if begin_params_axis == 0:
for i in range(begin_norm_axis):
split_0 = [SplitInput([0, [i], [-1], [-1]], [1, [i], [-1], [-1]], [2, [i], [-1], [-1]]),
SplitOutput([0, [i]], [1, [i]], [2, [i]])]
axis_split_matrix.append(split_0)
else:
if begin_norm_axis <= begin_params_axis:
for i in range(begin_norm_axis):
split_0 = [SplitInput([0, [i], [-1], [-1]]),
SplitOutput([0, [i]], [1, [i]], [2, [i]])]
axis_split_matrix.append(split_0)
else:
for i in range(begin_params_axis):
split_0 = [SplitInput([0, [i], [-1], [-1]]),
SplitOutput([0, [i]], [1, [i]], [2, [i]])]
axis_split_matrix.append(split_0)
elif format_x == "FRACTAL_NZ":
index_list = tuple(index for index, _ in enumerate(ori_shape_x))
start_axis = min(begin_norm_axis, begin_params_axis)
no_split_axis = index_list[start_axis:]
no_split_axis = to_frac_z_axis(ori_shape_x, no_split_axis)
for i in range(len(shape_x)):
if i not in no_split_axis:
split_0 = [SplitInput([0, [i], [-1], [-1]]),
SplitOutput([0, [i]], [1, [i]], [2, [i]])]
axis_split_matrix.append(split_0)
else:
axis_split_matrix = None
axis_reduce_list = None
op_cal_info_in_json = get_op_cal_info(axis_split_matrix, axis_reduce_list, 0, 0)
return op_cal_info_in_json |
division_sixteen | def _division_sixteen(shape, begin_norm_axis):
"""
division_sixteen
"""
if len(shape) < 2:
if shape[-1] == 0:
error_detail = "value of shape_x is illegal"
error_manager_vector.raise_err_input_shape_invalid("layer_norm", "input_x",
error_detail)
return False
if shape[-1] == 0 or shape[-2] == 0:
error_detail = "value of shape_x is illegal"
error_manager_vector.raise_err_input_shape_invalid("layer_norm", "input_x",
error_detail)
is_reduce_last = begin_norm_axis in (-1, len(shape) - 1)
# if shape[-2] % constant.C0_SIZE == 0:
# if shape[-1] % constant.C0_SIZE == 0 or (shape[-1] % constant.C0_SIZE != 0 and is_reduce_last):
# return True
return False |
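# Added note: with the block above commented out, _division_sixteen always returns False
# (or raises on an illegal shape), so the `not _division_sixteen(...)` condition in
# op_select_format below is always satisfied and the FRACTAL_NZ layout is never advertised
# by this variant of the operator.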
select format dynamically | def op_select_format(input_x, input_gamma, input_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
kernel_name="layer_norm"):
"""
select format dynamically
"""
shape_x = input_x.get("ori_shape")
shape_x = shape_util.scalar2tensor_one(shape_x)
shape_gamma = input_gamma.get("ori_shape")
shape_gamma = shape_util.scalar2tensor_one(shape_gamma)
if begin_params_axis == 0:
if len(shape_gamma) >= 2 or (not _division_sixteen(shape_x, begin_norm_axis)):
input0 = util_select_op_base.gen_param(classify="input0", name="x",
datatype="float16,float16,float16,float16,"
"float,float,float,float",
format="NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,NHWC,ND")
input1 = util_select_op_base.gen_param(classify="input1", name="gamma",
datatype="float16,float16,float16,float16,float,"
"float,float,float",
format="NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,NHWC,ND")
input2 = util_select_op_base.gen_param(classify="input2", name="beta",
datatype="float16,float16,float16,float16,float,"
"float,float,float",
format="NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,NHWC,ND")
output0 = util_select_op_base.gen_param(classify="output0", name="y",
datatype="float16,float16,float16,float16,float,"
"float,float,float",
format="NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,NHWC,ND")
output1 = util_select_op_base.gen_param(classify="output1", name="mean",
datatype="float16,float16,float16,float16,float,"
"float,float,float",
format="NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,NHWC,ND")
output2 = util_select_op_base.gen_param(classify="output2", name="variance",
datatype="float16,float16,float16,float16,float,"
"float,float,float",
format="NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,NHWC,ND")
else:
input0 = util_select_op_base.gen_param(classify="input0", name="x",
datatype="float16,float,float16,float16,float16,"
"float16,float,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NC1HWC0,NHWC,"
"ND,NCHW,NC1HWC0,NHWC,ND")
input1 = util_select_op_base.gen_param(classify="input1", name="gamma",
datatype="float16,float,float16,float16,float16,"
"float16,float,float,float,float",
format="ND,ND,NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,"
"NHWC,ND")
input2 = util_select_op_base.gen_param(classify="input2", name="beta",
datatype="float16,float,float16,float16,float16,"
"float16,float,float,float,float",
format="ND,ND,NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,"
"NHWC,ND")
output0 = util_select_op_base.gen_param(classify="output0", name="y",
datatype="float16,float,float16,float16,float16,"
"float16,float,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NC1HWC0,NHWC,ND,"
"NCHW,NC1HWC0,NHWC,ND")
output1 = util_select_op_base.gen_param(classify="output1", name="mean",
datatype="float16,float,float16,float16,float16,"
"float16,float,float,float,float",
format="ND,ND,NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,"
"NHWC,ND")
output2 = util_select_op_base.gen_param(classify="output2", name="variance",
datatype="float16,float,float16,float16,float16,"
"float16,float,float,float,float",
format="ND,ND,NCHW,NC1HWC0,NHWC,ND,NCHW,NC1HWC0,"
"NHWC,ND")
else:
if len(shape_gamma) >= 2 or (not _division_sixteen(shape_x, begin_norm_axis)):
input0 = util_select_op_base.gen_param(classify="input0", name="x",
datatype="float16,float16,float16,"
"float,float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
input1 = util_select_op_base.gen_param(classify="input1", name="gamma",
datatype="float16,float16,float16,"
"float,float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
input2 = util_select_op_base.gen_param(classify="input2", name="beta",
datatype="float16,float16,float16,"
"float,float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
output0 = util_select_op_base.gen_param(classify="output0", name="y",
datatype="float16,float16,float16,"
"float,float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
output1 = util_select_op_base.gen_param(classify="output1", name="mean",
datatype="float16,float16,float16,"
"float,float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
output2 = util_select_op_base.gen_param(classify="output2", name="variance",
datatype="float16,float16,float16,"
"float,float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
else:
input0 = util_select_op_base.gen_param(classify="input0", name="x",
datatype="float16,float,float16,float16,"
"float16,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NHWC,"
"ND,NCHW,NHWC,ND")
input1 = util_select_op_base.gen_param(classify="input1", name="gamma",
datatype="float16,float,float16,float16,"
"float16,float,float,float",
format="ND,ND,NCHW,NHWC,ND,NCHW,"
"NHWC,ND")
input2 = util_select_op_base.gen_param(classify="input2", name="beta",
datatype="float16,float,float16,float16,"
"float16,float,float,float",
format="ND,ND,NCHW,NHWC,ND,NCHW,"
"NHWC,ND")
output0 = util_select_op_base.gen_param(classify="output0", name="y",
datatype="float16,float,float16,float16,"
"float16,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NHWC,ND,"
"NCHW,NHWC,ND")
output1 = util_select_op_base.gen_param(classify="output1", name="mean",
datatype="float16,float,float16,float16,"
"float16,float,float,float",
format="ND,ND,NCHW,NHWC,ND,NCHW,"
"NHWC,ND")
output2 = util_select_op_base.gen_param(classify="output2", name="variance",
datatype="float16,float,float16,float16,"
"float16,float,float,float",
format="ND,ND,NCHW,NHWC,ND,NCHW,"
"NHWC,ND")
param_list = [input0, input1, input2, output0, output1, output2]
param_dynamic_in_json = util_select_op_base.get_dynamic_param_in_json(param_list)
return param_dynamic_in_json |
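# Added note: in the gen_param calls above, the comma-separated "datatype" and "format"
# strings are paired positionally (the i-th dtype goes with the i-th format), so each call
# enumerates the full set of (dtype, format) combinations supported for that tensor.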
map axes of the original shape to the corresponding axes of the FRACTAL_NZ shape
Parameters
----------
ori_shape: list or tuple
original shape of input
ori_axis: list or tuple
original axis of original shape to operate
Returns
-------
output: list
axis of the fractal Nz shape | def to_frac_z_axis(ori_shape, ori_axis):
"""
map axes of the original shape to the corresponding axes of the FRACTAL_NZ shape
Parameters
----------
ori_shape: list or tuple
original shape of input
ori_axis: list or tuple
original axis of original shape to operate
Returns
-------
output: list
axis of the fractal Nz shape
"""
frac_z_axis = list(ori_axis)
shape_len = len(ori_shape)
axis_count = len(frac_z_axis)
axis_negative_1 = shape_len - 1
axis_negative_2 = shape_len - 2
for i in range(axis_count):
axis_index = (frac_z_axis[i] + shape_len) % shape_len
if axis_index == axis_negative_1:
if frac_z_axis[i] > shape_len - 2:
frac_z_axis[i] = axis_index - 1
frac_z_axis.append(axis_index + 1)
else:
frac_z_axis[i] = axis_index - 1
frac_z_axis.append(axis_index + 2)
elif axis_index == axis_negative_2:
frac_z_axis[i] = axis_index + 1
frac_z_axis.append(axis_index + 2)
else:
frac_z_axis[i] = axis_index
return frac_z_axis |
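# Illustrative usage sketch (assumed shapes): FRACTAL_NZ stores the last two dimensions of
# an ND tensor in 16x16 blocks, so an original (batch, m, n) tensor is laid out as
# (batch, n//16, m//16, 16, 16) (cf. the docstring of _check_vector_to_cube below).
# to_frac_z_axis remaps reduce axes given on the ND shape to axes of that blocked layout:
# nz_axes = to_frac_z_axis(ori_shape=(8, 512, 1024), ori_axis=(2,))
# tbe.sum(x_nz, axis=nz_axes, keepdims=True)   # reduce over the original last dimension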
broadcast_nz | def _broadcast_nz(tensor, shape):
"""
broadcast_nz
"""
broadcast_axes = []
src_shape = shape_util.shape_to_list(tensor.shape)
for i, _ in enumerate(shape):
if shape[i] != src_shape[i]:
broadcast_axes.append(i)
if len(broadcast_axes) == 2 and \
broadcast_axes[1] - broadcast_axes[0] != 1 and \
broadcast_axes[1] + 1 == len(shape):
temp_shape = src_shape[:-1] + [shape[-1]]
tensor = tbe.broadcast(tensor, temp_shape)
tensor = tbe.broadcast(tensor, shape)
return tensor |
judge whether the cube unit can be used to handle the reduce-sum
only the following cases are supported on Ascend910 and Ascend710:
ori_shape: ((batch), m, 1024(768)), "shape": ((batch), 64(48), m//16, 16, 16), "dtype": fp16 | def _check_vector_to_cube(dtype, ori_shape_x, shape_x, begin_norm_axis, impl_mode):
"""
judge whether the cube unit can be used to handle the reduce-sum
only the following cases are supported on Ascend910 and Ascend710:
ori_shape: ((batch), m, 1024(768)), "shape": ((batch), 64(48), m//16, 16, 16), "dtype": fp16
"""
def _check_shape_and_dtype():
if dtype != "float16":
return False
if len(ori_shape_x) not in (2, 3) or ori_shape_x[-1] not in (1024, 768, 96, 384, 192, 128, 512, 256):
return False
if len(shape_x) not in (4, 5) or shape_x[-4] not in (64, 48, 6, 12, 24, 16, 32):
return False
if "Ascend910" not in get_soc_spec(SOC_VERSION) and "Ascend710" not in get_soc_spec(SOC_VERSION):
return False
if begin_norm_axis != (len(ori_shape_x) - 1):
return False
return True
return impl_mode == "high_performance" and _check_shape_and_dtype() |
DSL description of the layernorm operator's mathematical calculation process for non_aligned scene | def nz_non_aligned(input_x, input_gamma, input_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
ori_shape, epsilon, kernel_name="layer_norm",
impl_mode="high_performance"):
"""
DSL description of the layernorm operator's mathematical calculation process for non_aligned scene
"""
shape_x = shape_util.shape_to_list(input_x.shape)
dtype = input_x.dtype.lower()
cast_dtype = "float16"
if dtype == "float16" and \
((tbe_platform.cce_conf.api_check_support
("te.lang.cce.vexp", "float32") and
impl_mode == "high_performance") or
impl_mode == "high_precision"):
cast_dtype = "float32"
input_x = tbe.cast_to(input_x, "float32")
input_gamma = tbe.cast_to(input_gamma, "float32")
input_beta = tbe.cast_to(input_beta, "float32")
else:
input_x = tbe.vadds(input_x, 0)
# Calculate the scaling ratio of the average
reduce_elts = 1.0
index_list = tuple(index for index, _ in enumerate(ori_shape))
reduce_axis = index_list[begin_norm_axis:]
for i in reduce_axis:
reduce_elts *= ori_shape[i]
reduce_axis = to_frac_z_axis(ori_shape, reduce_axis)
mean_cof = reduce_elts ** (-1)
# DSL description of the mean calculation process
with tvm.tag_scope("tail_block_pretreatment"):
lambda_func = lambda *indice: tvm.const(0, input_x.dtype)
temp = tvm.compute(input_x.shape, lambda_func, name="tail_block_pretreatment")
input_x = tbe.vadd(input_x, temp)
mean_muls = tbe.vmuls(input_x, mean_cof)
mean = tbe.sum(mean_muls, axis=reduce_axis, keepdims=True)
mean_square = tbe.vmul(mean, mean)
x_square = tbe.vmul(input_x, input_x)
x_square = tbe.vmuls(x_square, mean_cof)
x_square_mean = tbe.sum(x_square, axis=reduce_axis, keepdims=True)
variance = tbe.vsub(x_square_mean, mean_square)
# DSL description of the normalize calculation process
mean_normalize_broadcast = _broadcast_nz(mean, shape_x)
normalize_sub = tbe.vsub(input_x, mean_normalize_broadcast)
epsilon = tvm.const(epsilon, dtype=cast_dtype)
normalize_add = tbe.vadds(variance, epsilon)
normalize_log = tbe.vlog(normalize_add)
normalize_log_mul = \
tbe.vmuls(normalize_log, tvm.const(-0.5, dtype=cast_dtype))
normalize_exp = tbe.vexp(normalize_log_mul)
variance_normalize_broadcast = _broadcast_nz(normalize_exp, shape_x)
normalize_mul = tbe.vmul(normalize_sub, variance_normalize_broadcast)
# DSL description of the scale and translate calculation process
gamma_broadcast = _broadcast_nz(input_gamma, shape_x)
beta_broadcast = _broadcast_nz(input_beta, shape_x)
scale_mul = tbe.vmul(gamma_broadcast, normalize_mul)
res = tbe.vadd(scale_mul, beta_broadcast)
if dtype == "float16" and \
((tbe_platform.cce_conf.api_check_support
("te.lang.cce.vexp", "float32") and
impl_mode == "high_performance") or
impl_mode == "high_precision"):
mean = tbe.cast_to(mean, "float16")
variance = tbe.cast_to(variance, "float16")
res = tbe.cast_to(res, "float16")
return mean, variance, res |
DSL description of the layernorm operator's mathematical calculation process
Parameters
----------
input_x: TVM tensor
the placeholder of x input data
input_gamma: TVM tensor
the placeholder of gamma input data
input_beta: TVM tensor
the placeholder of beta input data
output_data: dict
shape and dtype of output
begin_norm_axis: int
The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: int
The first parameter (beta, gamma) dimension: scale
and centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
epsilon: float,
Minimum positive number greater than 0
kernel_name: str
cce kernel name, default value is "cce_layernorm"
Returns
-------
res_tuple: tuple
(mean, variance, result) | def layer_norm_compute_nz(input_x, input_gamma, input_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
ori_shape, epsilon, kernel_name="layer_norm",
impl_mode="high_performance"):
"""
DSL description of the layernorm operator's mathematical calculation process
Parameters
----------
input_x: TVM tensor
the placeholder of x input data
input_gamma: TVM tensor
the placeholder of gamma input data
input_beta: TVM tensor
the placeholder of beta input data
output_data: dict
shape and dtype of output
begin_norm_axis: int
The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: int
The first parameter (beta, gamma) dimension: scale
and centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
epsilon: float,
Minimum positive number greater than 0
kernel_name: str
cce kernel name, default value is "cce_layernorm"
Returns
-------
res_tuple: tuple
(mean, variance, result)
"""
shape_x = shape_util.shape_to_list(input_x.shape)
dtype = input_x.dtype.lower()
cast_dtype, cast_fp16_dtype = "float16", "float16"
cast_dtype_precision = dtype
if dtype == "float16" and \
((tbe_platform.cce_conf.api_check_support
("te.lang.cce.vexp", "float32") and
impl_mode == "high_performance") or
impl_mode == "high_precision"):
cast_dtype = "float32"
cast_dtype_precision = "float32"
input_x = tbe.cast_to(input_x, "float32")
input_gamma = tbe.cast_to(input_gamma, "float32")
input_beta = tbe.cast_to(input_beta, "float32")
# Calculate the scaling ratio of the average
reduce_elts = 1.0
index_list = tuple(index for index, _ in enumerate(ori_shape))
reduce_axis = index_list[begin_norm_axis:]
for i in reduce_axis:
reduce_elts *= ori_shape[i]
reduce_axis = to_frac_z_axis(ori_shape, reduce_axis)
mean_cof = reduce_elts ** (-1)
if impl_mode != "keep_fp16":
# DSL description of the mean calculation process
mean_muls = tbe.vmuls(input_x, mean_cof)
mean = tbe.sum(mean_muls, axis=reduce_axis, keepdims=True)
# DSL description of the variance calculation process
mean_variance_broadcast = _broadcast_nz(mean, shape_x)
variance_sub = tbe.vsub(input_x, mean_variance_broadcast)
variance_mul = tbe.vmul(variance_sub, variance_sub)
variance_muls = tbe.vmuls(variance_mul, mean_cof)
variance = tbe.sum(variance_muls, axis=reduce_axis, keepdims=True)
else:
# DSL description of the mean calculation process
x_sum = tbe.sum(input_x, axis=reduce_axis, keepdims=True)
mean = tbe.vmuls(x_sum, mean_cof)
# DSL description of the variance calculation process
mean_variance_broadcast = _broadcast_nz(mean, shape_x)
variance_sub = tbe.vsub(input_x, mean_variance_broadcast)
variance_mul = tbe.vmul(variance_sub, variance_sub)
variance_sum = tbe.sum(variance_mul, axis=reduce_axis, keepdims=True)
variance = tbe.vmuls(variance_sum, mean_cof)
# DSL description of the normalize calculation process
if impl_mode == "high_performance":
mean_normalize_broadcast = _broadcast_nz(mean, shape_x)
normalize_sub = tbe.vsub(input_x, mean_normalize_broadcast)
epsilon = tvm.const(epsilon, dtype=cast_dtype)
variance_normalize_broadcast = _broadcast_nz(variance, shape_x)
normalize_add = tbe.vadds(variance_normalize_broadcast, epsilon)
normalize_log = tbe.vlog(normalize_add)
normalize_log_mul = \
tbe.vmuls(normalize_log, tvm.const(-0.5, dtype=cast_dtype))
normalize_exp = tbe.vexp(normalize_log_mul)
normalize_mul = tbe.vmul(normalize_sub, normalize_exp)
elif impl_mode == "high_precision":
tensor_one = tbe.broadcast(tvm.const(1, cast_dtype_precision), shape_x)
mean_normalize_broadcast = _broadcast_nz(mean, shape_x)
normalize_sub = tbe.vsub(input_x, mean_normalize_broadcast)
variance_normalize_broadcast = _broadcast_nz(variance, shape_x)
epsilon = tvm.const(epsilon, dtype=cast_dtype_precision)
normalize_add = tbe.vadds(variance_normalize_broadcast, epsilon)
normalize_sqrt = tbe.vsqrt(normalize_add, 0)
normalize_rsqrt = tbe.vdiv(tensor_one, normalize_sqrt)
normalize_mul = tbe.vmul(normalize_sub, normalize_rsqrt)
else:
epsilon = tvm.const(epsilon, dtype=cast_fp16_dtype)
normalize_add = tbe.vadds(variance, epsilon)
normalize_log = tbe.vlog(normalize_add)
normalize_log_mul = \
tbe.vmuls(normalize_log, tvm.const(-0.5, dtype=cast_fp16_dtype))
normalize_exp = tbe.vexp(normalize_log_mul)
variance_normalize_broadcast = _broadcast_nz(normalize_exp, shape_x)
normalize_mul = tbe.vmul(variance_sub, variance_normalize_broadcast)
# DSL description of the scale and translate calculation process
if begin_params_axis == 0:
scale_mul = tbe.vmul(input_gamma, normalize_mul)
res = tbe.vadd(scale_mul, input_beta)
else:
gamma_broadcast = _broadcast_nz(input_gamma, shape_x)
beta_broadcast = _broadcast_nz(input_beta, shape_x)
scale_mul = tbe.vmul(gamma_broadcast, normalize_mul)
res = tbe.vadd(scale_mul, beta_broadcast)
if dtype == "float16" and \
((tbe_platform.cce_conf.api_check_support
("te.lang.cce.vexp", "float32") and
impl_mode == "high_performance") or
impl_mode == "high_precision"):
mean = tbe.cast_to(mean, "float16")
variance = tbe.cast_to(variance, "float16")
res = tbe.cast_to(res, "float16")
return mean, variance, res |
DSL description of the layernorm operator's mathematical calculation process
Parameters
----------
input_x: TVM tensor
the placeholder of x input data
input_gamma: TVM tensor
the placeholder of gamma input data
input_beta: TVM tensor
the placeholder of beta input data
output_data: dict
shape and dtype of output
begin_norm_axis: int
The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: int
The first parameter (beta, gamma) dimension: scale
and centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
epsilon: float,
Minimum positive number greater than 0
kernel_name: str
cce kernel name, default value is "cce_layernorm"
Returns
-------
res_tuple: tuple
(mean, variance, result) | def layer_norm_compute(input_x, input_gamma, input_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
epsilon, kernel_name="layer_norm",
impl_mode="high_performance"):
"""
DSL description of the layernorm operator's mathematical calculation process
Parameters
----------
input_x: TVM tensor
the placeholder of x input data
input_gamma: TVM tensor
the placeholder of gamma input data
input_beta: TVM tensor
the placeholder of beta input data
output_data: dict
shape and dtype of output
begin_norm_axis: int
The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: int
The first parameter (beta, gamma) dimension: scale
and centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
epsilon: float,
Minimum positive number greater than 0
kernel_name: str
cce kernel name, default value is "cce_layernorm"
Returns
-------
res_tuple: tuple
(mean, variance, result)
"""
shape_x = shape_util.shape_to_list(input_x.shape)
dtype = input_x.dtype.lower()
cast_dtype, cast_fp16_dtype = "float16", "float16"
cast_dtype_precision = dtype
if dtype == "float16" and \
((tbe_platform.cce_conf.api_check_support
("te.lang.cce.vexp", "float32") and
impl_mode == "high_performance") or
impl_mode == "high_precision"):
cast_dtype = "float32"
cast_dtype_precision = "float32"
input_x = tbe.cast_to(input_x, "float32")
input_gamma = tbe.cast_to(input_gamma, "float32")
input_beta = tbe.cast_to(input_beta, "float32")
# Calculate the scaling ratio of the average
index_list = tuple(index for index, _ in enumerate(shape_x))
reduce_axis = index_list[begin_norm_axis:]
reduce_elts = 1.0
for i in reduce_axis:
reduce_elts *= shape_x[i]
mean_cof = reduce_elts ** (-1)
if impl_mode != "keep_fp16":
# DSL description of the mean calculation process
mean_muls = tbe.vmuls(input_x, mean_cof)
mean = tbe.sum(mean_muls, axis=reduce_axis, keepdims=True)
# DSL description of the variance calculation process
mean_variance_broadcast = tbe.broadcast(mean, shape_x)
variance_sub = tbe.vsub(input_x, mean_variance_broadcast)
variance_mul = tbe.vmul(variance_sub, variance_sub)
variance_muls = tbe.vmuls(variance_mul, mean_cof)
variance = tbe.sum(variance_muls, axis=reduce_axis, keepdims=True)
else:
# DSL description of the mean calculation process
x_sum = tbe.sum(input_x, axis=reduce_axis, keepdims=True)
mean = tbe.vmuls(x_sum, mean_cof)
# DSL description of the variance calculation process
mean_variance_broadcast = tbe.broadcast(mean, shape_x)
variance_sub = tbe.vsub(input_x, mean_variance_broadcast)
variance_mul = tbe.vmul(variance_sub, variance_sub)
variance_sum = tbe.sum(variance_mul, axis=reduce_axis, keepdims=True)
variance = tbe.vmuls(variance_sum, mean_cof)
# DSL description of the normalize calculation process
if impl_mode == "high_performance":
mean_normalize_broadcast = tbe.broadcast(mean, shape_x)
normalize_sub = tbe.vsub(input_x, mean_normalize_broadcast)
epsilon = tvm.const(epsilon, dtype=cast_dtype)
variance_normalize_broadcast = tbe.broadcast(variance, shape_x)
normalize_add = tbe.vadds(variance_normalize_broadcast, epsilon)
normalize_log = tbe.vlog(normalize_add)
normalize_log_mul = \
tbe.vmuls(normalize_log, tvm.const(-0.5, dtype=cast_dtype))
normalize_exp = tbe.vexp(normalize_log_mul)
normalize_mul = tbe.vmul(normalize_sub, normalize_exp)
elif impl_mode == "high_precision":
tensor_one = tbe.broadcast(tvm.const(1, cast_dtype_precision), shape_x)
mean_normalize_broadcast = tbe.broadcast(mean, shape_x)
normalize_sub = tbe.vsub(input_x, mean_normalize_broadcast)
variance_normalize_broadcast = tbe.broadcast(variance, shape_x)
epsilon = tvm.const(epsilon, dtype=cast_dtype_precision)
normalize_add = tbe.vadds(variance_normalize_broadcast, epsilon)
normalize_sqrt = tbe.vsqrt(normalize_add, 0)
normalize_rsqrt = tbe.vdiv(tensor_one, normalize_sqrt)
normalize_mul = tbe.vmul(normalize_sub, normalize_rsqrt)
else:
epsilon = tvm.const(epsilon, dtype=cast_fp16_dtype)
normalize_add = tbe.vadds(variance, epsilon)
normalize_log = tbe.vlog(normalize_add)
normalize_log_mul = \
tbe.vmuls(normalize_log, tvm.const(-0.5, dtype=cast_fp16_dtype))
normalize_exp = tbe.vexp(normalize_log_mul)
variance_normalize_broadcast = tbe.broadcast(normalize_exp, shape_x)
normalize_mul = tbe.vmul(variance_sub, variance_normalize_broadcast)
# DSL description of the scale and translate calculation process
if begin_params_axis == 0:
scale_mul = tbe.vmul(input_gamma, normalize_mul)
res = tbe.vadd(scale_mul, input_beta)
else:
gamma_broadcast = tbe.broadcast(input_gamma, shape_x)
beta_broadcast = tbe.broadcast(input_beta, shape_x)
scale_mul = tbe.vmul(gamma_broadcast, normalize_mul)
res = tbe.vadd(scale_mul, beta_broadcast)
if dtype == "float16" and \
((tbe_platform.cce_conf.api_check_support
("te.lang.cce.vexp", "float32") and
impl_mode == "high_performance") or
impl_mode == "high_precision"):
mean = tbe.cast_to(mean, "float16")
variance = tbe.cast_to(variance, "float16")
res = tbe.cast_to(res, "float16")
return mean, variance, res |
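# Reference sketch (added for illustration, not part of the operator build): the DSL graph
# assembled above computes standard layer normalization. An equivalent NumPy version, handy
# for checking a kernel's result on small inputs, could look like this:
import numpy as np

def layer_norm_reference(x, gamma, beta, begin_norm_axis, epsilon=1e-12):
    # reduce over the normalized dimensions, exactly as reduce_axis is built above
    axes = tuple(range(begin_norm_axis, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    variance = ((x - mean) ** 2).mean(axis=axes, keepdims=True)
    # the "high_performance" branch rewrites 1/sqrt(var + eps) as exp(-0.5 * log(var + eps)),
    # which is the same quantity
    y = gamma * (x - mean) / np.sqrt(variance + epsilon) + beta
    return mean, variance, y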
is_support_nz_non_aligned | def is_support_nz_non_aligned(ori_shape_x, begin_params_axis, impl_mode):
"""
is_support_nz_non_aligned
"""
if ori_shape_x[-1] % constant.C0_SIZE != 0:
if begin_params_axis != 0:
return True
return False |
layernorm operator interface implementation
calculating: x, gamma, beta
mean = np.mean(x, reduce_axis, keepdims=True)
variance = np.mean(np.power((x - mean),2), reduce_axis, keepdims=True)
result = gamma*((x - mean) / np.sqrt(variance + epsilon)) + beta
Parameters
----------
input_x : dict
shape and dtype of input x, only support float16, float32
input_gamma: dict
shape and dtype of input gamma, only support float16, float32
input_beta: dict
shape and dtype of input beta, only support float16, float32
output_y: dict
shape and dtype of output, only support float16, float32
begin_norm_axis: int
The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: int
The first parameter (beta, gamma) dimension: scale
and centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
epsilon: float,
Minimum positive number greater than 0
kernel_name: str
cce kernel name, default value is "layernorm"
Returns
-------
None | def layer_norm(input_x, input_gamma, input_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
epsilon=1e-12, kernel_name="layer_norm",
impl_mode="high_performance"):
"""
layernorm operator interface implementation
calculating: x, gamma, beta
mean = np.mean(x, reduce_axis, keepdims=True)
variance = np.mean(np.power((x - mean),2), reduce_axis, keepdims=True)
result = gamma*((x - mean) / np.sqrt(variance + epsilon)) + beta
Parameters
----------
input_x : dict
shape and dtype of input x, only support float16, float32
input_gamma: dict
shape and dtype of input gamma, only support float16, float32
input_beta: dict
shape and dtype of input beta, only support float16, float32
output_y: dict
shape and dtype of output, only support float16, float32
begin_norm_axis: int
The first normalization dimension: normalization will be
performed along dimensions `begin_norm_axis : rank(inputs)`
begin_params_axis: int
The first parameter (beta, gamma) dimension: scale
and centering parameters will have dimensions
`begin_params_axis : rank(inputs)` and will be broadcast with the
normalized inputs accordingly.
epsilon: float,
Minimum positive number greater than 0
kernel_name: str
cce kernel name, default value is "layernorm"
Returns
-------
None
"""
shape_x = list(input_x.get("shape"))
input_gamma_shape = input_gamma.get("shape")
input_beta_shape = input_beta.get("shape")
ori_shape_x = list(input_x.get("ori_shape"))
input_format = input_x.get("format").upper()
input_gamma_format = input_gamma.get("format").upper()
input_beta_format = input_beta.get("format").upper()
para_check.check_shape(input_gamma_shape, param_name="input_gamma")
para_check.check_shape(input_beta_shape, param_name="input_beta")
para_check.check_shape(shape_x, param_name="input_x")
check_list = ("float16", "float32")
dtype = input_x.get("dtype").lower()
dtype_gamma = input_gamma.get("dtype").lower()
dtype_beta = input_gamma.get("dtype").lower()
para_check.check_dtype(dtype, check_list, param_name="input_x")
para_check.check_dtype(dtype_gamma, check_list, param_name="input_gamma")
para_check.check_dtype(dtype_beta, check_list, param_name="input_gamma")
shape_gamma = list(input_gamma.get("shape"))
shape_beta = list(input_beta.get("shape"))
flag_vector2cube = False
tik_support = if_tik_support(input_x, input_gamma, input_beta, output_y, output_mean,
output_variance, begin_norm_axis, begin_params_axis, epsilon)
if tik_support:
layer_normalize(input_x, input_gamma, input_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
epsilon, kernel_name)
else:
if input_format == "FRACTAL_NZ":
begin_norm_axis = shape_util.axis_check(len(ori_shape_x), begin_norm_axis)
begin_params_axis = shape_util.axis_check(len(ori_shape_x), begin_params_axis)
flag_vector2cube = _check_vector_to_cube(dtype, ori_shape_x, shape_x, begin_norm_axis, impl_mode)
if input_gamma_format == "FRACTAL_NZ" or input_beta_format == "FRACTAL_NZ":
error_detail = "gamma and beta not support Nz in bert"
error_manager_vector.raise_err_two_input_format_invalid(kernel_name, "input_gamma",
"input_beta", error_detail)
if shape_gamma != shape_beta:
error_detail = "gamma and beta's shape must be same."
error_manager_vector.raise_err_two_input_shape_invalid(kernel_name, "input_gamma",
"input_beta", error_detail)
if ori_shape_x[begin_params_axis:] != shape_gamma:
error_detail = "x or gamma or begin_params_axis is wrong."
error_manager_vector.raise_err_two_input_shape_invalid(kernel_name, "x",
"input_gamma", error_detail)
if len(shape_gamma) > 1:
error_detail = "shape of gamma or beta only support 1D in bert"
error_manager_vector.raise_err_input_shape_invalid(kernel_name, "input_gamma", error_detail)
# make shape_x,shape_gamma,shape_beta dim same in vector case
if not flag_vector2cube:
if begin_params_axis != 0:
for i in range(begin_params_axis):
shape_gamma.insert(i, 1)
shape_gamma[-2] = shape_x[-4]
shape_gamma[-1] = 1
shape_gamma.append(1)
shape_gamma.append(shape_x[-1])
if begin_params_axis > len(ori_shape_x) - 2:
shape_x[-3:] = [shape_x[-3] * shape_x[-2], shape_x[-1]]
shape_gamma[-3:] = [shape_gamma[-3] * shape_gamma[-2], shape_gamma[-1]]
shape_beta = shape_gamma
else:
begin_norm_axis = shape_util.axis_check(len(shape_x), begin_norm_axis)
begin_params_axis = shape_util.axis_check(len(shape_x), begin_params_axis)
if shape_gamma != shape_beta:
error_detail = "gamma and beta's shape must be same."
error_manager_vector.raise_err_two_input_shape_invalid(kernel_name, "input_gamma",
"input_beta", error_detail)
no_need_fix_gamma = False
no_need_fix_beta = False
if shape_x[begin_params_axis:] != shape_gamma:
if len(shape_x) == len(shape_gamma):
no_need_fix_gamma = True
else:
error_detail = "x or gamma or begin_params_axis is wrong."
error_manager_vector.raise_err_two_input_shape_invalid(kernel_name, "x",
"input_gamma", error_detail)
if shape_x[begin_params_axis:] != shape_beta:
if len(shape_x) == len(shape_beta):
no_need_fix_beta = True
else:
error_detail = "x or gamma or begin_params_axis is wrong."
error_manager_vector.raise_err_two_input_shape_invalid(kernel_name, "x",
"input_beta", error_detail)
# make shape_x,shape_gamma,shape_beta dim same
if begin_params_axis != 0 and not no_need_fix_gamma:
for i in range(begin_params_axis):
shape_gamma.insert(i, 1)
if begin_params_axis != 0 and not no_need_fix_beta:
for i in range(begin_params_axis):
shape_beta.insert(i, 1)
attr = {"ori_shape": ori_shape_x}
data_x = tvm.placeholder(shape_x, name="x", dtype=dtype, attrs=attr)
data_gamma = tvm.placeholder(shape_gamma, name="gamma", dtype=dtype)
data_beta = tvm.placeholder(shape_beta, name="beta", dtype=dtype)
if input_format == "FRACTAL_NZ":
dyn_input_x = deepcopy(input_x)
dyn_input_x["shape"] = shape_x
if flag_vector2cube:
layer_norm_cube = LayerNormCube({"ori_shape": ori_shape_x,
"epsilon" : epsilon})
mean, variance, res = \
layer_norm_cube.layer_norm_cube_compute(data_x, data_gamma, data_beta)
elif is_support_nz_non_aligned(ori_shape_x, begin_params_axis, impl_mode):
mean, variance, res = \
nz_non_aligned(data_x, data_gamma, data_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
ori_shape_x, epsilon, kernel_name, impl_mode)
elif layer_norm_unify.is_special_cases(dyn_input_x, input_gamma, input_beta, begin_norm_axis, impl_mode):
__dynamic_template_api(input_x, input_gamma, input_beta, output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis, epsilon, kernel_name, impl_mode)
return
else:
mean, variance, res = \
layer_norm_compute_nz(data_x, data_gamma, data_beta,
output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis,
ori_shape_x, epsilon, kernel_name, impl_mode)
else:
if layer_norm_unify.is_special_cases(input_x, input_gamma, input_beta, begin_norm_axis, impl_mode):
__dynamic_template_api(input_x, input_gamma, input_beta, output_y, output_mean, output_variance,
begin_norm_axis, begin_params_axis, epsilon, kernel_name, impl_mode)
return
else:
mean, variance, res = \
layer_norm_compute(data_x, data_gamma, data_beta,
output_y, output_mean,
output_variance,
begin_norm_axis, begin_params_axis,
epsilon, kernel_name, impl_mode)
with tvm.target.cce():
sch = tbe.auto_schedule([res, mean, variance])
config = {"print_ir" : False,
"name" : kernel_name,
"tensor_list": [data_x, data_gamma,
data_beta, res, mean, variance]}
tbe.cce_build_code(sch, config) |
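# Minimal invocation sketch (hypothetical shapes; assumes a CANN/TBE build environment).
# The operator entry above takes dict descriptors rather than tensors; for an ND float16
# input normalized over the last axis it could be driven roughly like this (builds the
# kernel, does not execute it):
# x_desc     = {"shape": (32, 128, 1024), "ori_shape": (32, 128, 1024), "dtype": "float16", "format": "ND"}
# gamma_desc = {"shape": (1024,), "ori_shape": (1024,), "dtype": "float16", "format": "ND"}
# beta_desc  = {"shape": (1024,), "ori_shape": (1024,), "dtype": "float16", "format": "ND"}
# layer_norm(x_desc, gamma_desc, beta_desc, output_y={}, output_mean={}, output_variance={},
#            begin_norm_axis=2, begin_params_axis=2, epsilon=1e-5, kernel_name="layer_norm_demo")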
get_op_support_info | def get_op_support_info(input_dy,
input_x,
input_variance,
input_mean,
input_gamma,
output_pd_x,
res_for_gamma,
kernel_name="layer_norm_x_backprop_v2"):
"""
get_op_support_info
"""
shape_x = input_x.get("shape")
shape_mean = input_mean.get("shape")
shape_gamma = input_gamma.get("shape")
format_dy = input_dy.get("format").upper()
if format_dy in ("ND", "NCHW", "NHWC", "NC1HWC0"):
if len(shape_x) == len(shape_gamma):
axis_split_matrix = []
flag = -1
for i, (xtem, mean) in enumerate(zip(shape_x, shape_mean)):
if xtem != mean:
flag = i
break
if flag == -1:
for i in range(len(shape_x) - 1):
split_0 = [
SplitInput([0, [i], [-1], [-1]], [1, [i], [-1], [-1]], [2, [i], [-1], [-1]],
[3, [i], [-1], [-1]], [4, [i], [-1], [-1]]),
SplitOutput([0, [i]])
]
axis_split_matrix.append(split_0)
else:
for i in range(flag):
split_0 = [
SplitInput([0, [i], [-1], [-1]], [1, [i], [-1], [-1]], [2, [i], [-1], [-1]],
[3, [i], [-1], [-1]], [4, [i], [-1], [-1]]),
SplitOutput([0, [i]], [1, [i]])
]
axis_split_matrix.append(split_0)
else:
axis_split_matrix = None
else:
axis_split_matrix = None
axis_reduce_list = None
op_cal_info_in_json = get_op_cal_info(axis_split_matrix, axis_reduce_list, 0, 0)
return op_cal_info_in_json |
check dynamic format branch | def _check_dynamic_format(shape_dy, shape_gamma, c_0):
"""
check dynamic format branch
"""
if len(shape_dy) < 2 or len(shape_gamma) != 1:
return True
if shape_dy[-1] % c_0 != 0 or shape_dy[-2] % c_0 != 0 \
or shape_gamma[-1] % c_0 != 0:
return True
return True |
function of selecting dynamic format
Parameters
----------
input_dy : dict
shape and dtype of input dy, only support float16, float32
input_x: dict
shape and dtype of input x, only support float16, float32
input_variance: dict
shape and dtype of input variance, only support float16, float32
input_mean: dict
shape and dtype of input mean, only support float16, float32
input_gamma: dict
shape and dtype of input gamma, only support float16, float32
output_pd_x: dict
shape and dtype of output, only support float16, float32
kernel_name: str
cce kernel name, default value is "layer_norm_x_backprop_v2"
Returns
-------
None | def op_select_format(input_dy,
input_x,
input_variance,
input_mean,
input_gamma,
output_pd_x,
res_for_gamma,
kernel_name="layer_norm_x_backprop_v2"):
"""
function of selecting dynamic format
Parameters
----------
input_dy : dict
shape and dtype of input dy, only support float16, float32
input_x: dict
shape and dtype of input x, only support float16, float32
input_variance: dict
shape and dtype of input variance, only support float16, float32
input_mean: dict
shape and dtype of input mean, only support float16, float32
input_gamma: dict
shape and dtype of input gamma, only support float16, float32
output_pd_x: dict
shape and dtype of output, only support float16, float32
kernel_name: str
cce kernel name, default value is "layer_norm_x_backprop_v2"
Returns
-------
None
"""
shape_dy = input_dy.get("ori_shape")
shape_gamma = input_gamma.get("ori_shape")
shape_dy = shape_util.scalar2tensor_one(shape_dy)
shape_gamma = shape_util.scalar2tensor_one(shape_gamma)
c_0 = 16
if _check_dynamic_format(shape_dy, shape_gamma, c_0):
input0 = util_select_op_base.gen_param(classify="input0",
name="dy",
datatype="float16,float16,float16,float,"
"float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
input1 = util_select_op_base.gen_param(classify="input1",
name="x",
datatype="float16,float16,float16,float,"
"float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
input2 = util_select_op_base.gen_param(classify="input2",
name="variance",
datatype="float16,float16,float16,float,"
"float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
input3 = util_select_op_base.gen_param(classify="input3",
name="mean",
datatype="float16,float16,float16,float,"
"float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
input4 = util_select_op_base.gen_param(classify="input4",
name="gamma",
datatype="float16,float16,float16,float,"
"float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
output0 = util_select_op_base.gen_param(classify="output0",
name="pd_x",
datatype="float16,float16,float16,float,"
"float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
output1 = util_select_op_base.gen_param(classify="output1",
name="res_for_gamma",
datatype="float,float,float,float,"
"float,float",
format="NCHW,NHWC,ND,NCHW,NHWC,ND")
else:
input0 = util_select_op_base.gen_param(classify="input0",
name="dy",
datatype="float16, float,float16,float16,"
"float16,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NHWC,ND,"
"NCHW,NHWC,ND")
input1 = util_select_op_base.gen_param(classify="input1",
name="x",
datatype="float16, float,float16,float16,"
"float16,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NHWC,ND,"
"NCHW,NHWC,ND")
input2 = util_select_op_base.gen_param(classify="input2",
name="variance",
datatype="float16, float,float16,float16,"
"float16,float,float,float",
format="ND,ND,NCHW,NHWC,ND,NCHW,"
"NHWC,ND")
input3 = util_select_op_base.gen_param(classify="input3",
name="mean",
datatype="float16, float,float16,float16,"
"float16,float,float,float",
format="ND,ND,NCHW,NHWC,ND,NCHW,"
"NHWC,ND")
input4 = util_select_op_base.gen_param(classify="input4",
name="gamma",
datatype="float16, float,float16,float16,"
"float16,float,float,float",
format="ND,ND,NCHW,NHWC,ND,NCHW,"
"NHWC,ND")
output0 = util_select_op_base.gen_param(classify="output0",
name="pd_x",
datatype="float16, float,float16,float16,"
"float16,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NHWC,"
"ND,NCHW,NHWC,ND")
output1 = util_select_op_base.gen_param(classify="output1",
name="res_for_gamma",
datatype="float, float,float,float,"
"float,float,float,float",
format="FRACTAL_NZ,FRACTAL_NZ,NCHW,NHWC,"
"ND,NCHW,NHWC,ND")
param_list = [input0, input1, input2, input3, input4, output0, output1]
param_dynamic_in_json = util_select_op_base.get_dynamic_param_in_json(param_list)
return param_dynamic_in_json |
check parameters including shape_dy, shape_x, shape_var,
shape_mean, shape_gamma, dtype and kernel_name
Parameters
----------
params_map: dict
{"shape_dy": shape_dy, "shape_x": shape_x, "shape_var": shape_variance,
"shape_mean": shape_mean, "shape_gamma": shape_gamma,
"dtype": dtype, "kernel_name": kernel_name}
Returns
-------
None | def _check_params(params_map):
"""
check parameters including shape_dy, shape_x, shape_var,
shape_mean, shape_gamma, dtype and kernel_name
Parameters
----------
params_map: dict
{"shape_dy": shape_dy, "shape_x": shape_x, "shape_var": shape_variance,
"shape_mean": shape_mean, "shape_gamma": shape_gamma,
"dtype": dtype, "kernel_name": kernel_name}
Returns
-------
None
"""
check_list = ("float16", "float32")
para_check.check_dtype(params_map.get("dtype"), check_list, param_name="input_dy")
_check_shape(params_map) |
check parameters including shape_dy, shape_x, shape_var,
shape_mean and shape_gamma
Parameters
----------
params_map: dict
{"shape_dy": shape_dy, "shape_x": shape_x, "shape_var": shape_variance,
"shape_mean": shape_mean, "shape_gamma": shape_gamma,
"dtype": dtype, "kernel_name": kernel_name}
Returns
-------
None | def _check_shape(params_map):
"""
check parameters including shape_dy, shape_x, shape_var,
shape_mean and shape_gamma
Parameters
----------
params_map: dict
{"shape_dy": shape_dy, "shape_x": shape_x, "shape_var": shape_variance,
"shape_mean": shape_mean, "shape_gamma": shape_gamma,
"dtype": dtype, "kernel_name": kernel_name}
Returns
-------
None
"""
if operator.ne(tuple(params_map.get("shape_dy")), tuple(params_map.get("shape_x"))):
error_detail = "shape of input_dy and input_x should be same"
error_manager_vector.raise_err_two_input_shape_invalid("layer_norm_x_backprop_v2", "input_dy", "input_x",
error_detail)
if operator.ne(tuple(params_map.get("shape_var")), tuple(params_map.get("shape_mean"))):
error_detail = "shape of input_variance and input_mean should be same"
error_manager_vector.raise_err_two_input_shape_invalid("layer_norm_x_backprop_v2", "input_variance",
"input_mean", error_detail)
shape_x = params_map.get("shape_x")
shape_mean = params_map.get("shape_mean")
shape_gamma = params_map.get("shape_gamma")
para_check.check_shape(shape_x, param_name="input_x")
para_check.check_shape(shape_mean, param_name="input_mean")
para_check.check_shape(shape_gamma, param_name="input_gamma")
_check_shape_mean(shape_x, shape_mean)
_check_shape_gamma(shape_x, shape_gamma) |
check if parameter shape_mean meets the requirements of function
Parameters
----------
shape_x: list or tuple
shape of x
shape_mean: list or tuple
shape of mean
Returns
-------
None | def _check_shape_mean(shape_x, shape_mean):
"""
check if parameter shape_mean meets the requirements of function
Parameters
----------
shape_x: list or tuple
shape of x
shape_mean: list or tuple
shape of mean
Returns
-------
None
"""
if len(shape_x) != len(shape_mean):
error_detail = "length of shape_x and shape_mean should be same"
error_manager_vector.raise_err_two_input_shape_invalid("layer_norm_x_backprop_v2", "input_x", "input_mean",
error_detail)
if shape_mean[-1] != 1:
error_detail = "value of shape_mean's last dim must be 1"
error_manager_vector.raise_err_input_shape_invalid("layer_norm_x_backprop_v2", "input_mean", error_detail)
flag = -1
for i, (xtem, mean) in enumerate(zip(shape_x, shape_mean)):
if xtem != mean:
flag = i
break
if flag != -1:
for i, mean in enumerate(shape_mean):
if i < flag:
continue
if mean != 1:
error_detail = "value of shape_mean must be 1"
error_manager_vector.raise_err_input_shape_invalid("layer_norm_x_backprop_v2", "input_mean",
error_detail)
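
# Illustrative note (hypothetical shapes): with shape_x = (2, 3, 4), shape_mean
# values such as (2, 3, 1), (2, 1, 1) or (1, 1, 1) pass the check above, while
# (1, 3, 1) is rejected because every axis from the first mismatch onward must be 1.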

def _check_shape_gamma(shape_x, shape_gamma):
"""
check if parameter shape_gamma meets the requirements of function
Parameters
----------
shape_x: list or tuple
shape of x
shape_gamma: list or tuple
shape of gamma
Returns
-------
None
"""
if len(shape_gamma) > len(shape_x):
error_detail = "length of shape_gamma can not be longer than shape_x"
error_manager_vector.raise_err_two_input_shape_invalid("layer_norm_x_backprop_v2", "input_gamma", "input_x",
error_detail)
for xtem, gamma in zip(reversed(shape_x), reversed(shape_gamma)):
if xtem != gamma:
error_detail = "value of shape_gamma is wrong"
error_manager_vector.raise_err_input_shape_invalid("layer_norm_x_backprop_v2", "input_gamma", error_detail)

def _update_gamma_shape(shape_x, shape_gamma):
"""
update shape_gamma for subsequent calculation
Parameters
----------
shape_x: list or tuple
shape of x
shape_gamma: list or tuple
shape of gamma
Returns
-------
shape_gamma_new: tuple
new shape_gamma after update
params_axis: tuple
the list of axis for gamma reduce_sum
"""
params_axis_tmp = []
if len(shape_x) != len(shape_gamma):
sub = len(shape_x) - len(shape_gamma)
shape_gamma = list(shape_gamma)
for i in range(sub):
shape_gamma.insert(0, 1)
params_axis_tmp.append(i)
shape_gamma_new = tuple(shape_gamma)
params_axis = tuple(params_axis_tmp)
return shape_gamma_new, params_axis
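
# Illustrative example (hypothetical shapes): with shape_x = (2, 3, 4) and
# shape_gamma = (4,), gamma is left-padded with ones so it can be broadcast
# against x, and the padded axes become the reduce axes for the gamma gradient:
#     _update_gamma_shape((2, 3, 4), (4,))  # -> ((1, 1, 4), (0, 1))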

def _get_data_gm(shapes, dtype):
"""
get placeholders of data_dy, data_x, data_variance, data_mean and data_gamma
Parameters
----------
shapes: dict
{"shape_dy": shape_dy, "shape_x": shape_x, "shape_var": shape_variance,
"shape_mean": shape_mean, "shape_gamma": shape_gamma}
dtype: str
the data type
Returns
-------
data_gm: tuple
(data_dy, data_x, data_variance, data_mean, data_gamma)
"""
data_dy = tvm.placeholder(shapes.get("shape_dy"), name="data_dy", dtype=dtype)
data_x = tvm.placeholder(shapes.get("shape_x"), name="data_x", dtype=dtype)
data_variance = tvm.placeholder(shapes.get("shape_var"), name="data_variance", dtype=dtype)
data_mean = tvm.placeholder(shapes.get("shape_mean"), name="data_mean", dtype=dtype)
data_gamma = tvm.placeholder(shapes.get("shape_gamma"), name="data_gamma", dtype=dtype)
data_gm = (data_dy, data_x, data_variance, data_mean, data_gamma)
return data_gm

def _get_params(shape_x, shape_mean, shape_gamma):
"""
compute parameters including param_axis, reduce_axis and mean_num
Parameters
----------
shape_x: list or tuple
shape of x
shape_mean: list or tuple
shape of mean
shape_gamma: list or tuple
shape of gamma
Returns
-------
params: dict
{"param_axis": param_axis, "reduce_axis": reduce_axis,
"mean_num": mean_num}
"""
param_axis = _update_gamma_shape(shape_x, shape_gamma)[1]
reduce_axis_tmp = []
flag = -1
for i, (xtem, mean) in enumerate(zip(shape_x, shape_mean)):
if xtem != mean:
flag = i
break
if flag != -1:
for i in range(flag, len(shape_x)):
reduce_axis_tmp.append(i)
else:
reduce_axis_tmp.append(len(shape_x) - 1)
reduce_axis = tuple(reduce_axis_tmp)
mean_num = 1.0
for i in reduce_axis:
mean_num *= shape_x[i]
params = {"param_axis": param_axis, "reduce_axis": reduce_axis, "mean_num": mean_num}
return params
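
# Illustrative example (hypothetical shapes): for an ND layer norm over the last
# axis with shape_x = (2, 3, 4), shape_mean = (2, 3, 1) and shape_gamma = (4,),
# the helpers above yield
#     param_axis  = (0, 1)   # axes summed when reducing the gamma gradient
#     reduce_axis = (2,)     # axes where x and mean differ, i.e. the normalized axis
#     mean_num    = 4.0      # number of elements averaged per mean value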

def _get_pd_xl(data, shape_x):
"""
compute pd_xl according to data_dy, data_gamma and shape_x
Parameters
----------
data: dict
placeholders after cast
shape_x: list or tuple
shape of x
Returns
-------
pd_xl: tvm.tensor
data_dy*data_gamma
"""
data_gamma_cast = tbe.broadcast(data.get("data_gamma"), shape_x)
pd_xl = tbe.vmul(data_gamma_cast, data.get("data_dy"))
return pd_xl

def _get_pd_var_front(data, cast_dtype):
"""
compute front part of pd_var according to data_variance
Parameters
----------
data: dict
placeholders after cast
cast_dtype: str
if api_check_support float32, then equal to float32 else float16
Returns
-------
pd_var_1: tvm.tensor
np.power((data_variance + EPSLON), (-1.5))
var_elta_2: tvm.tensor
np.power((data_variance + EPSLON), (-0.5))
"""
var_elta = tbe.vadds(data.get("data_variance"), tvm.const(EPSLON, dtype=cast_dtype))
var_elta_log = tbe.vlog(var_elta)
var_elta_mul = tbe.vmuls(var_elta_log, tvm.const(-0.5, dtype=cast_dtype))
var_elta_2 = tbe.vexp(var_elta_mul)
pdvar1_mul = tbe.vmul(var_elta_2, var_elta_2)
pd_var_1 = tbe.vmul(pdvar1_mul, var_elta_2)
return pd_var_1, var_elta_2

def _get_pd_var(data, params, shape_x, pd_xl, cast_dtype):
"""
compute pd_var according to data_x, data_mean, reduce_axis and pd_xl
Parameters
----------
data: dict
placeholders after cast
params: dict
{"param_axis": param_axis, "reduce_axis": reduce_axis,
"mean_num": mean_num}
shape_x: list or tuple
shape of x
pd_xl: tvm.tensor
data_dy*data_gamma
cast_dtype: str
if api_check_support float32, then equal to float32 else float16
Returns
-------
pd_var: tvm.tensor
np.sum(((-0.5)*pd_xl*(data_x - data_mean)
*np.power((data_variance + EPSLON), (-1.5))), reduce_axis,
keepdims=True)
var_elta_2: tvm.tensor
np.power((data_variance + EPSLON), (-0.5))
sub_x_mean: tvm.tensor
data_x - data_mean
"""
pd_var_1, var_elta_2 = _get_pd_var_front(data, cast_dtype)
data_mean_cast = tbe.broadcast(data.get("data_mean"), shape_x)
sub_x_mean = tbe.vsub(data.get("data_x"), data_mean_cast)
pdvar_mul1 = tbe.vmul(sub_x_mean, pd_xl)
pdvar_sum = tbe.sum(pdvar_mul1, params.get("reduce_axis"), keepdims=True)
pdvar_mul3 = tbe.vmul(pdvar_sum, pd_var_1)
pd_var = tbe.vmuls(pdvar_mul3, tvm.const(-0.5, dtype=cast_dtype))
return pd_var, var_elta_2, sub_x_mean

def _get_pd_mean(params, pd_xl, pd_var, var_elta_2, sub_x_mean, cast_dtype):
"""
compute pd_mean according to reduce_axis, pd_xl, pd_var, var_elta_2
and sub_x_mean
Parameters
----------
params: dict
{"param_axis": param_axis, "reduce_axis": reduce_axis,
"mean_num": mean_num}
pd_xl: tvm.tensor
data_dy*data_gamma
pd_var: tvm.tensor
np.sum(((-0.5)*pd_xl*(data_x - data_mean)
*np.power((data_variance + EPSLON), (-1.5))), reduce_axis,
keepdims=True)
var_elta_2: tvm.tensor
np.power((data_variance + EPSLON), (-0.5))
sub_x_mean: tvm.tensor
data_x - data_mean
cast_dtype: str
if api_check_support float32, then equal to float32 else float16
Returns
-------
pd_mean: tvm.tensor
np.sum(((-1.0)*pd_xl
*np.power((data_variance + EPSLON), (-0.5))), reduce_axis,
keepdims=True)
+ pd_var*(1.0/m)*np.sum(((-2.0)*(data_x - data_mean)),
reduce_axis, keepdims=True)
"""
pdmean1_sum = tbe.sum(pd_xl, params.get("reduce_axis"), keepdims=True)
pdmean1_mul = tbe.vmul(pdmean1_sum, var_elta_2)
pd_mean_1 = tbe.vmuls(pdmean1_mul, tvm.const(-1.0, dtype=cast_dtype))
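# Note: the second term of the formula in the docstring,
# pd_var * (1.0/m) * np.sum(-2.0 * (data_x - data_mean)), is identically zero
# (the sum of (x - mean) over the reduce axes vanishes by the definition of the
# mean), so only the first term is materialized and returned here.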
return pd_mean_1

def _get_pd_x(data, params, shape_x, dtype, cast_dtype):
"""
compute pd_x, res_for_gamma according to data, params and shape_x
`pd_x = pd_xl * np.power((data_variance + EPSLON), (-0.5))`
`+ pd_var * (2.0 / m) * (data_x - data_mean) + pd_mean * (1.0 / m)`
`res_for_gamma = (data_x - data_mean) * np.power((data_variance + EPSLON), (-0.5))`
Parameters
----------
data: dict
placeholders after cast
params: dict
{"param_axis": param_axis, "reduce_axis": reduce_axis,
"mean_num": mean_num}
shape_x: list or tuple
shape of x
dtype: str
the data type
cast_dtype: str
if api_check_support float32, then equal to float32 else float16
Returns
-------
pd_x: tvm.tensor
partial derivation of x
res_for_gamma: tvm.tensor
`(data_x - data_mean)*np.power((data_variance + EPSLON), (-0.5))`
"""
pd_xl = _get_pd_xl(data, shape_x)
pd_var, var_elta_2, sub_x_mean = _get_pd_var(data, params, shape_x, pd_xl, cast_dtype)
pd_mean = _get_pd_mean(params, pd_xl, pd_var, var_elta_2, sub_x_mean, cast_dtype)
var_elta_2_cast = tbe.broadcast(var_elta_2, shape_x)
pd_x_1 = tbe.vmul(var_elta_2_cast, pd_xl)
res_for_gamma = tbe.vmul(var_elta_2_cast, sub_x_mean)
pd_var = tbe.vmuls(pd_var, tvm.const((2 * (params.get("mean_num") ** (-1))), dtype=cast_dtype))
pdx2_broad = tbe.broadcast(pd_var, shape_x)
pd_x_2 = tbe.vmul(pdx2_broad, sub_x_mean)
pd_x_3 = tbe.vmuls(pd_mean, tvm.const((params.get("mean_num") ** (-1)), dtype=cast_dtype))
pdx_broad = tbe.broadcast(pd_x_3, shape_x)
pdx_add = tbe.vadd(pd_x_1, pd_x_2)
pd_x_ub = tbe.vadd(pdx_add, pdx_broad)
if dtype == "float16" and cast_dtype == "float32":
pd_x = tbe.cast_to(pd_x_ub, dtype)
else:
return pd_x_ub, res_for_gamma
return pd_x, res_for_gamma
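
# Minimal NumPy sketch of the same math, kept for reference only: it is not
# called by the operator, it mirrors the formulas in the docstring of _get_pd_x,
# and it assumes fp32 ND inputs with EPSLON as defined in this file. The helper
# name and argument names are illustrative, not part of the public interface.
def _pd_x_numpy_reference(dy, x, variance, mean, gamma, reduce_axis, m):
    """Hypothetical helper: NumPy version of the pd_x / res_for_gamma math."""
    import numpy as np
    pd_xl = dy * gamma
    rstd = np.power(variance + EPSLON, -0.5)  # (variance + eps) ** -0.5
    pd_var = np.sum(-0.5 * pd_xl * (x - mean) * np.power(variance + EPSLON, -1.5),
                    axis=reduce_axis, keepdims=True)
    # the second pd_mean term of the docstring is identically zero, see _get_pd_mean
    pd_mean = np.sum(-1.0 * pd_xl * rstd, axis=reduce_axis, keepdims=True)
    pd_x = pd_xl * rstd + pd_var * (2.0 / m) * (x - mean) + pd_mean * (1.0 / m)
    res_for_gamma = (x - mean) * rstd
    return pd_x, res_for_gamma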

def _get_res(data, params, shape_x, dtype, cast_dtype):
"""
compute pd_x and res_for_gamma according to data, params and shape_x
Parameters
----------
data: dict
placeholders after cast
params: dict
{"param_axis": param_axis, "reduce_axis": reduce_axis,
"mean_num": mean_num}
shape_x: list or tuple
shape of x
dtype: str
the data type
cast_dtype: str
if api_check_support float32, then equal to float32 else float16
Returns
-------
pd_x: tvm.tensor
partial derivation of x
res_for_gamma: tvm.tensor
(data_x - data_mean)*np.power((data_variance + EPSLON), (-0.5))
"""
pd_x, res_for_gamma = _get_pd_x(data, params, shape_x, dtype, cast_dtype)
return pd_x, res_for_gamma

def _get_pds(data_dy, data_x, data_variance, data_mean, data_gamma, shape_gamma_ori):
"""
get params and data, compute pd_x and res_for_gamma.
Parameters
----------
data_dy: TVM tensor
the placeholder of dy input data
data_x: TVM tensor
the placeholder of x input data
data_variance: TVM tensor
the placeholder of variance input data
data_mean: TVM tensor
the placeholder of mean input data
data_gamma: TVM tensor
the placeholder of gamma input data
shape_gamma_ori: list or tuple
original shape of gamma
Returns
-------
pd_x: tvm.tensor
partial derivation of x
res_for_gamma: tvm.tensor
(data_x - data_mean)*np.power((data_variance + EPSLON), (-0.5))
"""
dtype = data_dy.dtype.lower()
shape_x = shape_util.shape_to_list(data_x.shape)
shape_mean = shape_util.shape_to_list(data_mean.shape)
has_improve_precision = False
cast_dtype = dtype
if dtype == "float16" and tbe_platform.cce_conf.api_check_support("te.lang.cce.vexp", "float32"):
has_improve_precision = True
cast_dtype = "float32"
params = _get_params(shape_x, shape_mean, shape_gamma_ori)
if has_improve_precision:
data_dy = tbe.cast_to(data_dy, "float32")
data_x = tbe.cast_to(data_x, "float32")
data_variance = tbe.cast_to(data_variance, "float32")
data_mean = tbe.cast_to(data_mean, "float32")
data_gamma = tbe.cast_to(data_gamma, "float32")
data = {
"data_dy" : data_dy,
"data_x" : data_x,
"data_variance": data_variance,
"data_mean" : data_mean,
"data_gamma" : data_gamma
}
pd_x, res_for_gamma = _get_res(data, params, shape_x, dtype, cast_dtype)
return pd_x, res_for_gamma

def layer_norm_x_backprop_v2_compute(input_dy,
input_x,
input_variance,
input_mean,
input_gamma,
output_pd_x,
kernel_name="layer_norm_x_backprop_v2"):
"""
DSL description of the layernorm_grad operator's mathematical
calculation process
Parameters
----------
input_dy : dict
shape and dtype of input dy, only support float16, float32
input_x: dict
shape and dtype of input x, only support float16, float32
input_variance: dict
shape and dtype of input variance, only support float16, float32
input_mean: dict
shape and dtype of input mean, only support float16, float32
input_gamma: dict
shape and dtype of input gamma, only support float16, float32
output_pd_x: dict
shape and dtype of output, only support float16, float32
kernel_name: str
cce kernel name, default value is "layer_norm_x_backprop_v2"
Returns
-------
res_list: list
[pd_x, res_for_gamma]
"""
pd_x, res_for_gamma = _get_pds(input_dy, input_x, input_variance, input_mean, input_gamma, input_gamma.shape)
res_list = [pd_x, res_for_gamma]
return res_list
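
# Hedged usage sketch (illustration only, never called by the operator): it shows
# how the placeholder helper and the DSL compute function above fit together.
# The shapes, dtype and helper name below are hypothetical.
def _example_build_compute():
    """Hypothetical helper: builds placeholders and runs the compute graph once."""
    shapes = {
        "shape_dy": (2, 3, 4), "shape_x": (2, 3, 4), "shape_var": (2, 3, 1),
        "shape_mean": (2, 3, 1), "shape_gamma": (4,)
    }
    data_dy, data_x, data_var, data_mean, data_gamma = _get_data_gm(shapes, "float32")
    # output_pd_x is only a description dict in the real entry point; the compute
    # function itself does not read it, so None is enough for this sketch.
    return layer_norm_x_backprop_v2_compute(data_dy, data_x, data_var,
                                             data_mean, data_gamma, None)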

def update_shape_nz(shape_x, shape_var, shape_gamma):
"""
function of updating Nz shape
"""
# ND shape of x >= two dim
# Nz shape of x >= four dim
len_x = len(shape_x)
nz_begin = len_x - 4
shape_x_nz = []
for i in range(0, nz_begin):
shape_x_nz.append(shape_x[i])
shape_x_nz.append(shape_x[nz_begin])
shape_x_nz.append(shape_x[nz_begin + 1] * shape_x[nz_begin + 2])
shape_x_nz.append(shape_x[nz_begin + 2])
# ND shape of var >= two dim
shape_var_nz = []
len_var = len(shape_var)
var_nz_begin = len_var - 2
for i in range(0, var_nz_begin):
shape_var_nz.append(shape_var[i])
shape_var_nz.append(1)
shape_var_nz.append(shape_var[var_nz_begin])
shape_var_nz.append(1)
# ND shape of gamma is one dim
shape_gamma_nz = []
for i in range(0, nz_begin):
shape_gamma_nz.append(1)
shape_gamma_nz.append(shape_x[nz_begin])
shape_gamma_nz.append(1)
shape_gamma_nz.append(shape_x[nz_begin + 2])
reduce_nz_axis = []
param_nz_axis = []
for i, (xtem, var) in enumerate(zip(shape_x_nz, shape_var_nz)):
if xtem != var:
reduce_nz_axis.append(i)
for i, (xtem, gamma) in enumerate(zip(shape_x_nz, shape_gamma_nz)):
if xtem != gamma or (xtem == 1 and gamma == 1):
param_nz_axis.append(i)
mean_nz_num = 1.0
for i in reduce_nz_axis:
mean_nz_num *= shape_x_nz[i]
param_nz = {
"shape_x_nz" : shape_x_nz,
"shape_var_nz" : shape_var_nz,
"shape_gamma_nz": shape_gamma_nz,
"reduce_axis" : reduce_nz_axis,
"param_axis" : param_nz_axis,
"mean_num" : mean_nz_num
}
return param_nz
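
# Illustrative trace (hypothetical shapes): for an ND x of (512, 1024) stored as
# FRACTAL_NZ shape_x = (64, 32, 16, 16), with shape_var = (512, 1) and a 1-D gamma,
# the function above produces
#     shape_x_nz     = [64, 512, 16]
#     shape_var_nz   = [1, 512, 1]
#     shape_gamma_nz = [64, 1, 16]
#     reduce_axis    = [0, 2]      # the split 1024 (N1/N0) axes
#     param_axis     = [1]         # the fused 512 (M1*M0) axis
#     mean_num       = 1024.0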

def _get_data_nz(param_nz, dtype):
"""
get placeholders of data_dy, data_x, data_variance, data_mean and data_gamma
"""
data_dy = tvm.placeholder(param_nz.get("shape_x_nz"), name="data_dy", dtype=dtype)
data_x = tvm.placeholder(param_nz.get("shape_x_nz"), name="data_x", dtype=dtype)
data_variance = tvm.placeholder(param_nz.get("shape_var_nz"), name="data_variance", dtype=dtype)
data_mean = tvm.placeholder(param_nz.get("shape_var_nz"), name="data_mean", dtype=dtype)
data_gamma = tvm.placeholder(param_nz.get("shape_gamma_nz"), name="data_gamma", dtype=dtype)
data_gm = (data_dy, data_x, data_variance, data_mean, data_gamma)
return data_gm

def _get_pd_xl_nz(data, param_nz):
"""
compute pd_xl according to data_dy, data_gamma and shape_x
"""
data_gamma_cast = tbe.broadcast(data.get("data_gamma"), param_nz.get("shape_x_nz"))
pd_xl = tbe.vmul(data_gamma_cast, data.get("data_dy"))
return pd_xl

def _get_pd_var_front_nz(data, cast_dtype):
"""
compute front part of pd_var according to data_variance
"""
var_elta = tbe.vadds(data.get("data_variance"), tvm.const(EPSLON, dtype=cast_dtype))
var_elta_log = tbe.vlog(var_elta)
var_elta_mul = tbe.vmuls(var_elta_log, tvm.const(-0.5, dtype=cast_dtype))
var_elta_2 = tbe.vexp(var_elta_mul)
pdvar1_mul = tbe.vmul(var_elta_2, var_elta_2)
pd_var_1 = tbe.vmul(pdvar1_mul, var_elta_2)
return pd_var_1, var_elta_2

def _get_pd_var_nz(data, param_nz, pd_xl, cast_dtype):
"""
compute pd_var according to data_x, data_mean, reduce_axis and pd_xl
"""
pd_var_1, var_elta_2 = _get_pd_var_front_nz(data, cast_dtype)
data_mean_cast = tbe.broadcast(data.get("data_mean"), param_nz.get("shape_x_nz"))
sub_x_mean = tbe.vsub(data.get("data_x"), data_mean_cast)
pdvar_mul1 = tbe.vmul(sub_x_mean, pd_xl)
pdvar_sum = tbe.sum(pdvar_mul1, param_nz.get("reduce_axis"), keepdims=True)
pdvar_mul3 = tbe.vmul(pdvar_sum, pd_var_1)
pd_var = tbe.vmuls(pdvar_mul3, tvm.const(-0.5, dtype=cast_dtype))
return pd_var, var_elta_2, sub_x_mean