response | instruction
---|---
compute pd_mean according to reduce_axis, pd_xl, pd_var,
var_elta_2 and sub_x_mean | def _get_pd_mean_nz(param_nz, pd_xl, pd_var, var_elta_2, sub_x_mean, cast_dtype):
"""
compute pd_mean according to reduce_axis, pd_xl, pd_var,
var_elta_2 and sub_x_mean
"""
pdmean1_sum = tbe.sum(pd_xl, param_nz.get("reduce_axis"), keepdims=True)
pdmean1_mul = tbe.vmul(pdmean1_sum, var_elta_2)
pd_mean_1 = tbe.vmuls(pdmean1_mul, tvm.const(-1.0, dtype=cast_dtype))
return pd_mean_1 |
compute pd_x according to data, params and shape_x | def _get_pd_x_nz(data, param_nz, dtype, cast_dtype):
"""
compute pd_x according to data, params and shape_x
"""
pd_xl = _get_pd_xl_nz(data, param_nz)
pd_var, var_elta_2, sub_x_mean = _get_pd_var_nz(data, param_nz, pd_xl, cast_dtype)
pd_mean = _get_pd_mean_nz(param_nz, pd_xl, pd_var, var_elta_2, sub_x_mean, cast_dtype)
var_elta_2_cast = _broadcast_nz(var_elta_2, param_nz.get("shape_x_nz"))
pd_x_1 = tbe.vmul(var_elta_2_cast, pd_xl)
res_for_gamma = tbe.vmul(var_elta_2_cast, sub_x_mean)
pd_var = tbe.vmuls(pd_var, tvm.const((2 * (param_nz.get("mean_num") ** (-1))), dtype=cast_dtype))
pdx2_broad = _broadcast_nz(pd_var, param_nz.get("shape_x_nz"))
pd_x_2 = tbe.vmul(pdx2_broad, sub_x_mean)
pd_x_3 = tbe.vmuls(pd_mean, tvm.const((param_nz.get("mean_num") ** (-1)), dtype=cast_dtype))
pdx_broad = _broadcast_nz(pd_x_3, param_nz.get("shape_x_nz"))
pdx_add = tbe.vadd(pd_x_1, pd_x_2)
pd_x_ub = tbe.vadd(pdx_add, pdx_broad)
if dtype == "float16" and cast_dtype == "float32":
pd_x = tbe.cast_to(pd_x_ub, dtype)
else:
return pd_x_ub, res_for_gamma
return pd_x, res_for_gamma |
compute pd_x, pd_gamma, pd_beta according to data, params and shape_x | def _get_res_nz(data, param_nz, dtype, cast_dtype):
"""
compute pd_x, pd_gamma, pd_beta according to data, params and shape_x
"""
pd_x, res_for_gamma = _get_pd_x_nz(data, param_nz, dtype, cast_dtype)
return pd_x, res_for_gamma |
get params and data, compute pd_x, pd_gamma, pd_beta. | def _get_pds_nz(data_dy, data_x, data_variance, data_mean, data_gamma, param_nz):
"""
get params and data, compute pd_x, pd_gamma, pd_beta.
"""
dtype = data_dy.dtype.lower()
has_improve_precision = False
cast_dtype = dtype
if dtype == "float16" and tbe_platform.cce_conf.api_check_support("te.lang.cce.vexp", "float32"):
has_improve_precision = True
cast_dtype = "float32"
if has_improve_precision:
data_dy = tbe.cast_to(data_dy, "float32")
data_x = tbe.cast_to(data_x, "float32")
data_variance = tbe.cast_to(data_variance, "float32")
data_mean = tbe.cast_to(data_mean, "float32")
data_gamma = tbe.cast_to(data_gamma, "float32")
data = {
"data_dy" : data_dy,
"data_x" : data_x,
"data_variance": data_variance,
"data_mean" : data_mean,
"data_gamma" : data_gamma
}
pd_x, res_for_gamma = _get_res_nz(data, param_nz, dtype, cast_dtype)
return pd_x, res_for_gamma |
DSL description of the layernorm_grad operator's mathematical
calculation process
Parameters
----------
data_dy: TVM tensor
the placeholder of dy input data
data_x: TVM tensor
the placeholder of x input data
data_variance: TVM tensor
the placeholder of variance input data
data_mean: TVM tensor
the placeholder of mean input data
data_gamma: TVM tensor
the placeholder of gamma input data
shape_gamma_ori: list or tuple
original shape of gamma
Returns
-------
res_tuple: tuple
(pd_x, res_for_gamma) | def layer_norm_x_back_nz_compute(data_dy, data_x, data_variance, data_mean, data_gamma, param_nz):
"""
DSL description of the layernorm_grad operator's mathematical
calculation process
Parameters
----------
data_dy: TVM tensor
the placeholder of dy input data
data_x: TVM tensor
the placeholder of x input data
data_variance: TVM tensor
the placeholder of variance input data
data_mean: TVM tensor
the placeholder of mean input data
data_gamma: TVM tensor
the placeholder of gamma input data
shape_gamma_ori: list or tuple
original shape of gamma
Returns
-------
res_tuple: tuple
(pd_x, res_for_gamma)
"""
pd_x, res_for_gamma = _get_pds_nz(data_dy, data_x, data_variance, data_mean, data_gamma, param_nz)
return [pd_x, res_for_gamma] |
algorithm: layernorm_grad
calculating: gradient of layernorm
compute the partial derivatives w.r.t. x, gamma and beta
pd_xl = data_dy*data_gamma
pd_var = np.sum(((-0.5)*pd_xl*(data_x - data_mean)
*np.power((data_variance + EPSLON), (-1.5))),
reduce_axis, keepdims=True)
pd_mean = np.sum(((-1.0)*pd_xl
*np.power((data_variance + EPSLON), (-0.5))),
reduce_axis, keepdims=True)
+ pd_var*(1.0/m)
*np.sum(((-2.0)*(data_x - data_mean)),
reduce_axis, keepdims=True)
pd_x = pd_xl*np.power((data_variance + EPSLON), (-0.5))
+ pd_var*(2.0/m)*(data_x - data_mean) + pd_mean*(1.0/m)
pd_gamma = np.sum((data_dy*(data_x - data_mean)
*np.power((data_variance + EPSLON), (-0.5))),
param_axis, keepdims=True)
pd_beta = np.sum(data_dy, param_axis, keepdims=True)
Parameters
----------
input_dy : dict
shape and dtype of input dy, only support float16, float32
input_x: dict
shape and dtype of input x, only support float16, float32
input_variance: dict
shape and dtype of input variance, only support float16, float32
input_mean: dict
shape and dtype of input mean, only support float16, float32
input_gamma: dict
shape and dtype of input gamma, only support float16, float32
output_y: dict
shape and dtype of output, only support float16, float32
res_for_gamma: dict
shape and dtype of output, only support float16, float32
kernel_name: str
cce kernel name, default value is "layer_norm_x_backprop_v2"
Returns
-------
None | def layer_norm_x_backprop_v2(input_dy,
input_x,
input_variance,
input_mean,
input_gamma,
output_pd_x,
res_for_gamma,
kernel_name="layer_norm_x_backprop_v2"):
"""
algorithm: layernorm_grad
calculating: gradient of layernorm
    compute the partial derivatives w.r.t. x, gamma and beta
pd_xl = data_dy*data_gamma
pd_var = np.sum(((-0.5)*pd_xl*(data_x - data_mean)
*np.power((data_variance + EPSLON), (-1.5))),
reduce_axis, keepdims=True)
pd_mean = np.sum(((-1.0)*pd_xl
*np.power((data_variance + EPSLON), (-0.5))),
reduce_axis, keepdims=True)
+ pd_var*(1.0/m)
*np.sum(((-2.0)*(data_x - data_mean)),
reduce_axis, keepdims=True)
pd_x = pd_xl*np.power((data_variance + EPSLON), (-0.5))
+ pd_var*(2.0/m)*(data_x - data_mean) + pd_mean*(1.0/m)
pd_gamma = np.sum((data_dy*(data_x - data_mean)
*np.power((data_variance + EPSLON), (-0.5))),
param_axis, keepdims=True)
pd_beta = np.sum(data_dy, param_axis, keepdims=True)
Parameters
----------
input_dy : dict
shape and dtype of input dy, only support float16, float32
input_x: dict
shape and dtype of input x, only support float16, float32
input_variance: dict
shape and dtype of input variance, only support float16, float32
input_mean: dict
shape and dtype of input mean, only support float16, float32
input_gamma: dict
shape and dtype of input gamma, only support float16, float32
output_y: dict
shape and dtype of output, only support float16, float32
res_for_gamma: dict
shape and dtype of output, only support float16, float32
kernel_name: str
cce kernel name, default value is "layer_norm_x_backprop_v2"
Returns
-------
None
"""
dtype = input_dy.get("dtype").lower()
shape_dy = input_dy.get("shape")
shape_x = input_x.get("shape")
shape_variance = input_variance.get("shape")
shape_mean = input_mean.get("shape")
shape_gamma = input_gamma.get("shape")
format_dy = input_dy.get("format")
global EPSLON
EPSLON = 1e-5 if dtype == "float16" else 1e-12
if layer_norm_x_backprop_v2_unify.is_special_cases(shape_dy, shape_variance, shape_gamma):
context = tbe_context.op_context.get_context()
if context is not None:
context.set_op_mode("static")
context.add_addition("is_static", True)
layer_norm_x_backprop_v2_unify.layer_norm_x_backprop_v2(input_dy, input_x, input_variance, input_mean,
input_gamma, output_pd_x, res_for_gamma,
kernel_name)
else:
with tbe_context.op_context.OpContext("static"):
tbe_context.op_context.get_context().add_addition("is_static", True)
layer_norm_x_backprop_v2_unify.layer_norm_x_backprop_v2(input_dy, input_x, input_variance, input_mean,
input_gamma, output_pd_x, res_for_gamma,
kernel_name)
return
else:
if format_dy.upper() == "FRACTAL_NZ":
param_nz = update_shape_nz(shape_x, shape_variance, shape_gamma)
data_dy = tvm.placeholder(param_nz.get("shape_x_nz"), name="data_dy", dtype=dtype)
data_x = tvm.placeholder(param_nz.get("shape_x_nz"), name="data_x", dtype=dtype)
data_variance = tvm.placeholder(param_nz.get("shape_var_nz"), name="data_variance", dtype=dtype)
data_mean = tvm.placeholder(param_nz.get("shape_var_nz"), name="data_mean", dtype=dtype)
data_gamma = tvm.placeholder(param_nz.get("shape_gamma_nz"), name="data_gamma", dtype=dtype)
res_list = layer_norm_x_back_nz_compute(data_dy, data_x, data_variance, data_mean, data_gamma, param_nz)
tensor_list = [data_dy, data_x, data_variance, data_mean, data_gamma] + res_list
with tvm.target.cce():
sch = tbe.auto_schedule(res_list)
config = {"print_ir": False, "name": kernel_name, "tensor_list": tensor_list}
tbe.cce_build_code(sch, config)
else:
_check_params({
"shape_dy" : shape_dy,
"shape_x" : shape_x,
"shape_var" : shape_variance,
"shape_mean" : shape_mean,
"shape_gamma": shape_gamma,
"dtype" : dtype,
"kernel_name": kernel_name
})
shape_gamma = _update_gamma_shape(shape_x, shape_gamma)[0]
data_gm = _get_data_gm(
{
"shape_dy" : shape_dy,
"shape_x" : shape_x,
"shape_var" : shape_variance,
"shape_mean" : shape_mean,
"shape_gamma": shape_gamma
}, dtype)
res_list = layer_norm_x_backprop_v2_compute(data_gm[0], data_gm[1], data_gm[2], data_gm[3], data_gm[4],
output_pd_x)
with tvm.target.cce():
sch = tbe.auto_schedule(res_list)
tensor_list = list(data_gm) + list(res_list)
config = {"print_ir": False, "name": kernel_name, "tensor_list": tensor_list}
tbe.cce_build_code(sch, config) |
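The docstring above states the backward formulas in NumPy pseudo-code. As a sanity reference, here is a minimal NumPy sketch of those same formulas, assuming a 2-D input reduced over the last axis and an illustrative EPSLON value; this is not the TBE/FRACTAL_NZ kernel, just the math it implements:

```python
import numpy as np

EPSLON = 1e-12  # illustrative; the kernel picks 1e-5 or 1e-12 depending on dtype

def layer_norm_x_backprop_reference(dy, x, variance, mean, gamma):
    """NumPy reference of the pd_x / res_for_gamma formulas from the docstring."""
    m = x.shape[-1]  # number of elements reduced by the layer norm
    pd_xl = dy * gamma
    pd_var = np.sum(-0.5 * pd_xl * (x - mean) * np.power(variance + EPSLON, -1.5),
                    axis=-1, keepdims=True)
    pd_mean = (np.sum(-1.0 * pd_xl * np.power(variance + EPSLON, -0.5),
                      axis=-1, keepdims=True)
               + pd_var * (1.0 / m) * np.sum(-2.0 * (x - mean), axis=-1, keepdims=True))
    pd_x = (pd_xl * np.power(variance + EPSLON, -0.5)
            + pd_var * (2.0 / m) * (x - mean)
            + pd_mean * (1.0 / m))
    res_for_gamma = (x - mean) * np.power(variance + EPSLON, -0.5)
    return pd_x, res_for_gamma

# Toy shapes: (batch, hidden), statistics kept with keepdims=True so they broadcast.
x = np.random.randn(2, 8).astype(np.float32)
dy = np.random.randn(2, 8).astype(np.float32)
gamma = np.ones(8, dtype=np.float32)
mean = x.mean(axis=-1, keepdims=True)
variance = x.var(axis=-1, keepdims=True)
pd_x, res_for_gamma = layer_norm_x_backprop_reference(dy, x, variance, mean, gamma)
print(pd_x.shape, res_for_gamma.shape)  # (2, 8) (2, 8)
```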
Update parameters by AdamWeightDecay op. | def _update_run_kernel(opt, clip_value, beta1, beta2, eps, lr, weight_decay,
param, m, v, gradient, decay_flags, optim_filter):
"""
Update parameters by AdamWeightDecay op.
"""
success = True
if optim_filter:
if decay_flags:
next_param = opt(param, m, v, lr, beta1, beta2, eps, weight_decay,
_cpu_div(P.Cast()(gradient, mstype.float16), clip_value))
else:
next_param = opt(param, m, v, lr, beta1, beta2, eps, 0.0,
_cpu_div(P.Cast()(gradient, mstype.float16), clip_value))
return F.depend(success, next_param)
return success |
Check the type of inputs. | def _check_param_value(beta1, beta2, eps, prim_name):
"""Check the type of inputs."""
validator.check_value_type("beta1", beta1, [float], prim_name)
validator.check_value_type("beta2", beta2, [float], prim_name)
validator.check_value_type("eps", eps, [float], prim_name)
validator.check_float_range(beta1, 0.0, 1.0, Rel.INC_NEITHER, "beta1", prim_name)
validator.check_float_range(beta2, 0.0, 1.0, Rel.INC_NEITHER, "beta2", prim_name)
validator.check_positive_float(eps, "eps", prim_name) |
Encode whitespaces to extra tokens in GPT-J.
>>> encode_whitespaces('a\n  b\n   c', 10, 10)
'a\n<|extratoken_10|>b\n<|extratoken_11|>c' | def encode_whitespaces(text, start_extra_id: int, max_len: int):
""" Encode whitespaces to extra tokens in GPT-J.
    >>> encode_whitespaces('a\\n  b\\n   c', 10, 10)
'a\\n<|extratoken_10|>b\\n<|extratoken_11|>c'
"""
def push_acc_space(acc_len: int, text: str):
if acc_len == 0:
return text
if acc_len == 1:
return text + ' '
        assert acc_len <= max_len, f'Max whitespace run length {max_len}, but found {acc_len}'
extra_id = start_extra_id - 2 + acc_len
extra_token = f'<|extratoken_{extra_id}|>'
return text + extra_token
acc_len = 0
res = ''
for ch in text:
if ch == ' ':
acc_len += 1
if acc_len == max_len:
res = push_acc_space(acc_len, res)
acc_len = 0
else:
res = push_acc_space(acc_len, res)
acc_len = 0
res = res + ch
res = push_acc_space(acc_len, res)
return res |
Decode the whitespace-encoded strings produced by encode_whitespaces.
>>> text = 'a\n  b\n   c'
>>> s, l = 10, 10
>>> text == decode_whitespaces(encode_whitespaces(text, s, l), s, l)
True | def decode_whitespaces(text: str, start_extra_id: int, max_len: int):
""" Decode the whitespace-encoded strings produced by encode_whitespace.
>>> text = 'a\\n b\\n c'
>>> s, l = 10, 10
>>> text == decode_whitespaces(encode_whitespaces(text, s, l), s, l)
True
"""
for l in range(2, max_len + 1):
token_id = start_extra_id - 2 + l
token = f'<|extratoken_{token_id}|>'
text = text.replace(token, ' ' * l)
return text |
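A quick round-trip check of the two whitespace helpers above, assuming both functions are importable from the same module; the start_extra_id/max_len values are the ones used in the doctests and the sample string is hypothetical:

```python
start_extra_id, max_len = 10, 10
sample = "def f():\n    return 1\n        # deeply indented comment"

encoded = encode_whitespaces(sample, start_extra_id, max_len)
decoded = decode_whitespaces(encoded, start_extra_id, max_len)

assert decoded == sample               # lossless round trip
assert "<|extratoken_12|>" in encoded  # a run of 4 spaces maps to extra_id = 10 - 2 + 4 = 12
print(encoded)
```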
Generate position_id and attention_mask according to input_ids considering eod reset
Inputs:
input_ids: the input token ids
eod_id: the id for <EOD>
rank: the current rank
dis: the slice value for each rank
eod_reset: whether to open eod reset or not
returns:
input_ids: the input token ids
position_id: the position ids considering eod reset
attention_mask: the attention mask considering eod reset | def get_input_data_batch_slice_map(input_ids, eod_id, rank, dis, eod_reset):
"""
Generate position_id and attention_mask according to input_ids considering eod reset
Inputs:
input_ids: the input token ids
eod_id: the id for <EOD>
rank: the current rank
dis: the slice value for each rank
eod_reset: whether to open eod reset or not
returns:
input_ids: the input token ids
        position_id: the position ids considering eod reset
attention_mask: the attention mask considering eod reset
"""
# rank = int(rank)
# input_ids = input_ids[rank * dis : (rank + 1) * dis]
if np.any(input_ids > 60000):
raise ValueError("==exceed error")
# print("===input_ids tpye: ", input_ids.dtype, flush=True)
if not eod_reset:
return input_ids
seq_length = input_ids.shape[1] - 1
# Initialize position_ids and attention_mask
batch_input_ids = deepcopy(input_ids)
batch_position_ids = np.ones((dis, seq_length))
batch_attention_mask = np.ones((dis, seq_length, seq_length))
# Loop through batches
for bs_i in range(len(input_ids)):
# Get normal position_ids and attention_mask
local_ids = input_ids[bs_i]
batch_attention_mask[bs_i] = np.tril(np.ones(shape=(seq_length, seq_length)))
batch_position_ids[bs_i] = np.arange(seq_length)
# Find eod_of_document
eod_index = batch_position_ids[bs_i, local_ids[:-1] == eod_id].astype(np.int32)
prev_index = 0
for i in range(eod_index.size):
# Reset position_ids and attention_mask considering EOD
index = eod_index[i]
batch_attention_mask[bs_i, (index + 1):, :(index + 1)] = 0
batch_position_ids[bs_i, (index + 1):] -= (index + 1 - prev_index)
prev_index = index + 1
return batch_input_ids, batch_position_ids, batch_attention_mask |
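A self-contained NumPy illustration of the EOD-reset loop above on a single toy sample, using a hypothetical eod_id of 3; it shows how positions restart and how attention is blocked across document boundaries:

```python
import numpy as np

eod_id = 3                                           # hypothetical <EOD> id for this toy example
ids = np.array([5, 7, eod_id, 9, 2, eod_id, 4, 8])   # one sample of length 8
seq_length = len(ids)

position_id = np.arange(seq_length)
attention_mask = np.tril(np.ones((seq_length, seq_length)))

prev_index = 0
for index in np.where(ids == eod_id)[0]:
    # Tokens after an <EOD> must not attend to anything up to and including it ...
    attention_mask[(index + 1):, :(index + 1)] = 0
    # ... and their position ids restart from 0 after the <EOD>.
    position_id[(index + 1):] -= (index + 1 - prev_index)
    prev_index = index + 1

print(position_id)       # [0 1 2 0 1 2 0 1]
print(attention_mask)    # block-diagonal causal mask, one block per document
```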
Create dataset
Inputs:
batch_size: batch size
data_path: path of your MindRecord files
device_num: total device number
rank: current rank id
drop: whether drop remainder
eod_reset: whether enable position reset and attention mask reset
eod_id: the id for <EOD>
column_name: the column name of the mindrecord file. Default is input_ids
epoch: The repeat times of the dataset
Returns:
dataset_restore: the dataset for training or evaluating | def create_dataset(batch_size, data_path, args_opt, device_num=1, rank=0, drop=True, full_batch=False,
data_start_index=0,
eod_reset=False, eod_id=50256, column_name='input_ids', epoch=1, num_samples=None,
train_and_eval=False, val_ratio=0):
"""
Create dataset
Inputs:
batch_size: batch size
data_path: path of your MindRecord files
device_num: total device number
rank: current rank id
drop: whether drop remainder
eod_reset: whether enable position reset and attention mask reset
eod_id: the id for <EOD>
column_name: the column name of the mindrecord file. Default is input_ids
epoch: The repeat times of the dataset
Returns:
dataset_restore: the dataset for training or evaluating
"""
ds.config.set_seed(1)
    # Control the size of the data queue in consideration of memory
ds.config.set_prefetch_size(1)
if full_batch:
# no need to slice from the inputs
rank = 0
dis = batch_size
else:
        # Each card slices a small batch from the full batch
dis = int(batch_size / device_num)
if batch_size % device_num != 0:
raise ValueError(
f"batch size {batch_size} should be a multiple of device number {device_num}."
" You should change the args: per_batch_size."
)
skip_num = args_opt.has_trained_steps * dis
# skip_num = 0
num_parallel_workers = 4
train_data = get_code_data_train(data_path, args_opt, skip_num=(skip_num // num_parallel_workers))
if train_and_eval:
val_data = get_code_data_eval("/home/work/sfs/xx/data_valid",
args_opt) # TODO: set as current validation set path
else:
val_data = None
dataset_train = ds.GeneratorDataset(train_data, column_names=[column_name], num_samples=num_samples,
num_shards=device_num, shard_id=rank, shuffle=True,
num_parallel_workers=num_parallel_workers)
if train_and_eval:
dataset_val = ds.GeneratorDataset(val_data, column_names=[column_name], num_samples=num_samples,
num_shards=device_num, shard_id=rank, shuffle=True,
num_parallel_workers=num_parallel_workers)
else:
dataset_val = None
type_cast_op = C.TypeCast(mstype.int32)
type_cast_op_float = C.TypeCast(mstype.float16)
map_func = (lambda input_ids: get_input_data_batch_slice_map(input_ids, eod_id, rank, dis, eod_reset))
    # If eod_reset is enabled, two more inputs will be generated from input_ids
dataset_train = dataset_train.skip(skip_num)
if eod_reset:
dataset_train = dataset_train.batch(dis, drop_remainder=drop)
dataset_train = dataset_train.map(operations=map_func, input_columns=[column_name],
output_columns=[column_name, "position_id", "attention_mask"],
column_order=[column_name, "position_id", "attention_mask"])
dataset_train = dataset_train.map(input_columns="position_id", operations=type_cast_op)
dataset_train = dataset_train.map(input_columns="attention_mask", operations=type_cast_op_float)
else:
dataset_train = dataset_train.map(input_columns=[column_name], operations=type_cast_op)
dataset_train = dataset_train.batch(batch_size, drop_remainder=drop)
dataset_train = dataset_train.map(operations=map_func, input_columns=[column_name],
output_columns=[column_name])
dataset_train = dataset_train.map(input_columns=column_name, operations=type_cast_op)
dataset_train = dataset_train.repeat(epoch)
if dataset_val is not None:
if eod_reset:
dataset_val = dataset_val.batch(dis, drop_remainder=drop)
dataset_val = dataset_val.map(operations=map_func, input_columns=[column_name],
output_columns=[column_name, "position_id", "attention_mask"],
column_order=[column_name, "position_id", "attention_mask"])
dataset_val = dataset_val.map(input_columns="position_id", operations=type_cast_op)
dataset_val = dataset_val.map(input_columns="attention_mask", operations=type_cast_op_float)
else:
dataset_val = dataset_val.map(input_columns=[column_name], operations=type_cast_op)
dataset_val = dataset_val.batch(batch_size, drop_remainder=drop)
dataset_val = dataset_val.map(operations=map_func, input_columns=[column_name],
output_columns=[column_name])
dataset_val = dataset_val.map(input_columns=column_name, operations=type_cast_op)
return dataset_train, dataset_val |
Generate position_id and attention_mask according to input_ids considering eod reset
Inputs:
input_ids: the input token ids
loss_mask: the loss mask
eod_id: the id for <EOD>
rank: the current rank
dis: the slice value for each rank
eod_reset: whether to open eod reset or not
returns:
input_ids: the input token ids
position_id: the position ids considering eod reset
attention_mask: the attention mask considering eod reset
loss_mask: the loss mask considering prompt and eod reset | def get_input_data_batch_slice_map(input_ids, loss_mask, eod_id, rank, dis, eod_reset):
"""
Generate position_id and attention_mask according to input_ids considering eod reset
Inputs:
input_ids: the input token ids
loss_mask: the loss mask
eod_id: the id for <EOD>
rank: the current rank
dis: the slice value for each rank
eod_reset: whether to open eod reset or not
returns:
input_ids: the input token ids
        position_id: the position ids considering eod reset
attention_mask: the attention mask considering eod reset
loss_mask: the loss mask considering prompt and eod reset
"""
# rank = int(rank)
# input_ids = input_ids[rank * dis : (rank + 1) * dis]
if np.any(input_ids > 60000):
raise ValueError("==exceed error")
# print("===input_ids tpye: ", input_ids.dtype, flush=True)
if not eod_reset:
return input_ids
seq_length = input_ids.shape[1] - 1
# Initialize position_ids and attention_mask
batch_input_ids = deepcopy(input_ids)
batch_position_ids = np.ones((dis, seq_length))
batch_attention_mask = np.ones((dis, seq_length, seq_length))
batch_loss_mask = deepcopy(loss_mask)
# Loop through batches
for bs_i in range(len(batch_input_ids)):
# Get normal position_ids and attention_mask
local_ids = batch_input_ids[bs_i]
batch_attention_mask[bs_i] = np.tril(np.ones(shape=(seq_length, seq_length)))
batch_position_ids[bs_i] = np.arange(seq_length)
# Find eod_of_document
eod_index = batch_position_ids[bs_i, local_ids[:-1] == eod_id].astype(np.int32)
prev_index = 0
for i in range(eod_index.size):
# Reset position_ids and attention_mask considering EOD
index = eod_index[i]
batch_attention_mask[bs_i, (index + 1):, :(index + 1)] = 0
batch_position_ids[bs_i, (index + 1):] -= (index + 1 - prev_index)
prev_index = index + 1
# print(f"===batch_loss_mask: {batch_loss_mask}, shape: {batch_loss_mask.shape}, nonzero: {batch_loss_mask.nonzero()}")
return batch_input_ids, batch_loss_mask, batch_position_ids, batch_attention_mask |
Create dataset
Inputs:
batch_size: batch size
data_path: path of your MindRecord files
device_num: total device number
rank: current rank id
drop: whether drop remainder
eod_reset: whether enable position reset and attention mask reset
eod_id: the id for <EOD>
column_name: the column name of the mindrecord file. Default is input_ids
epoch: The repeat times of the dataset
Returns:
dataset_restore: the dataset for training or evaluating | def create_dataset(batch_size, data_path, args_opt, device_num=1, rank=0, drop=True, full_batch=False,
data_start_index=0,
eod_reset=False, eod_id=50256, epoch=1, num_samples=None, train_and_eval=False, val_ratio=0):
"""
Create dataset
Inputs:
batch_size: batch size
data_path: path of your MindRecord files
device_num: total device number
rank: current rank id
drop: whether drop remainder
eod_reset: whether enable position reset and attention mask reset
eod_id: the id for <EOD>
column_name: the column name of the mindrecord file. Default is input_ids
epoch: The repeat times of the dataset
Returns:
dataset_restore: the dataset for training or evaluating
"""
ds.config.set_seed(1)
    # Control the size of the data queue in consideration of memory
ds.config.set_prefetch_size(1)
if full_batch:
# no need to slice from the inputs
rank = 0
dis = batch_size
else:
        # Each card slices a small batch from the full batch
dis = int(batch_size / device_num)
if batch_size % device_num != 0:
raise ValueError(
f"batch size {batch_size} should be a multiple of device number {device_num}."
" You should change the args: per_batch_size."
)
# skip_num = args_opt.has_trained_steps * dis
# skip_num = 0
num_parallel_workers = 4
train_data = get_code_data(data_path, 'train', args_opt)
if train_and_eval:
val_data = get_code_data(data_path, 'val', args_opt)
else:
val_data = None
dataset_train = ds.GeneratorDataset(train_data, column_names=['input_ids', 'loss_mask'], num_samples=num_samples,
num_shards=device_num, shard_id=rank, shuffle=True,
num_parallel_workers=num_parallel_workers)
if train_and_eval:
dataset_val = ds.GeneratorDataset(val_data, column_names=['input_ids', 'loss_mask'], num_samples=num_samples,
num_shards=device_num, shard_id=rank, shuffle=True,
num_parallel_workers=num_parallel_workers)
else:
dataset_val = None
type_cast_op = C.TypeCast(mstype.int32)
type_cast_op_float = C.TypeCast(mstype.float16)
type_cast_op_float2 = C.TypeCast(mstype.float32)
map_func = (
lambda input_ids, loss_mask: get_input_data_batch_slice_map(input_ids, loss_mask, eod_id, rank, dis, eod_reset))
    # If eod_reset is enabled, two more inputs will be generated from input_ids
# dataset_train = dataset_train.skip(skip_num)
dataset_train = dataset_train.batch(dis, drop_remainder=drop)
dataset_train = dataset_train.map(operations=map_func, input_columns=["input_ids", "loss_mask"],
output_columns=["input_ids", "loss_mask", "position_id", "attention_mask"],
column_order=["input_ids", "loss_mask", "position_id", "attention_mask"])
dataset_train = dataset_train.map(input_columns="position_id", operations=type_cast_op)
dataset_train = dataset_train.map(input_columns="attention_mask", operations=type_cast_op_float)
dataset_train = dataset_train.map(input_columns="loss_mask", operations=type_cast_op_float2)
dataset_train = dataset_train.map(input_columns="input_ids", operations=type_cast_op)
dataset_train = dataset_train.repeat(epoch)
if dataset_val is not None:
dataset_val = dataset_val.batch(dis, drop_remainder=drop)
dataset_val = dataset_val.map(operations=map_func, input_columns=["input_ids", "loss_mask"],
output_columns=["input_ids", "loss_mask", "position_id", "attention_mask"],
column_order=["input_ids", "loss_mask", "position_id", "attention_mask"])
dataset_val = dataset_val.map(input_columns="position_id", operations=type_cast_op)
dataset_val = dataset_val.map(input_columns="attention_mask", operations=type_cast_op_float)
dataset_val = dataset_val.map(input_columns="loss_mask", operations=type_cast_op_float2)
dataset_val = dataset_val.map(input_columns="input_ids", operations=type_cast_op)
return dataset_train, dataset_val |
Get topk | def topk_fun(logits, topk=5):
"""Get topk"""
target_column = logits[0].tolist()
sorted_array = [(k, v) for k, v in enumerate(target_column)]
sorted_array.sort(key=lambda x: x[1], reverse=True)
topk_array = sorted_array[:topk]
index, value = zip(*topk_array)
index = np.array([index])
value = np.array([value])
return value, index |
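A small usage example of topk_fun above; it expects logits of shape (1, vocab) and returns values and indices with the same leading batch dimension (toy numbers):

```python
import numpy as np

logits = np.array([[0.1, 2.5, 0.3, 1.7, 0.9]])   # shape (1, vocab)
value, index = topk_fun(logits, topk=3)
print(value)   # [[2.5 1.7 0.9]]
print(index)   # [[1 3 4]]
```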
Convert the log_probs to probability | def sampler(log_probs_revised, top_p, top_k_num, use_pynative=False):
"""Convert the log_probs to probability"""
if use_pynative:
logits = P.Pow()(np.e, Tensor(log_probs_revised, mstype.float32))
else:
logits = np.power(np.e, np.array(log_probs_revised, np.float32))
# If top_p is less than 1.0, use top_p sampling
if top_p < 1.0:
# Only consider the 5000 largest logits to reduce computation
if use_pynative:
sorted_logits, index = P.TopK(sorted=True)(logits, 5000)
index = index.asnumpy()
sorted_logits = sorted_logits.asnumpy()
else:
sorted_logits, index = topk_fun(logits, 5000)
index = index[0]
        # Normalize with np.sum (scalar total); the builtin sum over a 2-D array returns a row and breaks the cumulative check below
        sorted_p = sorted_logits / np.sum(sorted_logits)
cumsum_p = np.cumsum(sorted_p, axis=1)
sorted_logits = sorted_logits[0]
cumsum_p = cumsum_p[0]
top_p_num = sum(cumsum_p < top_p) + 1
# Get the corresponding probs and indices
probs = sorted_logits[:top_p_num]
p_args = index[:top_p_num]
p = probs / sum(probs)
# if top_p is set to 1.0, use top_k sampling
else:
# Get the corresponding probs and indices
if use_pynative:
probs, p_args = P.TopK(sorted=True)(logits, top_k_num)
probs = probs.asnumpy()
p_args = p_args.asnumpy()
else:
probs, p_args = topk_fun(logits, top_k_num)
probs = probs[0]
p_args = p_args[0]
# Avoid rounding error
# if sum(probs) == 0:
# probs = np.array([1 / top_k_num for _ in range(top_k_num)])
p = probs / sum(probs)
return p, p_args |
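A hedged usage sketch of sampler in its pure-NumPy branch (use_pynative=False), assuming the topk_fun defined above is in scope; the log-probabilities are toy values:

```python
import numpy as np

log_probs = np.log(np.array([[0.05, 0.40, 0.10, 0.30, 0.15]]))   # shape (1, vocab)

# Nucleus (top-p) path: keep the smallest prefix of sorted tokens whose cumulative mass passes top_p.
p, p_args = sampler(log_probs, top_p=0.8, top_k_num=5, use_pynative=False)
print(p_args, p, p.sum())   # candidate token ids (most probable first) and renormalized probs

# Top-k path (top_p == 1.0): keep the top_k_num most probable tokens.
p, p_args = sampler(log_probs, top_p=1.0, top_k_num=3, use_pynative=False)
print(p_args[np.random.choice(len(p), p=p)])   # sample one token id, as generate() does
```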
Text generation
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text | def generate(model, origin_inputs, config, verbose=False):
"""
Text generation
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text
"""
# Get configurations for inference
frequency_penalty = config.frequency_penalty
presence_penalty = config.presence_penalty
top_p = config.top_p
top_k_num = config.top_k_num
temperature = config.temperature
max_generate_length = config.max_generate_length
seq_length = config.seq_length
end_token = config.end_token
use_pynative = config.use_pynative_op
vocab_embedding_vocab_size = (config.vocab_size // 1024 + 1) * 1024
_, valid_length = origin_inputs.shape
if verbose:
print("Original input shape", origin_inputs.shape)
# If target length exceeds seq_length, use seq_length instead
target_length = valid_length + max_generate_length
target_length = seq_length if target_length > seq_length else target_length
# A list of the frequency of each token
frequency_list = np.array([[0 for _ in range(vocab_embedding_vocab_size)]])
pad_length = seq_length - origin_inputs.shape[-1]
# Pad original inputs to seq_length
print("Original shape:", origin_inputs.shape)
input_ids = np.pad(origin_inputs, ((0, 0), (0, pad_length)),
'constant', constant_values=(end_token, end_token))
# print("input_ids is ", input_ids)
# A single loop generates one token, loop until reaching target seq_length or generating eod token
while valid_length < target_length:
inputs = Tensor(input_ids, mstype.int32)
# Indicate the exact token position
current_index = valid_length - 1 if valid_length - 1 > 0 else 0
current_index = Tensor([current_index], mstype.int32)
# Call a single inference
log_probs = model.predict(inputs, current_index)
        # Get the revised log_probs with frequency and presence penalties applied, to reduce duplicates in the generated results
log_probs = log_probs.asnumpy().reshape(1, -1)
log_probs_revised = log_probs - frequency_list * \
frequency_penalty - (frequency_list > 0) * presence_penalty
log_probs_revised /= temperature
p, p_args = sampler(log_probs_revised, top_p, top_k_num, use_pynative)
        # Randomly select a token as the final output for this round
target_index = np.random.choice(len(p), p=p)
if verbose:
print("=== log_probs_revised is", log_probs_revised)
print("=== p:", p, "shape:", p.shape)
print("=== p_args:", p_args, "shape", p_args.shape)
print(f"=== Length {valid_length}, target index {target_index}, chosen token {p_args[target_index]}.")
# Stop judgment
if p_args[target_index] == end_token or valid_length == target_length - 1:
outputs = input_ids
if verbose:
print(
f"=== generation end, last token: {p_args[target_index]}")
break
# update frequency list
target = p_args[target_index]
frequency_list[0][target] = frequency_list[0][target] + 1
# Modify input_ids with newly generated token
input_ids[0][valid_length] = p_args[target_index]
valid_length += 1
# Return valid outputs out of padded outputs
length = np.sum(outputs != end_token)
outputs = outputs[0][:length]
return outputs |
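The penalty adjustment inside the loop above is the core of the repetition control. A standalone NumPy sketch of just that step on a toy vocabulary (all values hypothetical):

```python
import numpy as np

log_probs = np.array([[1.2, 0.3, 0.9, 0.1, 0.5]])   # model output for a vocab of 5
frequency_list = np.array([[2, 0, 1, 0, 0]])        # token 0 generated twice, token 2 once
frequency_penalty, presence_penalty, temperature = 0.5, 0.3, 0.8

# Same adjustment as in generate(): penalize tokens by how often / whether they already appeared.
log_probs_revised = log_probs - frequency_list * frequency_penalty \
                    - (frequency_list > 0) * presence_penalty
log_probs_revised /= temperature
print(log_probs_revised)   # token 0: (1.2 - 2*0.5 - 0.3) / 0.8 = -0.125; unused tokens only get the temperature scaling
```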
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text | def generate_increment(model, origin_inputs, config, verbose=False):
"""
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text
"""
# Get configurations for inference
frequency_penalty = config.frequency_penalty
presence_penalty = config.presence_penalty
top_p = config.top_p
top_k_num = config.top_k_num
temperature = config.temperature
max_generate_length = config.max_generate_length
seq_length = config.seq_length
end_token = config.end_token
use_pynative = config.use_pynative_op
vocab_embedding_vocab_size = (config.vocab_size // 1024 + 1) * 1024
_, valid_length = origin_inputs.shape
# Init outputs with original inputs
outputs = [origin_inputs[0][i] for i in range(valid_length)]
# If target length exceeds seq_length, use seq_length instead
target_length = valid_length + max_generate_length
target_length = seq_length if target_length > seq_length else target_length
# A list of the frequency of each token
frequency_list = np.array([[0 for _ in range(vocab_embedding_vocab_size)]])
pad_length = seq_length - origin_inputs.shape[-1]
# Pad original inputs to seq_length
input_ids = np.pad(origin_inputs, ((0, 0), (0, pad_length)),
'constant', constant_values=(end_token, end_token))
print("input_ids is ", input_ids)
# Indicate the exact token position
current_index = valid_length - 1 if valid_length - 1 > 0 else 0
batch_valid_length = Tensor(np.array([current_index]), mstype.int32)
current_index = Tensor(np.array([current_index]), mstype.int32)
# For first graph, not_init should be false
init_true = Tensor([True], mstype.bool_)
init_false = Tensor([False], mstype.bool_)
init = init_false
# Claim the first graph
model.predict_network.add_flags_recursive(is_first_iteration=True)
# Call a single inference with input size of (bs, seq_length)
logits = model.predict(Tensor(input_ids, mstype.int32),
current_index, init, batch_valid_length)
# Claim the second graph and set not_init to true
init = init_true
model.predict_network.add_flags_recursive(is_first_iteration=False)
# A single loop generates one token, loop until reaching target seq_length or generating eod token
while valid_length < target_length:
# Reshape the output logits
logits = logits.asnumpy()
log_probs = logits.reshape(1, vocab_embedding_vocab_size)
        # Get the revised log_probs with frequency and presence penalties applied, to reduce duplicates in the generated results
log_probs_revised = log_probs - frequency_list * \
frequency_penalty - (frequency_list > 0) * presence_penalty
log_probs_revised /= temperature
p, p_args = sampler(log_probs_revised, top_p, top_k_num, use_pynative)
        # Randomly select a token as the final output for this round
target_index = np.random.choice(len(p), p=p)
if verbose:
print("=== log_probs_revised is", log_probs_revised)
print("=== p:", p, "shape:", p.shape)
print("=== p_args:", p_args, "shape", p_args.shape)
print(f"=== Length {valid_length}, target index {target_index}, chosen token {p_args[target_index]}.")
# Stop judgment
if p_args[target_index] == end_token or valid_length == target_length - 1:
break
# Update frequency list
target = p_args[target_index]
frequency_list[0][target] = frequency_list[0][target] + 1
valid_length += 1
batch_valid_length = Tensor(np.array([valid_length - 1]), mstype.int32)
current_index = Tensor(np.array([0]), mstype.int32)
input_id = Tensor([[target]], mstype.int32)
# Update outputs with current generated token
outputs.append(int(target))
# Call a single inference with input size of (bs, 1)
logits = model.predict(input_id, current_index,
init, batch_valid_length)
# Return valid outputs out of padded outputs
return np.array(outputs) |
Get topk | def topk_fun(logits, topk=5):
"""Get topk"""
value = np.flip(np.sort(logits), axis=-1)[..., :topk]
index = np.flip(np.argsort(logits), axis=-1)[..., :topk]
return value, index |
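This vectorized topk_fun handles batched logits directly, unlike the list-based version earlier. A short check with a batch of two toy rows:

```python
import numpy as np

logits = np.array([[0.1, 2.5, 0.3, 1.7, 0.9],
                   [1.0, 0.2, 3.0, 0.4, 0.8]])
value, index = topk_fun(logits, topk=2)
print(value)   # [[2.5 1.7] [3.  1. ]]
print(index)   # [[1 3] [2 0]]
```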
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text | def generate_increment(model, origin_inputs, origin_length, config, tokenizer, verbose=False):
"""
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text
"""
# Get configurations for inference
frequency_penalty = config.frequency_penalty
presence_penalty = config.presence_penalty
top_p = config.top_p
top_k_num = config.top_k_num
temperature = config.temperature
max_generate_length = config.max_generate_length
seq_length = config.seq_length
end_token = config.end_token
use_pynative = config.use_pynative_op
vocab_embedding_vocab_size = (config.vocab_size // 1024 + 1) * 1024
batch_size, valid_length = origin_inputs.shape
# Init outputs with original inputs
outputs = [[origin_inputs[i][j] for j in range(valid_length)] for i in range(batch_size)]
output_codes = [[] for _ in range(batch_size)]
# If target length exceeds seq_length, use seq_length instead
target_lengths = [min(seq_length, l + max_generate_length) for l in origin_length]
valid_lengths = deepcopy(origin_length)
gen_end = [(l == -1) for l in origin_length]
# A list of the frequency of each token
frequency_list = np.zeros((batch_size, vocab_embedding_vocab_size))
pad_length = seq_length - origin_inputs.shape[-1]
# Pad original inputs to seq_length
input_ids = np.pad(origin_inputs, ((0, 0), (0, pad_length)),
'constant', constant_values=(end_token, end_token))
if verbose:
print("input_ids is ", input_ids)
# Indicate the exact token position
current_indexes = [max(l - 1, 0) for l in valid_lengths]
# batch_valid_length = Tensor(np.array([current_index for _ in range(batch_size)]), mstype.int32)
batch_valid_length = Tensor(np.array(current_indexes), mstype.int32)
current_indexes = Tensor(np.array([current_indexes[i] + i * seq_length for i in range(batch_size)]), mstype.int32)
# For first graph, not_init should be false
init_true = Tensor([True], mstype.bool_)
init_false = Tensor([False], mstype.bool_)
init = init_false
# Claim the first graph
model.predict_network.add_flags_recursive(is_first_iteration=True)
# Call a single inference with input size of (bs, seq_length)
logits = model.predict(Tensor(input_ids, mstype.int32),
current_indexes, init, batch_valid_length)
# Claim the second graph and set not_init to true
init = init_true
model.predict_network.add_flags_recursive(is_first_iteration=False)
comments_index = [2, ] # '#': 2, ' #': 1303
newline_index = [198, ] # '\n': 198
# A single loop generates one token, loop until reaching target seq_length or generating eod token
while not all(gen_end):
# Reshape the output logits
logits = logits.asnumpy()
log_probs = logits.reshape(batch_size, vocab_embedding_vocab_size)
        # Get the revised log_probs with frequency and presence penalties applied, to reduce duplicates in the generated results
log_probs_revised = log_probs - frequency_list * frequency_penalty - (frequency_list > 0) * presence_penalty
log_probs_revised /= temperature
bad_words_index = [[] for _ in range(batch_size)]
p, p_args = sampler(log_probs_revised, top_p, top_k_num, use_pynative, bad_words_index=bad_words_index)
        # Randomly select a token as the final output for this round
target_index = np.zeros(batch_size, dtype=np.int64)
for i in range(batch_size):
target_index[i] = np.random.choice(len(p[i]), p=p[i])
if verbose:
print("=== p:", p, "shape:", p.shape)
print("=== p_args:", p_args, "shape", p_args.shape)
print(
f"=== Length {valid_lengths}, target index {target_index}, chosen token {p_args[np.arange(batch_size), target_index]}, generation end status {gen_end}.")
# Update frequency list
target = p_args[np.arange(batch_size), target_index]
frequency_list[np.arange(batch_size), target] = frequency_list[np.arange(batch_size), target] + 1
batch_valid_length = Tensor(np.array(valid_lengths), mstype.int32)
current_indexes = Tensor(np.arange(batch_size, dtype=np.int32), mstype.int32)
input_id = Tensor([target], mstype.int32).reshape(-1, 1)
# Update outputs with current generated token
for i in range(batch_size):
if not gen_end[i]:
if int(target[i]) == 50256:
gen_end[i] = True
else:
output_codes[i].append(int(target[i]))
outputs[i].append(int(target[i]))
valid_lengths[i] += 1
if valid_lengths[i] >= target_lengths[i]:
gen_end[i] = True
# Call a single inference with input size of (bs, 1)
logits = model.predict(input_id, current_indexes,
init, batch_valid_length)
return tokenizer.decode_code(output_codes) |
Get topk | def topk_fun(logits, topk=5):
"""Get topk"""
value = np.flip(np.sort(logits), axis=-1)[..., :topk]
index = np.flip(np.argsort(logits), axis=-1)[..., :topk]
return value, index |
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text | def generate_increment(model, origin_inputs, origin_length, config, tokenizer, verbose=False):
"""
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text
"""
# Get configurations for inference
frequency_penalty = config.frequency_penalty
presence_penalty = config.presence_penalty
top_p = config.top_p
top_k_num = config.top_k_num
temperature = config.temperature
max_generate_length = config.max_generate_length
seq_length = config.seq_length
end_token = config.end_token
use_pynative = config.use_pynative_op
vocab_embedding_vocab_size = (config.vocab_size // 1024 + 1) * 1024
batch_size, valid_length = origin_inputs.shape
# Init outputs with original inputs
outputs = [[origin_inputs[i][j] for j in range(valid_length)] for i in range(batch_size)]
output_codes = [[] for _ in range(batch_size)]
# If target length exceeds seq_length, use seq_length instead
target_lengths = [min(seq_length, l + max_generate_length) for l in origin_length]
valid_lengths = deepcopy(origin_length)
gen_end = [(l == -1) for l in origin_length]
# A list of the frequency of each token
frequency_list = np.zeros((batch_size, vocab_embedding_vocab_size))
pad_length = seq_length - origin_inputs.shape[-1]
# Pad original inputs to seq_length
input_ids = np.pad(origin_inputs, ((0, 0), (0, pad_length)),
'constant', constant_values=(end_token, end_token))
if verbose:
print("input_ids is ", input_ids)
# Indicate the exact token position
current_indexes = [max(l - 1, 0) for l in valid_lengths]
# batch_valid_length = Tensor(np.array([current_index for _ in range(batch_size)]), mstype.int32)
batch_valid_length = Tensor(np.array(current_indexes), mstype.int32)
current_indexes = Tensor(np.array([current_indexes[i] + i * seq_length for i in range(batch_size)]), mstype.int32)
# For first graph, not_init should be false
init_true = Tensor([True], mstype.bool_)
init_false = Tensor([False], mstype.bool_)
init = init_false
# Claim the first graph
model.predict_network.add_flags_recursive(is_first_iteration=True)
# Call a single inference with input size of (bs, seq_length)
logits = model.predict(Tensor(input_ids, mstype.int32),
current_indexes, init, batch_valid_length)
# Claim the second graph and set not_init to true
init = init_true
model.predict_network.add_flags_recursive(is_first_iteration=False)
comments_index = [2, ] # '#': 2, ' #': 1303
newline_index = [198, ] # '\n': 198
# A single loop generates one token, loop until reaching target seq_length or generating eod token
while not all(gen_end):
# Reshape the output logits
logits = logits.asnumpy()
log_probs = logits.reshape(batch_size, vocab_embedding_vocab_size)
        # Get the revised log_probs with frequency and presence penalties applied, to reduce duplicates in the generated results
log_probs_revised = log_probs - frequency_list * frequency_penalty - (frequency_list > 0) * presence_penalty
bad_words_index = [[] for _ in range(batch_size)]
target_index = sampler(log_probs_revised, top_p, top_k_num, use_pynative, bad_words_index=bad_words_index)
if verbose:
print(f"=== Length {valid_lengths}, target index {target_index}, generation end status {gen_end}.")
# Update frequency list
target = target_index
frequency_list[np.arange(batch_size), target] = frequency_list[np.arange(batch_size), target] + 1
batch_valid_length = Tensor(np.array(valid_lengths), mstype.int32)
current_indexes = Tensor(np.arange(batch_size, dtype=np.int32), mstype.int32)
input_id = Tensor([target], mstype.int32).reshape(-1, 1)
# Update outputs with current generated token
for i in range(batch_size):
if not gen_end[i]:
if int(target[i]) == 50256:
gen_end[i] = True
else:
output_codes[i].append(int(target[i]))
outputs[i].append(int(target[i]))
valid_lengths[i] += 1
if valid_lengths[i] >= target_lengths[i]:
gen_end[i] = True
# Call a single inference with input size of (bs, 1)
logits = model.predict(input_id, current_indexes,
init, batch_valid_length)
return tokenizer.decode_code(output_codes) |
Checks whether the generated code text is finished. | def is_code_generation_finished(text: str):
"""
Checks whether the generated code text is finished.
"""
# end_words = ['\ndef', '\nclass', '\nif', '\n#', '\nprint', '<|endoftext|>']
end_words = ['\n}']
for w in end_words:
if w in text:
return True
return False |
Cleans up the generated code text. | def cleanup_text(text: str):
"""
Cleans up the generated code text.
"""
# end_words = ['\ndef', '\nclass', '\nif', '\n#', '\nprint', '<|endoftext|>']
end_words = ['\n}']
for w in end_words:
if text.endswith(w):
text = text[:-len(w)]
return text |
Truncates the generated code text at the first end word. | def truncate_text(text: str):
"""
    Truncates the generated code text at the first end word.
"""
# end_words = ['\ndef', '\nclass', '\nif', '\n#', '\nprint', '<|endoftext|>']
end_words = ['\n}']
for w in end_words:
idx = text.find(w)
if idx != -1:
text = text[:idx] + w
# text = text[:idx]
return text |
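A brief usage sketch of the three end-word helpers above; they assume C-style generations that terminate at a closing '\n}' (the snippet is hypothetical):

```python
snippet = "int add(int a, int b) {\n    return a + b;\n}\nint unused_tail"

print(is_code_generation_finished(snippet))   # True: the text contains '\n}'
print(truncate_text(snippet))                 # keeps everything up to and including the first '\n}'
print(cleanup_text("int f() {\n}"))           # strips a trailing '\n}' if the text ends with it
```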
Get topk | def topk_fun(logits, topk=5):
"""Get topk"""
# target_column = logits[0].tolist()
# sorted_array = [(k, v) for k, v in enumerate(target_column)]
# sorted_array.sort(key=lambda x: x[1], reverse=True)
# topk_array = sorted_array[:topk]
# index, value = zip(*topk_array)
# index = np.array([index])
# value = np.array([value])
value = np.flip(np.sort(logits), axis=-1)[..., :topk]
index = np.flip(np.argsort(logits), axis=-1)[..., :topk]
return value, index |
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text | def generate_increment(model, origin_inputs, config, tokenizer, verbose=False):
"""
Text generation for incremental inference
Inputs:
model: the model for inferencing
origin_inputs: the original inputs based on which the model will continue writing
config: inference configurations
Returns:
outputs: the ids for the generated text
"""
# Get configurations for inference
frequency_penalty = config.frequency_penalty
presence_penalty = config.presence_penalty
top_p = config.top_p
top_k_num = config.top_k_num
temperature = config.temperature
max_generate_length = config.max_generate_length
seq_length = config.seq_length
end_token = config.end_token
use_pynative = config.use_pynative_op
vocab_embedding_vocab_size = (config.vocab_size // 1024 + 1) * 1024
batch_size, valid_length = origin_inputs.shape
# Init outputs with original inputs
# outputs = deepcopy(origin_inputs)
outputs = [[origin_inputs[i][j] for j in range(valid_length)] for i in range(batch_size)]
output_codes = ["" for _ in range(batch_size)]
# If target length exceeds seq_length, use seq_length instead
target_length = valid_length + max_generate_length
if verbose:
print("target_length was ", valid_length, " + ", max_generate_length, " = ", target_length)
target_length = seq_length if target_length > seq_length else target_length
if verbose:
print("target_length is ", target_length)
gen_end = [False for _ in range(batch_size)]
allow_comments_next = [True for _ in range(batch_size)]
allow_comments = [True for _ in range(batch_size)]
# A list of the frequency of each token
frequency_list = np.zeros((batch_size, vocab_embedding_vocab_size))
pad_length = seq_length - origin_inputs.shape[-1]
# Pad original inputs to seq_length
input_ids = np.pad(origin_inputs, ((0, 0), (0, pad_length)),
'constant', constant_values=(end_token, end_token))
if verbose:
print("input_ids is ", input_ids)
# Indicate the exact token position
current_index = valid_length - 1 if valid_length - 1 > 0 else 0
batch_valid_length = Tensor(np.array([current_index for _ in range(batch_size)]), mstype.int32)
current_index = Tensor(np.array([current_index + i * seq_length for i in range(batch_size)]), mstype.int32)
# For first graph, not_init should be false
init_true = Tensor([True], mstype.bool_)
init_false = Tensor([False], mstype.bool_)
init = init_false
# Claim the first graph
model.predict_network.add_flags_recursive(is_first_iteration=True)
# Call a single inference with input size of (bs, seq_length)
logits = model.predict(Tensor(input_ids, mstype.int32),
current_index, init, batch_valid_length)
# Claim the second graph and set not_init to true
init = init_true
model.predict_network.add_flags_recursive(is_first_iteration=False)
comments_index = [2, ] # '#': 2, ' #': 1303
newline_index = [198, ] # '\n': 198
# A single loop generates one token, loop until reaching target seq_length or generating eod token
while valid_length < target_length:
if all(gen_end):
break
# Reshape the output logits
logits = logits.asnumpy()
log_probs = logits.reshape(batch_size, vocab_embedding_vocab_size)
        # Get the revised log_probs with frequency and presence penalties applied, to reduce duplicates in the generated results
log_probs_revised = log_probs - frequency_list * \
frequency_penalty - (frequency_list > 0) * presence_penalty
log_probs_revised /= temperature
bad_words_index = [[] for _ in range(batch_size)]
# for i in range(batch_size):
# if not allow_comments[i]:
# bad_words_index[i] += comments_index
p, p_args = sampler(log_probs_revised, top_p, top_k_num, use_pynative, bad_words_index=bad_words_index)
        # Randomly select a token as the final output for this round
target_index = np.zeros(batch_size, dtype=np.int64)
for i in range(batch_size):
target_index[i] = np.random.choice(len(p[i]), p=p[i])
if verbose:
# print("=== log_probs_revised is", log_probs_revised)
print("=== p:", p, "shape:", p.shape)
print("=== p_args:", p_args, "shape", p_args.shape)
print(
f"=== Length {valid_length}, target index {target_index}, chosen token {p_args[np.arange(batch_size), target_index]}, generation end status {gen_end}.")
# Update frequency list
target = p_args[np.arange(batch_size), target_index]
frequency_list[np.arange(batch_size), target] = frequency_list[np.arange(batch_size), target] + 1
batch_valid_length = Tensor(np.array([valid_length for _ in range(batch_size)]), mstype.int32)
current_index = Tensor(np.arange(batch_size, dtype=np.int32), mstype.int32)
input_id = Tensor([target], mstype.int32).reshape(-1, 1)
for i in range(batch_size):
if not gen_end[i]:
output_codes[i] += tokenizer.decode_code([int(target[i])])[0]
if is_code_generation_finished(output_codes[i]):
gen_end[i] = True
output_codes[i] = truncate_text(output_codes[i])
if output_codes[i].endswith('#'):
allow_comments_next[i] = False
elif output_codes[i].endswith('\n'):
allow_comments[i] = allow_comments_next[i]
allow_comments_next[i] = True
outputs[i].append(int(target[i]))
# Call a single inference with input size of (bs, 1)
logits = model.predict(input_id, current_index,
init, batch_valid_length)
valid_length += 1
return output_codes |
Default setting for the pipeline is: `(layer_id + offset) // (layers / pipeline_stage)`.
Args:
network(Cell) - Represents the transformer block
layer_id(int) - Means the layer index for the current module, counts from zero.
offset(int) - Means the layer_index needs an offset, if there are other modules in the net.
layers(int) - The total layers used for the model. | def set_parallel_configure_for_layer(
network, layer_id, offset, parallel_config, layers
):
r"""
Default setting for the pipeline is: `(layer_id + offset) // (layers / pipeline_stage)`.
Args:
network(Cell) - Represents the transformer block
layer_id(int) - Means the layer index for the current module, counts from zero.
        offset(int) - Means the layer_index needs an offset, if there are other modules in the net.
layers(int) - The total layers used for the model.
"""
# Used for the pipeline's stages setting
    # As the final layer is not included here, we need to add 1 manually.
    # Originally, with two stages, the layers on the two stages would be [15, 16+1];
    # with the extra 1 added, they become [16, 15+1].
pp_dis = max(int((layers + 1) / parallel_config.pipeline_stage), 1)
# the pipeline stage must be in [0, parallel_config.pipeline_stage - 1]
pp_id = min((layer_id + offset) // pp_dis, parallel_config.pipeline_stage - 1)
network.pipeline_stage = pp_id
print(f"pipeline stage id is {pp_id}", flush=True)
# Used for optimizer's fusion tag
dis = max(int((layers + 1) / parallel_config.gradient_aggregation_group), 1)
if parallel_config.pipeline_stage > 1:
# we give the fusion in pipeline mode a fixed value, otherwise the performance may become worse.
network.set_comm_fusion(2)
else:
network.set_comm_fusion(int((layer_id + offset) / dis) + 1)
# Used for enabling recomputation of the block
if parallel_config.recompute:
network.recompute(recompute_slice_activation=True) |
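A tiny pure-Python check of the stage formula described in the docstring, with a hypothetical 16-block body and 2 pipeline stages (no MindSpore needed, just the arithmetic):

```python
layers = 16            # transformer blocks passed in; the final layer is counted via the "+ 1"
pipeline_stage = 2
offset = 0

pp_dis = max(int((layers + 1) / pipeline_stage), 1)   # 8
stages = [min((layer_id + offset) // pp_dis, pipeline_stage - 1) for layer_id in range(layers)]
print(pp_dis)    # 8
print(stages)    # [0]*8 + [1]*8 -> first half on stage 0, second half on stage 1
```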
Set config according to the mode | def set_parse(args_opt):
r"""
Set config according to the mode
"""
if args_opt.mode == "200B":
args_opt.embedding_size = 16384
args_opt.num_layers = 64
args_opt.num_heads = 128
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 1
args_opt.word_emb_dp = 0
if args_opt.run_type == "train":
args_opt.start_lr = 6e-5
args_opt.end_lr = 6e-6
args_opt.stage_num = 16
args_opt.micro_size = 32
args_opt.op_level_model_parallel_num = 16
if args_opt.optimizer_shard == 1:
args_opt.op_level_model_parallel_num = 8
elif args_opt.run_type == "predict":
args_opt.stage_num = 4
args_opt.micro_size = 1
args_opt.op_level_model_parallel_num = 16
if args_opt.optimizer_shard == 1:
args_opt.op_level_model_parallel_num = 8
elif args_opt.mode == "13B":
args_opt.embedding_size = 5120
args_opt.num_layers = 40
args_opt.num_heads = 40
args_opt.word_emb_dp = 0
args_opt.op_level_model_parallel_num = 8
if args_opt.run_type == "train":
args_opt.start_lr = 1e-4
args_opt.end_lr = 1e-6
# args_opt.start_lr = 5e-5
# args_opt.end_lr = 5e-7
args_opt.optimizer_shard = 1
args_opt.full_batch = args_opt.opt_offload
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 8
if args_opt.stage_num > 1:
args_opt.word_emb_dp = 0
elif args_opt.run_type == "predict":
args_opt.stage_num = 1
args_opt.micro_size = 1
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 1
elif args_opt.mode == "2.6B":
args_opt.embedding_size = 2560
args_opt.num_layers = 32
args_opt.num_heads = 32
args_opt.op_level_model_parallel_num = 8
if args_opt.run_type == "train":
args_opt.start_lr = 3e-6
# args_opt.start_lr = 1e-4
args_opt.end_lr = 1e-6
args_opt.optimizer_shard = 1
args_opt.full_batch = args_opt.opt_offload
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 16
if args_opt.stage_num > 1:
args_opt.word_emb_dp = 0
elif args_opt.run_type == "predict":
args_opt.stage_num = 1
args_opt.micro_size = 1
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 1
elif args_opt.mode == "base":
args_opt.embedding_size = 768
args_opt.num_layers = 12
args_opt.num_heads = 12
args_opt.op_level_model_parallel_num = 2
if args_opt.run_type == "train":
args_opt.start_lr = 4e-4
args_opt.end_lr = 1e-6
args_opt.optimizer_shard = 1
args_opt.warmup_step = 6000
args_opt.full_batch = args_opt.opt_offload
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 16
if args_opt.stage_num > 1:
args_opt.word_emb_dp = 0
elif args_opt.run_type == "predict":
args_opt.stage_num = 1
args_opt.micro_size = 1
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 1
elif args_opt.mode == "dev":
args_opt.embedding_size = 2048
args_opt.num_layers = 16
args_opt.num_heads = 16
args_opt.op_level_model_parallel_num = 4
if args_opt.run_type == "train":
args_opt.start_lr = 1e-4
args_opt.end_lr = 1e-6
args_opt.optimizer_shard = 1
args_opt.full_batch = args_opt.opt_offload
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 16
if args_opt.stage_num > 1:
args_opt.word_emb_dp = 0
elif args_opt.run_type == "predict":
args_opt.stage_num = 1
args_opt.micro_size = 1
if args_opt.per_batch_size == 0:
args_opt.per_batch_size = 1 |
Default setting for the pipeline is: `(layer_id + offset) // (layers / pipeline_stage)`.
Args:
network(Cell) - Represents the transformer block
layer_id(int) - Means the layer index for the current module, counts from zero.
offset(int) - Means the layer_index needs an offset, if there are other modules in the net.
layers(int) - The total layers used for the model. | def set_parallel_configure_for_layer(
network, layer_id, offset, parallel_config, layers
):
r"""
Default setting for the pipeline is: `(layer_id + offset) // (layers / pipeline_stage)`.
Args:
network(Cell) - Represents the transformer block
layer_id(int) - Means the layer index for the current module, counts from zero.
        offset(int) - Means the layer_index needs an offset, if there are other modules in the net.
layers(int) - The total layers used for the model.
"""
# Used for the pipeline's stages setting
    # As the final layer is not included here, we need to add 1 manually.
    # Originally, with two stages, the layers on the two stages would be [15, 16+1];
    # with the extra 1 added, they become [16, 15+1].
pp_dis = max(int((layers + 1) / parallel_config.pipeline_stage), 1)
# the pipeline stage must be in [0, parallel_config.pipeline_stage - 1]
pp_id = min((layer_id + offset) // pp_dis, parallel_config.pipeline_stage - 1)
network.pipeline_stage = pp_id
print(f"pipeline stage id is {pp_id}", flush=True)
# Used for optimizer's fusion tag
dis = max(int((layers + 1) / parallel_config.gradient_aggregation_group), 1)
if parallel_config.pipeline_stage > 1:
# we give the fusion in pipeline mode a fixed value, otherwise the performance may become worse.
network.set_comm_fusion(2)
else:
network.set_comm_fusion(int((layer_id + offset) / dis) + 1)
# Used for enabling recomputation of the block
if parallel_config.recompute:
network.recompute(recompute_slice_activation=True) |
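A minimal usage sketch, not taken from the original sources: it re-runs the stage-assignment arithmetic above with plain integers (no MindSpore cell or parallel_config object), so the formula `(layer_id + offset) // pp_dis` can be checked by hand. | def example_stage_assignment(layers=31, offset=0, pipeline_stage=2):
    """Hypothetical helper: print which pipeline stage each layer would land on."""
    # pp_dis mirrors the computation in set_parallel_configure_for_layer above
    pp_dis = max(int((layers + 1) / pipeline_stage), 1)
    for layer_id in range(layers):
        pp_id = min((layer_id + offset) // pp_dis, pipeline_stage - 1)
        print(f"layer {layer_id} -> stage {pp_id}")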
Clip gradients.
Inputs:
clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients. | def _clip_grad(clip_type, clip_value, grad):
"""
Clip gradients.
Inputs:
clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
if clip_type not in [0, 1]:
return grad
dt = F.dtype(grad)
# 0 for clip_by_value and 1 for clip_by_norm
if clip_type == 0:
new_grad = C.clip_by_value(
grad,
F.cast(F.tuple_to_array((-clip_value,)), dt),
F.cast(F.tuple_to_array((clip_value,)), dt),
)
else:
new_grad = nn.ClipByNorm()(
grad, F.cast(F.tuple_to_array((clip_value,)), dt)
)
return new_grad |
Clip gradients.
Inputs:
clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients. | def _clip_grad(clip_type, clip_value, grad):
"""
Clip gradients.
Inputs:
clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
if clip_type not in [0, 1]:
return grad
dt = F.dtype(grad)
# 0 for clip_by_value and 1 for clip_by_norm
if clip_type == 0:
new_grad = C.clip_by_value(
grad,
F.cast(F.tuple_to_array((-clip_value,)), dt),
F.cast(F.tuple_to_array((clip_value,)), dt),
)
else:
new_grad = nn.ClipByNorm()(
grad, F.cast(F.tuple_to_array((clip_value,)), dt)
)
return new_grad |
yield n sized chunks from list | def chunks(lst, n):
"""yield n sized chunks from list"""
for i in range(0, len(lst), n):
yield lst[i: i + n] |
package multiple files | def package_file(it, n):
"""package multiple files"""
stop = False
while not stop:
batch = []
for _ in range(n):
try:
batch.append(next(it))
except StopIteration:
stop = True
if not batch:
break
yield batch |
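A small illustrative sketch (assumed usage, not from the original repo) showing how chunks() and package_file() above behave on toy in-memory data. | def example_chunks_and_package():
    """Hypothetical demo of chunks() and package_file() on toy data."""
    data = list(range(10))
    # chunks() yields fixed-size slices; the last one may be shorter
    print(list(chunks(data, 4)))        # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    # package_file() groups an iterator of file names into batches of n
    files = iter([f"file_{i}.txt" for i in range(5)])
    for batch in package_file(files, 2):
        print(batch)                    # ['file_0.txt', 'file_1.txt'], ..., ['file_4.txt']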
cleaning wikitext dataset | def clean_wikitext(string):
"""cleaning wikitext dataset"""
# contractions
string = string.replace("s '", "s'")
string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string)
# number separators
string = string.replace(" @-@ ", "-")
string = string.replace(" @,@ ", ",")
string = string.replace(" @.@ ", ".")
# punctuation
string = string.replace(" : ", ": ")
string = string.replace(" ; ", "; ")
string = string.replace(" . ", ". ")
string = string.replace(" ! ", "! ")
string = string.replace(" ? ", "? ")
string = string.replace(" , ", ", ")
# double brackets
string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string)
string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string)
string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string)
string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string)
string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string)
# miscellaneous
string = string.replace("= = = =", "====")
string = string.replace("= = =", "===")
string = string.replace("= =", "==")
string = string.replace(" " + chr(176) + " ", chr(176))
string = string.replace(" \n", "\n")
string = string.replace("\n ", "\n")
string = string.replace(" N ", " 1 ")
string = string.replace(" 's", "'s")
return string |
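A hedged before/after example (not part of the original code) showing the kind of wikitext artifacts that clean_wikitext() above removes; the sample sentence is made up. | def example_clean_wikitext():
    """Hypothetical demo of clean_wikitext() on a typical raw fragment."""
    raw = "It opened in 1850 , drawing 1 @,@ 200 visitors ( approx )"
    print(clean_wikitext(raw))          # "It opened in 1850, drawing 1,200 visitors (approx)"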
tokenize openwebtext dataset | def tokenize_openwebtext(tokenizer, iterator, seq_length, eot):
"""tokenize openwebtext dataset"""
for file_path in iterator:
if os.path.getsize(file_path) == 0:
continue
content = []
with open(file_path, "r", encoding="utf-8") as f:
for para in f.read().split("\n\n"):
if para:
tokenized_text = tokenizer.tokenize(para)
content += tokenizer.convert_tokens_to_ids(
tokenized_text
) + [eot]
for chunk in chunks(content, seq_length):
sample = {}
if len(chunk) == seq_length:
sample["input_ids"] = np.array(chunk, dtype=np.int32)
yield sample |
tokenize wikitext-2/wikitext-103 dataset | def tokenize_wiki(tokenizer, file_path, seq_length, eot):
"""tokenize wikitext-2/wikitext-103 dataset"""
content = []
with open(file_path, "r", encoding="utf-8") as f:
for para in clean_wikitext(f.read()).split("\n\n"):
            if para and not para.strip().startswith("="):
tokenized_text = tokenizer.tokenize(para)
content += tokenizer.convert_tokens_to_ids(tokenized_text) + [
eot
]
for chunk in chunks(content, seq_length):
sample = {}
if len(chunk) == seq_length:
sample["input_ids"] = np.array(chunk, dtype=np.int32)
yield sample |
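A self-contained sketch (assumptions: StubTokenizer is a made-up stand-in for the real vocab tokenizer, and the temp file replaces a real wikitext dump) that drives tokenize_wiki() above end to end and shows how token ids are packed into fixed-length samples. | def example_tokenize_wiki_with_stub():
    """Hypothetical demo: run tokenize_wiki() with a stub tokenizer on a temp file."""
    import os
    import tempfile

    class StubTokenizer:
        # stand-in tokenizer: whitespace split, token length used as the id
        def tokenize(self, text):
            return text.split()

        def convert_tokens_to_ids(self, tokens):
            return [len(t) for t in tokens]

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("hello world foo bar\n\nbaz qux quux corge")
        path = f.name
    for sample in tokenize_wiki(StubTokenizer(), path, seq_length=4, eot=999):
        print(sample["input_ids"])      # two full chunks of 4 ids; the remainder is dropped
    os.remove(path)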
tokenize lambada dataset | def tokenize_lambada(tokenizer, file_path, seq_length, eot):
"""tokenize lambada dataset"""
content = []
with open(file_path, "r", encoding="utf-8") as f:
for line in f.readlines():
para = (
json.loads(line)["text"]
.replace("“", '"')
.replace("”", '"')
.strip()
.strip(".")
)
tokenized_text = tokenizer.tokenize(para)
content += tokenizer.convert_tokens_to_ids(tokenized_text) + [eot]
for chunk in chunks(content, seq_length):
sample = {}
if len(chunk) == seq_length:
sample["input_ids"] = np.array(chunk, dtype=np.int32)
yield sample |
task for each process | def task_unit(iterator, tokenizer, seq_length, eot, parallel_writer=True):
"""task for each process"""
p = current_process()
index = p.pid if p.pid else 0
item_iter = tokenize_openwebtext(tokenizer, iterator, seq_length, eot)
batch_size = 1024 # size of write batch
count = 0
while True:
data_batch = []
try:
for _ in range(batch_size):
data_batch.append(next(item_iter))
count += 1
writer.write_raw_data(data_batch, parallel_writer=parallel_writer)
print("Process {} transformed {} records.".format(index, count))
except StopIteration:
if data_batch:
writer.write_raw_data(
data_batch, parallel_writer=parallel_writer
)
print("Process {} transformed {} records.".format(index, count))
break |
Calculate the communication group of model parallel dim in one pipeline stage | def _get_model_parallel_group(mp):
"""
Calculate the communication group of model parallel dim in one pipeline stage
"""
rank = get_rank()
stage_nums = auto_parallel_context().get_pipeline_stages()
device_nums = get_group_size()
per_stage_device_nums = device_nums // stage_nums
stage_id = rank // per_stage_device_nums
local_stage_rank_id = rank % per_stage_device_nums
index = local_stage_rank_id // mp
group = range(0, mp)
rank_str_list = [str(x + index * mp + stage_id * per_stage_device_nums) for x in group]
rank_list_str = "-".join(rank_str_list)
rank_list = [x + index * mp + stage_id * per_stage_device_nums for x in group]
return rank_list, rank_list_str |
Calculate the communication group between all pipeline stages | def _get_pipeline_group():
"""
Calculate the communication group between all pipeline stages
"""
rank = get_rank()
stage_nums = auto_parallel_context().get_pipeline_stages()
device_nums = get_group_size()
per_stage_device_nums = device_nums // stage_nums
local_stage_rank_id = rank % per_stage_device_nums
group = range(0, stage_nums)
rank_list = [local_stage_rank_id + x * per_stage_device_nums for x in group]
rank_str_list = [str(local_stage_rank_id + x * per_stage_device_nums) for x in group]
rank_list_str = "-".join(rank_str_list)
return rank_list, rank_list_str |
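A pure-Python sketch (assumed values, no MindSpore communication context) that reproduces the rank arithmetic of the two helpers above, so the group layout can be inspected without launching a distributed job. | def example_comm_groups(rank=5, device_nums=16, stage_nums=2, mp=4):
    """Hypothetical demo of the model-parallel and pipeline group layout."""
    per_stage_device_nums = device_nums // stage_nums     # 8 devices per stage
    stage_id = rank // per_stage_device_nums               # pipeline stage of this rank
    local_stage_rank_id = rank % per_stage_device_nums     # rank inside the stage
    # model-parallel group: mp consecutive ranks inside the same stage
    index = local_stage_rank_id // mp
    mp_group = [x + index * mp + stage_id * per_stage_device_nums for x in range(mp)]
    # pipeline group: the same local rank across every stage
    pp_group = [local_stage_rank_id + x * per_stage_device_nums for x in range(stage_nums)]
    print(mp_group, pp_group)                               # [4, 5, 6, 7] [5, 13]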
Add inference params | def add_inference_params(opt):
"""Add inference params"""
opt.add_argument("--frequency_penalty",
type=float,
default=1.5,
help="coefficient for frequency_penalty")
opt.add_argument("--presence_penalty",
type=float,
default=0.3,
help="coefficient for presence_penalty")
opt.add_argument("--max_generate_length",
type=int,
default=2048,
                     help="the maximum number of generated tokens")
opt.add_argument("--top_k_num",
type=int,
default=3,
help="the number for top_k sampling")
opt.add_argument("--top_p",
type=float,
default=1.0,
help="top_p sampling threshold, enabled if less than 1.0")
opt.add_argument("--end_token",
type=int,
default=50256,
help="the token id for <end of document>")
opt.add_argument("--use_pynative_op",
type=int,
default=0,
                     help="Whether to use pynative op for postprocess")
opt.add_argument("--use_past",
type=str,
default="true",
choices=["true", "false"],
                     help="Whether to enable state reuse")
Add training params | def add_training_params(opt):
"""Add training params"""
opt.add_argument("--seq_length",
type=int,
default=2048,
help="sequence length, default is 2048.")
opt.add_argument("--vocab_size",
type=int,
default=40000,
help="vocabulary size, default is 40000.")
opt.add_argument("--embedding_size",
type=int,
default=16384,
help="embedding table size, default is 16384.")
opt.add_argument("--num_layers",
type=int,
default=64,
help="total layers, default is 64.")
opt.add_argument("--num_heads",
type=int,
default=128,
                     help="number of attention heads, default is 128.")
opt.add_argument("--stage_num",
type=int,
default=1,
help="Pipeline stage num, default is 1.")
opt.add_argument("--micro_size",
type=int,
default=1,
help="Pipeline micro_size, default is 1.")
opt.add_argument("--eod_reset",
type=int,
default=1,
help="Enable eod mask, default is 1.")
opt.add_argument("--warmup_step",
type=int,
default=2000,
help="Warmup step, default is 2000.")
opt.add_argument("--decay_steps",
type=int,
default=200000,
help="Decay step, default is 200000.")
opt.add_argument("--optimizer",
type=str,
default="adam",
choices=["adam", "lamb"],
                     help="select which optimizer to use, default is adam")
opt.add_argument("--opt_offload",
type=int, default=0,
help="Enable optimizer status offload to host CPU, default is 0")
opt.add_argument("--use_moe",
type=int, default=0,
help="Use moe, default is 0")
opt.add_argument("--per_dp_dim_expert_num",
type=int, default=1,
                     help="Number of experts in one data parallel dim; only effective when MoE is applied, default is 1")
opt.add_argument("--eod_id",
type=int, default=50256,
help="The id of end of document")
opt.add_argument("--epoch_size",
type=int, default=1,
help="The training epoch")
opt.add_argument("--sink_size",
type=int, default=2,
help="The sink size of the training. default is 2")
opt.add_argument("--full_batch",
default=1, type=int,
                     help="Whether each card imports the full batch, default is 1")
opt.add_argument("--optimizer_shard",
type=int,
default=1,
help="Enable optimizer parallel, default is 1")
opt.add_argument("--per_batch_size",
type=int,
default=0,
                     help="The batch size for each data parallel way. default is 0")
opt.add_argument("--start_lr",
type=float,
default=5e-5,
help="The start learning rate. default 5e-5")
opt.add_argument("--dropout_rate",
type=float,
default=0.1,
help="The dropout rate. default 0.1")
opt.add_argument("--end_lr",
type=float,
default=1e-6,
help="The end learning rate. default 1e-6")
opt.add_argument("--op_level_model_parallel_num",
type=int,
default=8,
                     help="The op-level model parallel number. default is 8")
opt.add_argument("--word_emb_dp",
type=int, default=1,
choices=[0, 1],
                     help="Whether to do data parallel in the word embedding. default is 1")
opt.add_argument("--gradient_aggregation_group",
type=int, default=4,
help="The gradient communication fusion group. default 4")
opt.add_argument("--data_column_name",
type=str, default="input_ids",
help="Column name of datasets") |
Add parameters about retrain. | def add_retrain_params(opt):
"""
Add parameters about retrain.
"""
opt.add_argument("--pre_trained",
type=str,
default=None,
help="Pretrained checkpoint path.")
opt.add_argument("--save_checkpoint_path",
type=str,
default=None,
help="Save checkpoint path.")
opt.add_argument("--save_checkpoint_obs_path",
type=str,
default=None,
help="Save checkpoint path on OBS.")
opt.add_argument("--keep_checkpoint_max",
type=int,
default=1,
help="Max checkpoint save number.")
opt.add_argument("--save_checkpoint_steps",
type=int,
default=2000,
help="Save checkpoint step number.")
opt.add_argument("--save_checkpoint",
type=ast.literal_eval,
default=False,
help="Whether save checkpoint in local disk.")
opt.add_argument("--ckpt_name_prefix",
type=str,
default="pangu",
help="Saving checkpoint name prefix.")
opt.add_argument("--has_trained_epoches",
type=int,
default=0,
                     help="Epochs that have been trained before.")
opt.add_argument("--has_trained_steps",
type=int,
default=0,
                     help="Steps that have been trained before.")
Parse and return command-line arguments for PanguAlpha training and inference. | def get_args(inference=False):
    """Parse and return command-line arguments for PanguAlpha training and inference."""
parser = argparse.ArgumentParser(description="PanguAlpha training")
parser.add_argument('--device_id',
type=int,
default=0,
help="Device id, default is 0.")
parser.add_argument("--device_num",
type=int,
default=128,
help="Use device nums, default is 128.")
parser.add_argument("--distribute",
type=str,
default="true",
choices=["true", "false"],
help="Run distribute, default is true.")
parser.add_argument("--load_ckpt_name",
type=str,
default=None,
                        help="checkpoint file name.")
parser.add_argument("--load_ckpt_path",
type=str,
default=None,
help="checkpoint file path.")
parser.add_argument("--load_ckpt_epoch",
type=int,
default=None,
help="checkpoint epoch.")
parser.add_argument('--code_data',
type=str,
required=True,
help='Location of code data.')
parser.add_argument("--tb_dir",
type=str,
required=True,
help="Location of tensorboard log")
parser.add_argument("--language",
type=str,
default=None,
help="Language of task")
parser.add_argument("--part",
type=int,
default=None,
help="Part of task")
parser.add_argument('--eval_data_url',
required=False,
default=None,
help='Location of eval data.')
parser.add_argument('--train_url',
required=False,
default=None,
help='Location of training outputs.')
parser.add_argument("--run_type",
type=str,
default="predict",
choices=["train", "predict"],
help="The run type")
parser.add_argument("--mode",
type=str,
default="2.6B",
choices=["200B", "13B", "2.6B", "base", "dev", "self_define"],
help="The scale of the model parameters")
parser.add_argument("--device_target",
type=str,
default="Ascend",
choices=["Ascend", "GPU"],
help="The running device")
parser.add_argument("--strategy_load_ckpt_path",
type=str,
default="",
                        help="The training parallel strategy for the model.")
parser.add_argument("--tokenizer_path",
type=str,
default="./tokenizer_path",
                        help="The path that stores the vocab and vocab model files")
parser.add_argument("--param_init_type",
type=str,
default="fp32",
help="The initialization type for parameters. Default fp32.")
parser.add_argument("--offline",
type=int,
default=1,
                        help="Running on cloud or not. Default 1.")
parser.add_argument("--export",
type=int,
default=0,
                        help="Whether to export MindIR for serving.")
parser.add_argument("--incremental_training",
type=int,
default=0,
help="Enable incremental training. Default 0.")
parser.add_argument("--train_and_eval_mode",
type=int,
default=0,
                        help="Enable evaluating while training. Default 0.")
parser.add_argument("--eval_steps",
type=int,
default=10,
help="The eval step in train and eval mode. Default 10.")
parser.add_argument(
"--profiling",
type=int,
default=0,
help="Enable profiling. Default 0",
)
parser.add_argument(
"--micro_interleaved_size",
type=int,
default=1,
help="Enable MicroInterLeaved when micro_interleaved_size > 1. Default 1",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="Temperature for inference. Default 1.0",
)
add_training_params(parser)
add_retrain_params(parser)
if inference:
add_inference_params(parser)
args_opt = parser.parse_args()
return args_opt |
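A hedged usage sketch (paths and values below are placeholders, not real data locations) showing how get_args() above is typically driven; sys.argv is overridden only for the demo. | def example_parse_args():
    """Hypothetical demo: parse a minimal predict-mode command line."""
    import sys
    sys.argv = ["launch.py",
                "--code_data", "/path/to/code_data",
                "--tb_dir", "/tmp/tb",
                "--mode", "2.6B",
                "--run_type", "predict"]
    args = get_args(inference=True)
    print(args.mode, args.run_type, args.top_k_num, args.temperature)   # 2.6B predict 3 1.0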
Download the dataset from the obs.
src_data_url (Str): should be the dataset path in the obs
tgt_data_path (Str): the local dataset path
rank (Int): the current rank id | def download_data(src_data_url, tgt_data_path, rank):
"""
Download the dataset from the obs.
src_data_url (Str): should be the dataset path in the obs
tgt_data_path (Str): the local dataset path
rank (Int): the current rank id
"""
cache_url = tgt_data_path
EXEC_PATH = "/tmp"
if rank % 8 == 0:
import moxing as mox
print("Modify the time out from 300 to 30000")
print("begin download dataset", flush=True)
if not os.path.exists(cache_url):
os.makedirs(cache_url, exist_ok=True)
mox.file.copy_parallel(src_url=src_data_url, dst_url=cache_url)
print("Dataset download succeed!", flush=True)
f = open("%s/install.txt" % (EXEC_PATH), "w")
f.close()
    # Block until rank 0 has finished downloading and created the flag file
while not os.path.exists("%s/install.txt" % (EXEC_PATH)):
time.sleep(1) |
Mindspore's fast gelu implementation. | def fast_gelu(x):
"""Mindspore's fast gelu implementation."""
if hasattr(torch._C, 'quick_gelu'):
return torch._C.quick_gelu(x)
return x / (1 + torch.exp(-1.702 * torch.abs(x))) * torch.exp(0.851 * (x - torch.abs(x))) |
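A small numerical check, added for clarity rather than taken from the sources: the numerically stable expression used in fast_gelu above is algebraically equal to the usual sigmoid form x * sigmoid(1.702 * x). | def example_fast_gelu_identity():
    """Sanity check that the stable fast-gelu formula matches x * sigmoid(1.702 * x)."""
    import torch
    x = torch.linspace(-6.0, 6.0, steps=101)
    stable = x / (1 + torch.exp(-1.702 * torch.abs(x))) * torch.exp(0.851 * (x - torch.abs(x)))
    naive = x * torch.sigmoid(1.702 * x)
    assert torch.allclose(stable, naive, atol=1e-6)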
Build masks and position id for left to right model. | def get_ltor_masks_and_position_ids(
data,
eod_token,
reset_position_ids,
reset_attention_mask,
):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()
# Attention mask (lower triangular).
if reset_attention_mask:
att_mask_batch = micro_batch_size
else:
att_mask_batch = 1
attention_mask = torch.tril(
torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
).view(att_mask_batch, 1, seq_length, seq_length)
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data)
    # We need to clone as the ids will be modified based on the batch index.
if reset_position_ids:
position_ids = position_ids.clone()
if reset_position_ids or reset_attention_mask:
# Loop through the batches:
for b in range(micro_batch_size):
            # Find indices where the EOD token is.
            eod_index = position_ids[b, data[b] == eod_token]
            # Detach indices from positions if we are going to modify positions.
            if reset_position_ids:
                eod_index = eod_index.clone()
            # Loop through EOD indices:
prev_index = 0
for j in range(eod_index.size()[0]):
i = eod_index[j]
# Mask attention loss.
if reset_attention_mask:
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
# Reset positions.
if reset_position_ids:
position_ids[b, (i + 1) :] -= i + 1 - prev_index
prev_index = i + 1
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, position_ids |
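A CPU-only sketch (toy token ids, EOD id 0 chosen arbitrarily) showing how get_ltor_masks_and_position_ids() above resets positions and blocks cross-document attention at an EOD boundary. | def example_eod_reset():
    """Hypothetical demo of EOD-based position reset and attention masking."""
    import torch
    data = torch.tensor([[5, 6, 0, 7, 8, 9, 0, 3]])   # two documents separated by EOD id 0
    mask, pos = get_ltor_masks_and_position_ids(
        data, eod_token=0, reset_position_ids=True, reset_attention_mask=True
    )
    print(pos)               # tensor([[0, 1, 2, 0, 1, 2, 3, 0]])
    print(mask.int()[0, 0])  # 1 marks blocked pairs: future tokens and earlier documents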
Generate batch from context tokens. | def get_batch(
context_tokens,
micro_batch_size,
eod_token,
reset_position_ids=False,
reset_attention_mask=False,
):
"""Generate batch from context tokens."""
tokens = context_tokens.view(micro_batch_size, -1).contiguous().cuda()
    # Get the attention mask and position ids.
attention_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
eod_token,
reset_position_ids,
reset_attention_mask,
)
return tokens, attention_mask, position_ids |
This function has been mostly taken from huggingface conversational
ai code at
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
conversational-ai-with-transfer-learning-2d818ac26313 | def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
"""This function has been mostly taken from huggingface conversational
ai code at
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
conversational-ai-with-transfer-learning-2d818ac26313"""
if top_k > 0:
# Remove all tokens with a probability less than the
# last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
        # Convert to 1D
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token
# above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for i in range(sorted_indices.size(0)):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i][indices_to_remove] = filter_value
return logits |
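A toy sampling sketch (the logits values are made up) that applies top_k_logits() above and then samples one token; note the function filters in place, hence the clone(). | def example_topk_topp_sampling():
    """Hypothetical demo: filter toy logits with top-k / top-p and sample."""
    import torch
    import torch.nn.functional as F
    logits = torch.tensor([[2.0, 1.0, 0.5, -1.0, -3.0]])
    filtered = top_k_logits(logits.clone(), top_k=3, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)              # filtered entries get probability ~0
    next_token = torch.multinomial(probs, num_samples=1)
    print(filtered, next_token)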
Mindspore's fast gelu implementation. | def fast_gelu(x):
"""Mindspore's fast gelu implementation."""
return x / (1 + paddle.exp(-1.702 * paddle.abs(x))) * paddle.exp(0.851 * (x - paddle.abs(x))) |
Build masks and position id for left to right model. | def get_ltor_masks_and_position_ids(
data,
eod_token,
reset_position_ids,
reset_attention_mask,
):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.shape
# Attention mask (lower triangular).
if reset_attention_mask:
att_mask_batch = micro_batch_size
else:
att_mask_batch = 1
attention_mask = paddle.tril(
paddle.ones((att_mask_batch, seq_length, seq_length))
).reshape([att_mask_batch, 1, seq_length, seq_length])
# Position ids.
position_ids = paddle.arange(seq_length, dtype="int64")
position_ids = position_ids.unsqueeze(0).expand_as(data)
    # We need to clone as the ids will be modified based on the batch index.
if reset_position_ids:
position_ids = position_ids.clone()
if reset_position_ids or reset_attention_mask:
# Loop through the batches:
for b in range(micro_batch_size):
            # Find indices where the EOD token is.
            eod_index = position_ids[b, data[b] == eod_token]
            # Detach indices from positions if we are going to modify positions.
            if reset_position_ids:
                eod_index = eod_index.clone()
            # Loop through EOD indices:
prev_index = 0
for j in range(eod_index.shape[0]):
i = eod_index[j]
# Mask attention loss.
if reset_attention_mask:
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
# Reset positions.
if reset_position_ids:
position_ids[b, (i + 1) :] -= i + 1 - prev_index
prev_index = i + 1
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, position_ids |
Generate batch from context tokens. | def get_batch(
context_tokens,
micro_batch_size,
eod_token,
reset_position_ids=False,
reset_attention_mask=False,
):
"""Generate batch from context tokens."""
tokens = context_tokens.reshape([micro_batch_size, -1]).cuda()
    # Get the attention mask and position ids.
attention_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
eod_token,
reset_position_ids,
reset_attention_mask,
)
return tokens, attention_mask, position_ids |
This function has been mostly taken from huggingface conversational
ai code at
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
conversational-ai-with-transfer-learning-2d818ac26313 | def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
"""This function has been mostly taken from huggingface conversational
ai code at
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
conversational-ai-with-transfer-learning-2d818ac26313"""
if top_k > 0:
# Remove all tokens with a probability less than the
# last token of the top-k
indices_to_remove = logits < paddle.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
        # Convert to 1D
sorted_logits, sorted_indices = paddle.sort(logits, descending=True, axis=-1)
cumulative_probs = paddle.cumsum(F.softmax(sorted_logits, axis=-1), axis=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token
# above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for i in range(sorted_indices.shape[0]):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i][indices_to_remove] = filter_value
return logits |
Replace fp16 linear with quantized linear | def quantize(model, weight_bit_width, backend="torch"):
"""Replace fp16 linear with quantized linear"""
for i in range(len(model.language_model.transformer.layers) + 1):
if i == len(model.language_model.transformer.layers):
layer = model.language_model.transformer.topQueryLayer
else:
layer = model.language_model.transformer.layers[i]
if backend == "torch":
layer.attention.query = QuantizedLinear(
in_features=layer.attention.query.in_features,
out_features=layer.attention.query.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.query.weight.to(torch.cuda.current_device()),
bias=layer.attention.query.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.query.weight.device,
)
layer.attention.value = QuantizedLinear(
in_features=layer.attention.value.in_features,
out_features=layer.attention.value.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.value.weight.to(torch.cuda.current_device()),
bias=layer.attention.value.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.value.weight.device,
)
layer.attention.key = QuantizedLinear(
in_features=layer.attention.key.in_features,
out_features=layer.attention.key.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.key.weight.to(torch.cuda.current_device()),
bias=layer.attention.key.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.key.weight.device,
)
layer.attention.dense = QuantizedLinear(
in_features=layer.attention.dense.in_features,
out_features=layer.attention.dense.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.dense.weight.to(torch.cuda.current_device()),
bias=layer.attention.dense.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.dense.weight.device,
)
layer.mlp.dense_h_to_4h = QuantizedLinear(
in_features=layer.mlp.dense_h_to_4h.in_features,
out_features=layer.mlp.dense_h_to_4h.out_features,
weight_bit_width=weight_bit_width,
weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
bias=layer.mlp.dense_h_to_4h.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.mlp.dense_h_to_4h.weight.device,
)
layer.mlp.dense_4h_to_h = QuantizedLinear(
in_features=layer.mlp.dense_4h_to_h.in_features,
out_features=layer.mlp.dense_4h_to_h.out_features,
weight_bit_width=weight_bit_width,
weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
bias=layer.mlp.dense_4h_to_h.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.mlp.dense_4h_to_h.weight.device,
)
elif backend == "megatron":
layer.attention.query = QuantizedColumnParallelLinear(
weight_bit_width=weight_bit_width,
weight=layer.attention.query.weight.to(torch.cuda.current_device()),
bias=layer.attention.query.bias.to(torch.cuda.current_device()),
input_size=layer.attention.query.input_size,
output_size=layer.attention.query.output_size,
gather_output=False,
skip_init=True,
params_dtype=torch.half,
device=layer.attention.query.weight.device,
)
layer.attention.value = QuantizedColumnParallelLinear(
weight_bit_width=weight_bit_width,
weight=layer.attention.value.weight.to(torch.cuda.current_device()),
bias=layer.attention.value.bias.to(torch.cuda.current_device()),
input_size=layer.attention.value.input_size,
output_size=layer.attention.value.output_size,
gather_output=False,
skip_init=True,
params_dtype=torch.half,
device=layer.attention.value.weight.device,
)
layer.attention.key = QuantizedColumnParallelLinear(
weight_bit_width=weight_bit_width,
weight=layer.attention.key.weight.to(torch.cuda.current_device()),
bias=layer.attention.key.bias.to(torch.cuda.current_device()),
input_size=layer.attention.key.input_size,
output_size=layer.attention.key.output_size,
gather_output=False,
skip_init=True,
params_dtype=torch.half,
device=layer.attention.key.weight.device,
)
layer.attention.dense = QuantizedRowParallelLinear(
weight_bit_width=weight_bit_width,
weight=layer.attention.dense.weight.to(torch.cuda.current_device()),
bias=layer.attention.dense.bias.to(torch.cuda.current_device()),
input_size=layer.attention.dense.input_size,
output_size=layer.attention.dense.output_size,
input_is_parallel=False,
skip_init=True,
skip_bias_add=True,
params_dtype=torch.half,
device=layer.attention.dense.weight.device,
)
layer.mlp.dense_h_to_4h = QuantizedColumnParallelLinear(
weight_bit_width=weight_bit_width,
weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
bias=layer.mlp.dense_h_to_4h.bias.to(torch.cuda.current_device()),
input_size=layer.mlp.dense_h_to_4h.input_size,
output_size=layer.mlp.dense_h_to_4h.output_size,
gather_output=False,
skip_init=True,
params_dtype=torch.half,
device=layer.mlp.dense_h_to_4h.weight.device,
)
layer.mlp.dense_4h_to_h = QuantizedRowParallelLinear(
weight_bit_width=weight_bit_width,
weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
bias=layer.mlp.dense_4h_to_h.bias.to(torch.cuda.current_device()),
input_size=layer.mlp.dense_4h_to_h.input_size,
output_size=layer.mlp.dense_4h_to_h.output_size,
input_is_parallel=False,
skip_init=True,
params_dtype=torch.half,
device=layer.mlp.dense_4h_to_h.weight.device,
)
return model |
Replace fp16 linear with quantized linear | def quantize_oneflow(model, weight_bit_width):
"""Replace fp16 linear with quantized linear"""
for i in range(len(model.language_model.transformer.layers) + 1):
if i == len(model.language_model.transformer.layers):
layer = model.language_model.transformer.topQueryLayer
else:
layer = model.language_model.transformer.layers[i]
layer.attention.query = QuantizedLinear(
in_features=layer.attention.query.in_features,
out_features=layer.attention.query.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.query.weight.to(torch.cuda.current_device()),
bias=layer.attention.query.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.query.weight.device,
)
layer.attention.value = QuantizedLinear(
in_features=layer.attention.value.in_features,
out_features=layer.attention.value.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.value.weight.to(torch.cuda.current_device()),
bias=layer.attention.value.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.value.weight.device,
)
layer.attention.key = QuantizedLinear(
in_features=layer.attention.key.in_features,
out_features=layer.attention.key.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.key.weight.to(torch.cuda.current_device()),
bias=layer.attention.key.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.key.weight.device,
)
layer.attention.dense = QuantizedLinear(
in_features=layer.attention.dense.in_features,
out_features=layer.attention.dense.out_features,
weight_bit_width=weight_bit_width,
weight=layer.attention.dense.weight.to(torch.cuda.current_device()),
bias=layer.attention.dense.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.attention.dense.weight.device,
)
layer.mlp.dense_h_to_4h = QuantizedLinear(
in_features=layer.mlp.dense_h_to_4h.in_features,
out_features=layer.mlp.dense_h_to_4h.out_features,
weight_bit_width=weight_bit_width,
weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),
bias=layer.mlp.dense_h_to_4h.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.mlp.dense_h_to_4h.weight.device,
)
layer.mlp.dense_4h_to_h = QuantizedLinear(
in_features=layer.mlp.dense_4h_to_h.in_features,
out_features=layer.mlp.dense_4h_to_h.out_features,
weight_bit_width=weight_bit_width,
weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),
bias=layer.mlp.dense_4h_to_h.bias.to(torch.cuda.current_device()),
params_dtype=torch.half,
device=layer.mlp.dense_4h_to_h.weight.device,
)
return model |
Encode whitespaces to extra tokens.
>>> encode_whitespaces('a\n b\n c', 10, 10)
'a\n<|extratoken_10|>b\n<|extratoken_11|>c' | def encode_whitespaces(text: str, start_extra_id: int, max_len: int):
""" Encode whitespaces to extra tokens.
>>> encode_whitespaces('a\\n b\\n c', 10, 10)
'a\\n<|extratoken_10|>b\\n<|extratoken_11|>c'
"""
for i in np.arange(max_len, 1, -1):
text = text.replace(" " * i, f"<|extratoken_{start_extra_id + i - 2}|>")
return text |
Decode the whitespace-encoded strings produced by encode_whitespace.
>>> text = 'a\n b\n c'
>>> s, l = 10, 10
>>> text == decode_whitespaces(encode_whitespaces(text, s, l), s, l)
True | def decode_whitespaces(text: str, start_extra_id: int, max_len: int):
""" Decode the whitespace-encoded strings produced by encode_whitespace.
>>> text = 'a\\n b\\n c'
>>> s, l = 10, 10
>>> text == decode_whitespaces(encode_whitespaces(text, s, l), s, l)
True
"""
for l in range(2, max_len + 1):
token_id = start_extra_id - 2 + l
token = f'<|extratoken_{token_id}|>'
text = text.replace(token, ' ' * l)
return text |
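A round-trip sketch (the code snippet is invented) showing how a 4-space indent becomes a single extra token under the scheme above and is recovered exactly on decode. | def example_whitespace_roundtrip():
    """Hypothetical demo: whitespace encode/decode round trip."""
    code = "def f():\n    return 1"
    encoded = encode_whitespaces(code, start_extra_id=10, max_len=10)
    print(encoded)                                   # "def f():\n<|extratoken_12|>return 1"
    assert decode_whitespaces(encoded, 10, 10) == code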
Mindspore's fast gelu implementation. | def fast_gelu(x):
"""Mindspore's fast gelu implementation."""
return x / (1 + torch.exp(-1.702 * torch.abs(x))) * torch.exp(0.851 * (x - torch.abs(x))) |
Build masks and position id for left to right model. | def get_ltor_masks_and_position_ids(
data,
eod_token,
reset_position_ids,
reset_attention_mask,
):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()
# Attention mask (lower triangular).
if reset_attention_mask:
att_mask_batch = micro_batch_size
else:
att_mask_batch = 1
attention_mask = torch.tril(
torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
).view(att_mask_batch, 1, seq_length, seq_length)
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data)
    # We need to clone as the ids will be modified based on the batch index.
if reset_position_ids:
position_ids = position_ids.clone()
if reset_position_ids or reset_attention_mask:
# Loop through the batches:
for b in range(micro_batch_size):
            # Find indices where the EOD token is.
            eod_index = position_ids[b, data[b] == eod_token]
            # Detach indices from positions if we are going to modify positions.
            if reset_position_ids:
                eod_index = eod_index.clone()
            # Loop through EOD indices:
prev_index = 0
for j in range(eod_index.size()[0]):
i = eod_index[j]
# Mask attention loss.
if reset_attention_mask:
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
# Reset positions.
if reset_position_ids:
position_ids[b, (i + 1) :] -= i + 1 - prev_index
prev_index = i + 1
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, position_ids |
Generate batch from context tokens. | def get_batch(
context_tokens,
micro_batch_size,
eod_token,
reset_position_ids=False,
reset_attention_mask=False,
):
"""Generate batch from context tokens."""
tokens = context_tokens.view(micro_batch_size, -1).contiguous().cuda()
    # Get the attention mask and position ids.
attention_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
eod_token,
reset_position_ids,
reset_attention_mask,
)
return tokens, attention_mask, position_ids |
This function has been mostly taken from huggingface conversational
ai code at
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
conversational-ai-with-transfer-learning-2d818ac26313 | def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
"""This function has been mostly taken from huggingface conversational
ai code at
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
conversational-ai-with-transfer-learning-2d818ac26313"""
if top_k > 0:
# Remove all tokens with a probability less than the
# last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
        # Convert to 1D
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token
# above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for i in range(sorted_indices.size(0)):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i][indices_to_remove] = filter_value
return logits |
Build the model. | def model_provider(args):
"""Build the model."""
model = CodeGeeXModel(
args.hidden_size,
args.num_layers,
args.num_attention_heads,
args.padded_vocab_size,
args.max_position_embeddings
)
return model |
Build the model. | def model_provider(args):
"""Build the model."""
model = CodeGeeXModel(
args.hidden_size,
args.num_layers,
args.num_attention_heads,
args.padded_vocab_size,
args.max_position_embeddings
)
return model |
Build the model. | def model_provider(pre_process=True, post_process=True):
"""Build the model."""
print_rank_0("Building CodeGeeX model ...")
model = CodeGeeXModel(num_tokentypes=0,
parallel_output=False)
return model |
Code generation arguments. | def add_code_generation_args(parser):
"""Code generation arguments."""
group = parser.add_argument_group(title="code generation")
group.add_argument(
"--temperature",
type=float,
default=1.0,
help="Sampling temperature.",
)
group.add_argument(
"--greedy",
action="store_true",
default=False,
help="Use greedy sampling.",
)
group.add_argument(
"--top-p",
type=float,
default=0.0,
help="Top p sampling.",
)
group.add_argument(
"--top-k",
type=int,
default=0,
help="Top k sampling.",
)
group.add_argument(
"--out-seq-length",
type=int,
default=2048,
help="Size of the output generated text.",
)
group.add_argument(
"--recompute",
action="store_true",
help="During generation recompute all attention "
"instead of using previously computed keys/values.",
)
group.add_argument(
"--ws-encoding-start-id",
type=int,
default=10,
help="Start id for whitespace encoding",
)
group.add_argument(
"--ws-encoding-length",
type=int,
default=10,
help="Length of whitespace encoding",
)
group.add_argument(
"--n-generation",
type=int,
default=10,
)
group.add_argument(
"--eos-id",
type=int,
default=50256,
)
group.add_argument(
"--prompt-file",
type=str,
default="./test_prompt.txt",
)
group.add_argument(
"--perf-file",
type=str,
default="./perf_out.txt",
)
group.add_argument(
"--perf-trace",
type=str,
default="./perf_out.txt",
)
group.add_argument(
"--use-torch-profile",
action="store_true",
)
group.add_argument(
"--ln-fp32",
action="store_true",
)
group.add_argument(
'--bad-ids',
nargs="*",
type=int,
default=None,
        help='Token ids that are banned during generation',
)
group.add_argument(
"--quantize",
action="store_true",
)
return parser |
Build the model. | def model_provider(args):
"""Build the model."""
model = CodeGeeXModel(
args.hidden_size,
args.num_layers,
args.num_attention_heads,
args.padded_vocab_size,
args.max_position_embeddings
)
return model |
Build the model. | def model_provider(args):
"""Build the model."""
old_dtype = paddle.get_default_dtype()
paddle.set_default_dtype("float16")
model = CodeGeeXModel(
args.hidden_size,
args.num_layers,
args.num_attention_heads,
args.padded_vocab_size,
args.max_position_embeddings
)
model.language_model.embedding.word_embeddings.to(dtype="float32")
model.language_model.embedding.position_embeddings.to(dtype="float32")
model.language_model.topQueryEmbedding.top_query_embeddings.to(dtype="float32")
for i in model.language_model.transformer.layers:
i.input_layernorm.to(dtype="float32")
i.post_attention_layernorm.to(dtype="float32")
model.language_model.transformer.topQueryLayer.input_layernorm.to(dtype="float32")
model.language_model.transformer.topQueryLayer.post_attention_layernorm.to(dtype="float32")
model.language_model.transformer.final_layernorm.to(dtype="float32")
paddle.set_default_dtype(old_dtype)
return model |
If the ini file does not exist, print instructions for creating it and exit | def create_template_ini_file():
"""
    If the ini file does not exist, print instructions for creating it and exit
"""
if not os.path.isfile(API_KEYS_LOCATION):
print('# Please create a file at {} and add your secret key'.format(API_KEYS_LOCATION))
print('# The format is:\n')
print('# [openai]')
print('# organization_id=<organization-id>')
print('# secret_key=<your secret key>\n')
print('# engine=<engine-id>')
sys.exit(1) |
Initialize openAI and shell mode | def initialize():
"""
Initialize openAI and shell mode
"""
global ENGINE
# Check if file at API_KEYS_LOCATION exists
create_template_ini_file()
config = configparser.ConfigParser()
config.read(API_KEYS_LOCATION)
openai.api_key = config['openai']['secret_key'].strip('"').strip("'")
openai.organization = config['openai']['organization_id'].strip('"').strip("'")
ENGINE = config['openai']['engine'].strip('"').strip("'")
prompt_config = {
'engine': ENGINE,
'temperature': TEMPERATURE,
'max_tokens': MAX_TOKENS,
'shell': SHELL,
'multi_turn': MULTI_TURN,
'token_count': 0
}
return PromptFile(PROMPT_CONTEXT.name, prompt_config) |
Check if the content contains sensitive content
Refer to https://beta.openai.com/docs/engines/content-filter for explanation | def is_sensitive_content(content):
"""
Check if the content contains sensitive content
Refer to https://beta.openai.com/docs/engines/content-filter for explanation
"""
if len(content) == 0:
return False
response = openai.Completion.create(
engine="content-filter-alpha",
prompt = "<|endoftext|>"+content+"\n--\nLabel:",
temperature=0,
max_tokens=1,
top_p=0,
logprobs=10
)
output_label = response["choices"][0]["text"]
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return (output_label != "0") |
uses the stdin to get user input
input is either treated as a command or as a Codex query
Returns: command result or context + input from stdin | def get_query(prompt_file):
"""
uses the stdin to get user input
input is either treated as a command or as a Codex query
Returns: command result or context + input from stdin
"""
# get input from terminal or stdin
if DEBUG_MODE:
entry = input("prompt: ") + '\n'
else:
entry = sys.stdin.read()
# first we check if the input is a command
command_result, prompt_file = get_command_result(entry, prompt_file)
    # if the input is not a command, return it so Codex can be queried; otherwise the command has already run, so exit
if command_result == "":
return entry, prompt_file
else:
sys.exit(0) |
Checks if the input is a command and if so, executes it
Currently supported commands:
- start multi-turn
- stop multi-turn
- default context
- show context <n>
- view context
- save context
- clear context
- load context <filename>
- set engine <engine>
- set temperature <temperature>
- set max_tokens <max_tokens>
- set shell <shell>
Returns: command result or "" if no command matched | def get_command_result(input, prompt_file):
"""
Checks if the input is a command and if so, executes it
Currently supported commands:
- start multi-turn
- stop multi-turn
- default context
- show context <n>
- view context
- save context
- clear context
- load context <filename>
- set engine <engine>
- set temperature <temperature>
- set max_tokens <max_tokens>
- set shell <shell>
Returns: command result or "" if no command matched
"""
    if prompt_file is None:
return "", None
config = prompt_file.config
# configuration setting commands
if input.__contains__("set"):
# set temperature <temperature>
if input.__contains__("temperature"):
input = input.split()
if len(input) == 4:
config['temperature'] = float(input[3])
prompt_file.set_config(config)
print("# Temperature set to " + str(config['temperature']))
return "config set", prompt_file
else:
return "", prompt_file
# set max_tokens <max_tokens>
elif input.__contains__("max_tokens"):
input = input.split()
if len(input) == 4:
config['max_tokens'] = int(input[3])
prompt_file.set_config(config)
print("# Max tokens set to " + str(config['max_tokens']))
return "config set", prompt_file
else:
return "", prompt_file
elif input.__contains__("shell"):
input = input.split()
if len(input) == 4:
config['shell'] = input[3]
prompt_file.set_config(config)
print("# Shell set to " + str(config['shell']))
return "config set", prompt_file
else:
return "", prompt_file
elif input.__contains__("engine"):
input = input.split()
if len(input) == 4:
config['engine'] = input[3]
prompt_file.set_config(config)
print("# Engine set to " + str(config['engine']))
return "config set", prompt_file
else:
return "", prompt_file
if input.__contains__("show config"):
prompt_file.show_config()
return "config shown", prompt_file
# multi turn/single turn commands
if input.__contains__("multi-turn"):
# start context
if input.__contains__("start"):
if config['multi_turn'] == 'off':
prompt_file.start_multi_turn()
return "multi turn mode on", prompt_file
return "multi turn mode on", prompt_file
# stop context
if input.__contains__("stop"):
prompt_file.stop_multi_turn()
return "multi turn mode off", prompt_file
# context file commands
if input.__contains__("context"):
if input.__contains__("default"):
prompt_file.default_context()
return "stopped context", prompt_file
# show context <n>
if input.__contains__("show"):
print('\n')
with open(prompt_file.file_name, 'r') as f:
lines = f.readlines()
lines = lines[6:] # skip headers
line_numbers = 0
if len(input.split()) > 3:
line_numbers = int(input.split()[3])
if line_numbers != 0:
for line in lines[-line_numbers:]:
print('\n# '+line, end='')
else:
print('\n# '.join(lines))
return "context shown", prompt_file
# edit context
if input.__contains__("view"):
# open the prompt file in text editor
if config['shell'] != 'powershell':
os.system('open {}'.format(prompt_file.file_path))
else:
os.system('start {}'.format(prompt_file.file_path))
return "context shown", prompt_file
# save context <filename>
if input.__contains__("save"):
# save the current prompt file to a new file
# if filename not specified use the current time (to avoid name conflicts)
filename = time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
if len(input.split()) == 4:
filename = input.split()[3]
prompt_file.save_to(filename)
return "context saved", prompt_file
# clear context
if input.__contains__("clear"):
            # reset the prompt file back to the default context
prompt_file.default_context()
return "unlearned interaction", prompt_file
# load context <filename>
if input.__contains__("load"):
# the input looks like # load context <filename>
# write everything from the file to the prompt file
input = input.split()
if len(input) == 4:
filename = input[3]
prompt_file.load_context(filename)
return "context loaded", prompt_file
print('\n#\tInvalid command format, did you specify which file to load?')
return "context loaded", prompt_file
return "", prompt_file |
creates random noise given a latent image and a seed.
optional arg noise_inds can be used to select which noise draws from the seeded generator are kept for each batch index | def prepare_noise(latent_image, seed, noise_inds=None):
"""
creates random noise given a latent image and a seed.
    optional arg noise_inds can be used to select which noise draws from the seeded generator are kept for each batch index
"""
generator = torch.manual_seed(seed)
if noise_inds is None:
return torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
unique_inds, inverse = np.unique(noise_inds, return_inverse=True)
noises = []
for i in range(unique_inds[-1]+1):
noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
if i in unique_inds:
noises.append(noise)
noises = [noises[i] for i in inverse]
noises = torch.cat(noises, axis=0)
return noises |
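A hedged usage sketch (latent shape and seed are arbitrary) for prepare_noise() above: the same seed reproduces the same noise, and noise_inds reorders and reuses per-batch-index draws from the seeded generator. | def example_prepare_noise():
    """Hypothetical demo of deterministic noise generation."""
    import torch
    latent = torch.zeros(2, 4, 8, 8)
    n1 = prepare_noise(latent, seed=42)
    n2 = prepare_noise(latent, seed=42)
    assert torch.equal(n1, n2)                       # same seed, same noise
    n3 = prepare_noise(latent, seed=42, noise_inds=[1, 0, 1])
    print(n3.shape)                                  # torch.Size([3, 4, 8, 8])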
ensures noise mask is of proper dimensions | def prepare_mask(noise_mask, shape, device):
"""ensures noise mask is of proper dimensions"""
noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear")
noise_mask = torch.cat([noise_mask] * shape[1], dim=1)
noise_mask = comfy.utils.repeat_to_batch_size(noise_mask, shape[0])
noise_mask = noise_mask.to(device)
return noise_mask |
loads additional models in conditioning | def get_additional_models(conds, dtype):
"""loads additional models in conditioning"""
cnets = []
gligen = []
for k in conds:
cnets += get_models_from_cond(conds[k], "control")
gligen += get_models_from_cond(conds[k], "gligen")
control_nets = set(cnets)
inference_memory = 0
control_models = []
for m in control_nets:
control_models += m.get_models()
inference_memory += m.inference_memory_requirements(dtype)
gligen = [x[1] for x in gligen]
models = control_models + gligen
return models, inference_memory |
cleanup additional models that were loaded | def cleanup_additional_models(models):
"""cleanup additional models that were loaded"""
for m in models:
if hasattr(m, 'cleanup'):
m.cleanup() |
Create a wrapper function for the noise prediction model.
DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
We support four types of the diffusion model by setting `model_type`:
1. "noise": noise prediction model. (Trained by predicting noise).
2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
3. "v": velocity prediction model. (Trained by predicting the velocity).
The "v" prediction derivation is detailed in Appendix D of [1], and is used in Imagen-Video [2].
[1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
arXiv preprint arXiv:2202.00512 (2022).
[2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
arXiv preprint arXiv:2210.02303 (2022).
4. "score": marginal score function. (Trained by denoising score matching).
Note that the score function and the noise prediction model follow a simple relationship:
```
noise(x_t, t) = -sigma_t * score(x_t, t)
```
We support three types of guided sampling by DPMs by setting `guidance_type`:
1. "uncond": unconditional sampling by DPMs.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
The input `classifier_fn` has the following format:
``
classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
``
[3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
The input `model` has the following format:
``
model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
``
And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
[4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
arXiv preprint arXiv:2207.12598 (2022).
The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
or continuous-time labels (i.e. epsilon to T).
We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
``
def model_fn(x, t_continuous) -> noise:
t_input = get_model_input_time(t_continuous)
return noise_pred(model, x, t_input, **model_kwargs)
``
where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
===============================================================
Args:
model: A diffusion model with the corresponding format described above.
noise_schedule: A noise schedule object, such as NoiseScheduleVP.
model_type: A `str`. The parameterization type of the diffusion model.
"noise" or "x_start" or "v" or "score".
model_kwargs: A `dict`. A dict for the other inputs of the model function.
guidance_type: A `str`. The type of the guidance for sampling.
"uncond" or "classifier" or "classifier-free".
condition: A pytorch tensor. The condition for the guided sampling.
Only used for "classifier" or "classifier-free" guidance type.
unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
Only used for "classifier-free" guidance type.
guidance_scale: A `float`. The scale for the guided sampling.
classifier_fn: A classifier function. Only used for the classifier guidance.
classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
Returns:
A noise prediction model that accepts the noised data and the continuous time as the inputs. | def model_wrapper(
model,
noise_schedule,
model_type="noise",
model_kwargs={},
guidance_type="uncond",
condition=None,
unconditional_condition=None,
guidance_scale=1.,
classifier_fn=None,
classifier_kwargs={},
):
"""Create a wrapper function for the noise prediction model.
DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
We support four types of the diffusion model by setting `model_type`:
1. "noise": noise prediction model. (Trained by predicting noise).
2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
3. "v": velocity prediction model. (Trained by predicting the velocity).
    The "v" prediction derivation is detailed in Appendix D of [1], and is used in Imagen-Video [2].
[1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
arXiv preprint arXiv:2202.00512 (2022).
[2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
arXiv preprint arXiv:2210.02303 (2022).
4. "score": marginal score function. (Trained by denoising score matching).
    Note that the score function and the noise prediction model follow a simple relationship:
```
noise(x_t, t) = -sigma_t * score(x_t, t)
```
We support three types of guided sampling by DPMs by setting `guidance_type`:
1. "uncond": unconditional sampling by DPMs.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
The input `classifier_fn` has the following format:
``
classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
``
[3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
The input `model` has the following format:
``
model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
``
And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
[4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
arXiv preprint arXiv:2207.12598 (2022).
The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
or continuous-time labels (i.e. epsilon to T).
We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
``
def model_fn(x, t_continuous) -> noise:
t_input = get_model_input_time(t_continuous)
return noise_pred(model, x, t_input, **model_kwargs)
``
where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
===============================================================
Args:
model: A diffusion model with the corresponding format described above.
noise_schedule: A noise schedule object, such as NoiseScheduleVP.
model_type: A `str`. The parameterization type of the diffusion model.
"noise" or "x_start" or "v" or "score".
model_kwargs: A `dict`. A dict for the other inputs of the model function.
guidance_type: A `str`. The type of the guidance for sampling.
"uncond" or "classifier" or "classifier-free".
condition: A pytorch tensor. The condition for the guided sampling.
Only used for "classifier" or "classifier-free" guidance type.
unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
Only used for "classifier-free" guidance type.
guidance_scale: A `float`. The scale for the guided sampling.
classifier_fn: A classifier function. Only used for the classifier guidance.
classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
Returns:
A noise prediction model that accepts the noised data and the continuous time as the inputs.
"""
def get_model_input_time(t_continuous):
"""
Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
For continuous-time DPMs, we just use `t_continuous`.
"""
if noise_schedule.schedule == 'discrete':
return (t_continuous - 1. / noise_schedule.total_N) * 1000.
else:
return t_continuous
def noise_pred_fn(x, t_continuous, cond=None):
if t_continuous.reshape((-1,)).shape[0] == 1:
t_continuous = t_continuous.expand((x.shape[0]))
t_input = get_model_input_time(t_continuous)
output = model(x, t_input, **model_kwargs) if cond is None else model(x, t_input, cond, **model_kwargs)
if model_type == "noise":
return output
elif model_type == "x_start":
alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
dims = x.dim()
return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
elif model_type == "v":
alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
dims = x.dim()
return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
elif model_type == "score":
sigma_t = noise_schedule.marginal_std(t_continuous)
dims = x.dim()
return -expand_dims(sigma_t, dims) * output
def cond_grad_fn(x, t_input):
"""
Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
"""
with torch.enable_grad():
x_in = x.detach().requires_grad_(True)
log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
return torch.autograd.grad(log_prob.sum(), x_in)[0]
def model_fn(x, t_continuous):
"""
The noise prediction model function that is used for DPM-Solver.
"""
if t_continuous.reshape((-1,)).shape[0] == 1:
t_continuous = t_continuous.expand((x.shape[0]))
if guidance_type == "uncond":
return noise_pred_fn(x, t_continuous)
elif guidance_type == "classifier":
assert classifier_fn is not None
t_input = get_model_input_time(t_continuous)
cond_grad = cond_grad_fn(x, t_input)
sigma_t = noise_schedule.marginal_std(t_continuous)
noise = noise_pred_fn(x, t_continuous)
return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
elif guidance_type == "classifier-free":
if guidance_scale == 1. or unconditional_condition is None:
return noise_pred_fn(x, t_continuous, cond=condition)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t_continuous] * 2)
c_in = torch.cat([unconditional_condition, condition])
noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
return noise_uncond + guidance_scale * (noise - noise_uncond)
assert model_type in ["noise", "x_start", "v", "score"]
assert guidance_type in ["uncond", "classifier", "classifier-free"]
return model_fn |
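A minimal usage sketch, added for illustration and not part of the original source: `_ToySchedule` and `_toy_model` below are hypothetical stand-ins that expose only the attributes `model_wrapper` touches; in practice you would pass a `NoiseScheduleVP` instance and a trained diffusion model together with real conditioning tensors.
import math
import torch

class _ToySchedule:
    # Hypothetical stand-in for NoiseScheduleVP (discrete schedule, 1000 training steps).
    schedule = 'discrete'
    total_N = 1000
    def marginal_alpha(self, t):
        return torch.cos(t * math.pi / 2)
    def marginal_std(self, t):
        return torch.sin(t * math.pi / 2)

def _toy_model(x, t_input, cond=None):
    # Pretend epsilon prediction; conditioning only shifts the output per sample.
    shift = 0. if cond is None else 0.01 * cond.mean(dim=1, keepdim=True)
    return torch.zeros_like(x) + shift

model_fn = model_wrapper(
    _toy_model, _ToySchedule(),
    model_type="noise",
    guidance_type="classifier-free",
    condition=torch.ones(2, 4),
    unconditional_condition=torch.zeros(2, 4),
    guidance_scale=7.5,
)
eps = model_fn(torch.randn(2, 4), torch.full((2,), 0.5))  # predicted noise, shape [2, 4]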
A piecewise linear function y = f(x), using xp and yp as keypoints.
We implement f(x) in a differentiable way (i.e. applicable for autograd).
The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
Args:
x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
yp: PyTorch tensor with shape [C, K].
Returns:
The function values f(x), with shape [N, C]. | def interpolate_fn(x, xp, yp):
"""
A piecewise linear function y = f(x), using xp and yp as keypoints.
We implement f(x) in a differentiable way (i.e. applicable for autograd).
The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
Args:
x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
yp: PyTorch tensor with shape [C, K].
Returns:
The function values f(x), with shape [N, C].
"""
N, K = x.shape[0], xp.shape[1]
all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
sorted_all_x, x_indices = torch.sort(all_x, dim=2)
x_idx = torch.argmin(x_indices, dim=2)
cand_start_idx = x_idx - 1
start_idx = torch.where(
torch.eq(x_idx, 0),
torch.tensor(1, device=x.device),
torch.where(
torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
),
)
end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
start_idx2 = torch.where(
torch.eq(x_idx, 0),
torch.tensor(0, device=x.device),
torch.where(
torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
),
)
y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
return cand |
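A short sanity check, added for illustration: with collinear keypoints the interpolation is exactly linear, and a query beyond the last keypoint is extrapolated from the outermost segment.
import torch

xp = torch.tensor([[0.0, 1.0, 2.0]])    # [C=1, K=3] keypoint x-coordinates
yp = torch.tensor([[0.0, 10.0, 20.0]])  # [C=1, K=3] keypoint y-coordinates
x = torch.tensor([[0.5], [3.0]])        # [N=2, C=1] queries; 3.0 lies beyond xp
print(interpolate_fn(x, xp, yp))        # tensor([[ 5.], [30.]])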
Expand the tensor `v` to `dims` dimensions.
Args:
`v`: a PyTorch tensor with shape [N].
`dims`: an `int`.
Returns:
a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. | def expand_dims(v, dims):
"""
Expand the tensor `v` to `dims` dimensions.
Args:
`v`: a PyTorch tensor with shape [N].
`dims`: an `int`.
Returns:
a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
"""
return v[(...,) + (None,)*(dims - 1)] |
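For illustration (added): `expand_dims` reshapes a per-sample vector so it broadcasts against image-shaped tensors.
import torch

v = torch.tensor([1.0, 2.0])      # one value per batch element
print(expand_dims(v, 4).shape)    # torch.Size([2, 1, 1, 1])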
Constructs the noise schedule of Karras et al. (2022). | def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
"""Constructs the noise schedule of Karras et al. (2022)."""
ramp = torch.linspace(0, 1, n, device=device)
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return append_zero(sigmas).to(device) |
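A quick example, added for illustration; it relies on `append_zero` (used above and defined elsewhere in this file), which is why the returned schedule has n + 1 entries ending in 0.
sigmas = get_sigmas_karras(n=5, sigma_min=0.1, sigma_max=10.0)
print(sigmas.shape)  # torch.Size([6]); decreasing from 10.0 to 0.1, followed by 0.0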
Constructs an exponential noise schedule. | def get_sigmas_exponential(n, sigma_min, sigma_max, device='cpu'):
"""Constructs an exponential noise schedule."""
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device).exp()
return append_zero(sigmas) |
Constructs a noise schedule that is polynomial in log sigma.
"""Constructs an polynomial in log sigma noise schedule."""
ramp = torch.linspace(1, 0, n, device=device) ** rho
sigmas = torch.exp(ramp * (math.log(sigma_max) - math.log(sigma_min)) + math.log(sigma_min))
return append_zero(sigmas) |
Constructs a continuous VP noise schedule. | def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device='cpu'):
"""Constructs a continuous VP noise schedule."""
t = torch.linspace(1, eps_s, n, device=device)
sigmas = torch.sqrt(torch.exp(beta_d * t ** 2 / 2 + beta_min * t) - 1)
return append_zero(sigmas) |
Converts a denoiser output to a Karras ODE derivative. | def to_d(x, sigma, denoised):
"""Converts a denoiser output to a Karras ODE derivative."""
return (x - denoised) / utils.append_dims(sigma, x.ndim) |
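Added note: `to_d` evaluates the probability-flow ODE derivative dx/dsigma = (x - D(x; sigma)) / sigma in the Karras parameterization. The quick check below is an illustration (it assumes the module's `utils` import is in scope); with a zero denoiser output the derivative reduces to x / sigma per sample.
import torch

x = torch.randn(2, 3, 4, 4)
sigma = torch.tensor([1.0, 2.0])                           # one sigma per sample
d = to_d(x, sigma, torch.zeros_like(x))
print(torch.allclose(d, x / sigma[:, None, None, None]))   # True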
Calculates the noise level (sigma_down) to step down to and the amount
of noise to add (sigma_up) when doing an ancestral sampling step. | def get_ancestral_step(sigma_from, sigma_to, eta=1.):
"""Calculates the noise level (sigma_down) to step down to and the amount
of noise to add (sigma_up) when doing an ancestral sampling step."""
if not eta:
return sigma_to, 0.
sigma_up = min(sigma_to, eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
return sigma_down, sigma_up |
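A numeric check added for illustration: with `eta=1.`, the returned values satisfy sigma_down**2 + sigma_up**2 == sigma_to**2, so stepping down to `sigma_down` and re-injecting `sigma_up` of fresh noise keeps the marginal noise level at `sigma_to`.
sd, su = get_ancestral_step(2.0, 1.0, eta=1.)
print(sd, su)             # 0.5, ~0.866
print(sd ** 2 + su ** 2)  # ~1.0 == sigma_to ** 2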
Implements Algorithm 2 (Euler steps) from Karras et al. (2022). | def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
"""Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
eps = torch.randn_like(x) * s_noise
x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
denoised = model(x, sigma_hat * s_in, **extra_args)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
dt = sigmas[i + 1] - sigma_hat
# Euler method
x = x + d * dt
return x |
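A runnable toy example (added): any callable `denoiser(x, sigma, **extra_args) -> denoised` can serve as `model`; the zero-predicting denoiser below is a hypothetical stand-in for a real k-diffusion model wrapper, and `disable=True` silences the progress bar.
import torch

def _zero_denoiser(x, sigma, **extra_args):
    return torch.zeros_like(x)  # pretends the clean image is all zeros

sigmas = get_sigmas_karras(n=10, sigma_min=0.1, sigma_max=10.0)
x = torch.randn(1, 3, 8, 8) * sigmas[0]
out = sample_euler(_zero_denoiser, x, sigmas, disable=True)
print(out.abs().max())  # ~0: each step rescales x by sigmas[i + 1] / sigma_hat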
Ancestral sampling with Euler method steps. | def sample_euler_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
"""Ancestral sampling with Euler method steps."""
extra_args = {} if extra_args is None else extra_args
noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
d = to_d(x, sigmas[i], denoised)
# Euler method
dt = sigma_down - sigmas[i]
x = x + d * dt
if sigmas[i + 1] > 0:
x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
return x |
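Added note: `eta` scales the ancestral noise injection; with `eta=0.` the step-down level equals `sigmas[i + 1]` and `sigma_up` is 0, so the sampler reduces to deterministic Euler steps, while `eta=1.` re-injects the full ancestral noise through `noise_sampler`. A minimal sketch, reusing the toy setup above:
import torch

sigmas = get_sigmas_karras(n=10, sigma_min=0.1, sigma_max=10.0)
x = torch.randn(1, 3, 8, 8) * sigmas[0]
out = sample_euler_ancestral(lambda x, s, **kw: torch.zeros_like(x), x, sigmas, eta=0., disable=True)
print(out.abs().max())  # matches plain Euler with the same zero-predicting denoiser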
Implements Algorithm 2 (Heun steps) from Karras et al. (2022). | def sample_heun(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
"""Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
eps = torch.randn_like(x) * s_noise
x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
denoised = model(x, sigma_hat * s_in, **extra_args)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
dt = sigmas[i + 1] - sigma_hat
if sigmas[i + 1] == 0:
# Euler method
x = x + d * dt
else:
# Heun's method
x_2 = x + d * dt
denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
d_prime = (d + d_2) / 2
x = x + d_prime * dt
return x |
A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022). | def sample_dpm_2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
"""A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
sigma_hat = sigmas[i] * (gamma + 1)
if gamma > 0:
eps = torch.randn_like(x) * s_noise
x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
denoised = model(x, sigma_hat * s_in, **extra_args)
d = to_d(x, sigma_hat, denoised)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
if sigmas[i + 1] == 0:
# Euler method
dt = sigmas[i + 1] - sigma_hat
x = x + d * dt
else:
# DPM-Solver-2
sigma_mid = sigma_hat.log().lerp(sigmas[i + 1].log(), 0.5).exp()
dt_1 = sigma_mid - sigma_hat
dt_2 = sigmas[i + 1] - sigma_hat
x_2 = x + d * dt_1
denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
d_2 = to_d(x_2, sigma_mid, denoised_2)
x = x + d_2 * dt_2
return x |
Ancestral sampling with DPM-Solver second-order steps. | def sample_dpm_2_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
"""Ancestral sampling with DPM-Solver second-order steps."""
extra_args = {} if extra_args is None else extra_args
noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
d = to_d(x, sigmas[i], denoised)
if sigma_down == 0:
# Euler method
dt = sigma_down - sigmas[i]
x = x + d * dt
else:
# DPM-Solver-2
sigma_mid = sigmas[i].log().lerp(sigma_down.log(), 0.5).exp()
dt_1 = sigma_mid - sigmas[i]
dt_2 = sigma_down - sigmas[i]
x_2 = x + d * dt_1
denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
d_2 = to_d(x_2, sigma_mid, denoised_2)
x = x + d_2 * dt_2
x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
return x |
DPM-Solver-Fast (fixed step size). See https://arxiv.org/abs/2206.00927. | def sample_dpm_fast(model, x, sigma_min, sigma_max, n, extra_args=None, callback=None, disable=None, eta=0., s_noise=1., noise_sampler=None):
"""DPM-Solver-Fast (fixed step size). See https://arxiv.org/abs/2206.00927."""
if sigma_min <= 0 or sigma_max <= 0:
raise ValueError('sigma_min and sigma_max must be positive')
with tqdm(total=n, disable=disable) as pbar:
dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
if callback is not None:
dpm_solver.info_callback = lambda info: callback({'sigma': dpm_solver.sigma(info['t']), 'sigma_hat': dpm_solver.sigma(info['t_up']), **info})
return dpm_solver.dpm_solver_fast(x, dpm_solver.t(torch.tensor(sigma_max)), dpm_solver.t(torch.tensor(sigma_min)), n, eta, s_noise, noise_sampler) |
DPM-Solver-12 and 23 (adaptive step size). See https://arxiv.org/abs/2206.00927. | def sample_dpm_adaptive(model, x, sigma_min, sigma_max, extra_args=None, callback=None, disable=None, order=3, rtol=0.05, atol=0.0078, h_init=0.05, pcoeff=0., icoeff=1., dcoeff=0., accept_safety=0.81, eta=0., s_noise=1., noise_sampler=None, return_info=False):
"""DPM-Solver-12 and 23 (adaptive step size). See https://arxiv.org/abs/2206.00927."""
if sigma_min <= 0 or sigma_max <= 0:
raise ValueError('sigma_min and sigma_max must be positive')
with tqdm(disable=disable) as pbar:
dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
if callback is not None:
dpm_solver.info_callback = lambda info: callback({'sigma': dpm_solver.sigma(info['t']), 'sigma_hat': dpm_solver.sigma(info['t_up']), **info})
x, info = dpm_solver.dpm_solver_adaptive(x, dpm_solver.t(torch.tensor(sigma_max)), dpm_solver.t(torch.tensor(sigma_min)), order, rtol, atol, h_init, pcoeff, icoeff, dcoeff, accept_safety, eta, s_noise, noise_sampler)
if return_info:
return x, info
return x |
Ancestral sampling with DPM-Solver++(2S) second-order steps. | def sample_dpmpp_2s_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
"""Ancestral sampling with DPM-Solver++(2S) second-order steps."""
extra_args = {} if extra_args is None else extra_args
noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
sigma_fn = lambda t: t.neg().exp()
t_fn = lambda sigma: sigma.log().neg()
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigma_down == 0:
# Euler method
d = to_d(x, sigmas[i], denoised)
dt = sigma_down - sigmas[i]
x = x + d * dt
else:
# DPM-Solver++(2S)
t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)
r = 1 / 2
h = t_next - t
s = t + r * h
x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised
denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2
# Noise addition
if sigmas[i + 1] > 0:
x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
return x |
DPM-Solver++ (stochastic). | def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
"""DPM-Solver++ (stochastic)."""
extra_args = {} if extra_args is None else extra_args
sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
seed = extra_args.get("seed", None)
noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
sigma_fn = lambda t: t.neg().exp()
t_fn = lambda sigma: sigma.log().neg()
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigmas[i + 1] == 0:
# Euler method
d = to_d(x, sigmas[i], denoised)
dt = sigmas[i + 1] - sigmas[i]
x = x + d * dt
else:
# DPM-Solver++
t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
h = t_next - t
s = t + h * r
fac = 1 / (2 * r)
# Step 1
sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta)
s_ = t_fn(sd)
x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised
x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su
denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
# Step 2
sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta)
t_next_ = t_fn(sd)
denoised_d = (1 - fac) * denoised + fac * denoised_2
x = (sigma_fn(t_next_) / sigma_fn(t)) * x - (t - t_next_).expm1() * denoised_d
x = x + noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * su
return x |
DPM-Solver++(2M). | def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=None):
"""DPM-Solver++(2M)."""
extra_args = {} if extra_args is None else extra_args
s_in = x.new_ones([x.shape[0]])
sigma_fn = lambda t: t.neg().exp()
t_fn = lambda sigma: sigma.log().neg()
old_denoised = None
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
h = t_next - t
if old_denoised is None or sigmas[i + 1] == 0:
x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised
else:
h_last = t - t_fn(sigmas[i - 1])
r = h_last / h
denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised
x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_d
old_denoised = denoised
return x |
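A usage sketch (added): DPM-Solver++(2M) takes the same `(model, x, sigmas)` arguments as `sample_euler` without the churn parameters, and it reuses the previous step's denoised output, so the cost stays at one model evaluation per step.
import torch

sigmas = get_sigmas_karras(n=12, sigma_min=0.1, sigma_max=10.0)
x = torch.randn(1, 3, 8, 8) * sigmas[0]
out = sample_dpmpp_2m(lambda x, sigma, **kw: torch.zeros_like(x), x, sigmas, disable=True)
print(out.abs().max())  # ~0 with the trivial zero-predicting denoiser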
DPM-Solver++(2M) SDE. | def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
"""DPM-Solver++(2M) SDE."""
if solver_type not in {'heun', 'midpoint'}:
raise ValueError('solver_type must be \'heun\' or \'midpoint\'')
extra_args = {} if extra_args is None else extra_args
seed = extra_args.get("seed", None)
sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
old_denoised = None
h_last = None
h = None
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigmas[i + 1] == 0:
# Denoising step
x = denoised
else:
# DPM-Solver++(2M) SDE
t, s = -sigmas[i].log(), -sigmas[i + 1].log()
h = s - t
eta_h = eta * h
x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised
if old_denoised is not None:
r = h_last / h
if solver_type == 'heun':
x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)
elif solver_type == 'midpoint':
x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)
if eta:
x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise
old_denoised = denoised
h_last = h
return x |
DPM-Solver++(3M) SDE. | def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
"""DPM-Solver++(3M) SDE."""
extra_args = {} if extra_args is None else extra_args
seed = extra_args.get("seed", None)
sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
s_in = x.new_ones([x.shape[0]])
denoised_1, denoised_2 = None, None
h, h_1, h_2 = None, None, None
for i in trange(len(sigmas) - 1, disable=disable):
denoised = model(x, sigmas[i] * s_in, **extra_args)
if callback is not None:
callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
if sigmas[i + 1] == 0:
# Denoising step
x = denoised
else:
t, s = -sigmas[i].log(), -sigmas[i + 1].log()
h = s - t
h_eta = h * (eta + 1)
x = torch.exp(-h_eta) * x + (-h_eta).expm1().neg() * denoised
if h_2 is not None:
r0 = h_1 / h
r1 = h_2 / h
d1_0 = (denoised - denoised_1) / r0
d1_1 = (denoised_1 - denoised_2) / r1
d1 = d1_0 + (d1_0 - d1_1) * r0 / (r0 + r1)
d2 = (d1_0 - d1_1) / (r0 + r1)
phi_2 = h_eta.neg().expm1() / h_eta + 1
phi_3 = phi_2 / h_eta - 0.5
x = x + phi_2 * d1 - phi_3 * d2
elif h_1 is not None:
r = h_1 / h
d = (denoised - denoised_1) / r
phi_2 = h_eta.neg().expm1() / h_eta + 1
x = x + phi_2 * d
if eta:
x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise
denoised_1, denoised_2 = denoised, denoised_1
h_1, h_2 = h, h_1
return x |
Apply passed in transforms for HuggingFace Datasets. | def hf_datasets_augs_helper(examples, transform, image_key, mode='RGB'):
"""Apply passed in transforms for HuggingFace Datasets."""
images = [transform(image.convert(mode)) for image in examples[image_key]]
return {image_key: images} |
Appends dimensions to the end of a tensor until it has target_dims dimensions. | def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
expanded = x[(...,) + (None,) * dims_to_append]
# MPS will get inf values if it tries to index into the new axes, but detaching fixes this.
# https://github.com/pytorch/pytorch/issues/84364
return expanded.detach().clone() if expanded.device.type == 'mps' else expanded |
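Illustration (added): `append_dims` is the broadcasting helper the samplers above rely on, e.g. to divide an image batch by per-sample sigmas.
import torch

sigma = torch.tensor([1.0, 2.0])
x = torch.randn(2, 3, 8, 8)
print(append_dims(sigma, x.ndim).shape)        # torch.Size([2, 1, 1, 1])
print((x / append_dims(sigma, x.ndim)).shape)  # torch.Size([2, 3, 8, 8])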