|
program(1.3) |
|
[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})] |
|
{ |
|
func main<ios18>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, state<tensor<fp16, [4, 384, 1, 1536]>> encoder_attn_key_cache, state<tensor<fp16, [1, 1536]>> encoder_attn_key_padding_mask, state<tensor<fp16, [4, 384, 1, 1536]>> encoder_attn_value_cache, tensor<int32, [1]> input_ids, tensor<fp16, [1, 448]> kv_cache_update_mask, state<tensor<fp16, [4, 384, 1, 448]>> self_attn_key_cache, state<tensor<fp16, [4, 384, 1, 448]>> self_attn_value_cache) { |
|
int32 var_26_axis_0 = const()[name = string("op_26_axis_0"), val = int32(0)]; |
|
int32 var_26_batch_dims_0 = const()[name = string("op_26_batch_dims_0"), val = int32(0)]; |
|
bool var_26_validate_indices_0 = const()[name = string("op_26_validate_indices_0"), val = bool(false)]; |
|
tensor<fp16, [51864, 384]> embed_tokens_weight_to_fp16 = const()[name = string("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51864, 384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))]; |
|
tensor<fp16, [1, 384]> var_26_cast_fp16 = gather(axis = var_26_axis_0, batch_dims = var_26_batch_dims_0, indices = input_ids, validate_indices = var_26_validate_indices_0, x = embed_tokens_weight_to_fp16)[name = string("op_26_cast_fp16")]; |
|
int32 var_30_axis_0 = const()[name = string("op_30_axis_0"), val = int32(0)]; |
|
int32 var_30_batch_dims_0 = const()[name = string("op_30_batch_dims_0"), val = int32(0)]; |
|
bool var_30_validate_indices_0 = const()[name = string("op_30_validate_indices_0"), val = bool(false)]; |
|
tensor<fp16, [448, 384]> embed_positions_weight_to_fp16 = const()[name = string("embed_positions_weight_to_fp16"), val = tensor<fp16, [448, 384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(39831680)))]; |
|
string cache_length_to_uint16_dtype_0 = const()[name = string("cache_length_to_uint16_dtype_0"), val = string("uint16")]; |
|
tensor<uint16, [1]> cache_length_to_uint16 = cast(dtype = cache_length_to_uint16_dtype_0, x = cache_length)[name = string("cast_71")]; |
|
tensor<fp16, [1, 384]> var_30_cast_fp16_cast_uint16 = gather(axis = var_30_axis_0, batch_dims = var_30_batch_dims_0, indices = cache_length_to_uint16, validate_indices = var_30_validate_indices_0, x = embed_positions_weight_to_fp16)[name = string("op_30_cast_fp16_cast_uint16")]; |
|
tensor<fp16, [1, 384]> hidden_states_1_cast_fp16 = add(x = var_26_cast_fp16, y = var_30_cast_fp16_cast_uint16)[name = string("hidden_states_1_cast_fp16")]; |
|
tensor<int32, [1]> var_44_axes_0 = const()[name = string("op_44_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 384, 1]> var_44_cast_fp16 = expand_dims(axes = var_44_axes_0, x = hidden_states_1_cast_fp16)[name = string("op_44_cast_fp16")]; |
|
tensor<int32, [1]> inputs_1_axes_0 = const()[name = string("inputs_1_axes_0"), val = tensor<int32, [1]>([3])]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_44_cast_fp16)[name = string("inputs_1_cast_fp16")]; |
|
tensor<fp16, [4, 384, 1, 448]> read_state_0 = read_state(input = self_attn_key_cache)[name = string("read_state_0")]; |
|
tensor<int32, [4]> tile_0 = const()[name = string("tile_0"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
int32 var_49_axis_0 = const()[name = string("op_49_axis_0"), val = int32(0)]; |
|
tensor<fp16, [1, 384, 1, 448]> var_49_cast_fp16_0, tensor<fp16, [1, 384, 1, 448]> var_49_cast_fp16_1, tensor<fp16, [1, 384, 1, 448]> var_49_cast_fp16_2, tensor<fp16, [1, 384, 1, 448]> var_49_cast_fp16_3 = split(axis = var_49_axis_0, split_sizes = tile_0, x = read_state_0)[name = string("op_49_cast_fp16")]; |
|
tensor<fp16, [4, 384, 1, 448]> read_state_1 = read_state(input = self_attn_value_cache)[name = string("read_state_1")]; |
|
tensor<int32, [4]> tile_1 = const()[name = string("tile_1"), val = tensor<int32, [4]>([1, 1, 1, 1])]; |
|
int32 var_56_axis_0 = const()[name = string("op_56_axis_0"), val = int32(0)]; |
|
tensor<fp16, [1, 384, 1, 448]> var_56_cast_fp16_0, tensor<fp16, [1, 384, 1, 448]> var_56_cast_fp16_1, tensor<fp16, [1, 384, 1, 448]> var_56_cast_fp16_2, tensor<fp16, [1, 384, 1, 448]> var_56_cast_fp16_3 = split(axis = var_56_axis_0, split_sizes = tile_1, x = read_state_1)[name = string("op_56_cast_fp16")]; |
|
tensor<fp16, [4, 384, 1, 1536]> read_state_2 = read_state(input = encoder_attn_key_cache)[name = string("read_state_2")]; |
|
tensor<int32, [4]> obj_17_begin_0 = const()[name = string("obj_17_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_17_end_0 = const()[name = string("obj_17_end_0"), val = tensor<int32, [4]>([1, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_17_end_mask_0 = const()[name = string("obj_17_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_17_cast_fp16 = slice_by_index(begin = obj_17_begin_0, end = obj_17_end_0, end_mask = obj_17_end_mask_0, x = read_state_2)[name = string("obj_17_cast_fp16")]; |
|
tensor<fp16, [4, 384, 1, 1536]> read_state_3 = read_state(input = encoder_attn_value_cache)[name = string("read_state_3")]; |
|
tensor<int32, [4]> obj_19_begin_0 = const()[name = string("obj_19_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_19_end_0 = const()[name = string("obj_19_end_0"), val = tensor<int32, [4]>([1, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_19_end_mask_0 = const()[name = string("obj_19_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_19_cast_fp16 = slice_by_index(begin = obj_19_begin_0, end = obj_19_end_0, end_mask = obj_19_end_mask_0, x = read_state_3)[name = string("obj_19_cast_fp16")]; |
|
int32 var_76 = const()[name = string("op_76"), val = int32(3)]; |
|
tensor<int32, [1]> out_1_axes_0 = const()[name = string("out_1_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_101_to_fp16 = const()[name = string("op_101_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_101_to_fp16, x = inputs_1_cast_fp16)[name = string("out_1_cast_fp16")]; |
|
tensor<fp16, [384]> obj_5_mean_0_to_fp16 = const()[name = string("obj_5_mean_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40175808)))]; |
|
tensor<fp16, [384]> obj_5_variance_0_to_fp16 = const()[name = string("obj_5_variance_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40176640)))]; |
|
tensor<fp16, [384]> obj_5_gamma_0_to_fp16 = const()[name = string("obj_5_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40177472)))]; |
|
tensor<fp16, [384]> obj_5_beta_0_to_fp16 = const()[name = string("obj_5_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40178304)))]; |
|
fp16 obj_5_epsilon_0_to_fp16 = const()[name = string("obj_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_5_cast_fp16 = batch_norm(beta = obj_5_beta_0_to_fp16, epsilon = obj_5_epsilon_0_to_fp16, gamma = obj_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_1_cast_fp16)[name = string("obj_5_cast_fp16")]; |
|
string query_1_pad_type_0 = const()[name = string("query_1_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_1_strides_0 = const()[name = string("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_1_pad_0 = const()[name = string("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_1_dilations_0 = const()[name = string("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_1_groups_0 = const()[name = string("query_1_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40179136)))]; |
|
tensor<fp16, [384]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40474112)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("query_1_cast_fp16")]; |
|
string current_key_1_pad_type_0 = const()[name = string("current_key_1_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_key_1_strides_0 = const()[name = string("current_key_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_1_pad_0 = const()[name = string("current_key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_1_dilations_0 = const()[name = string("current_key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_key_1_groups_0 = const()[name = string("current_key_1_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40474944)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_key_1_cast_fp16")]; |
|
string current_value_1_pad_type_0 = const()[name = string("current_value_1_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_value_1_strides_0 = const()[name = string("current_value_1_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_1_pad_0 = const()[name = string("current_value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_1_dilations_0 = const()[name = string("current_value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_value_1_groups_0 = const()[name = string("current_value_1_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(40769920)))]; |
|
tensor<fp16, [384]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41064896)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_value_1_cast_fp16")]; |
|
tensor<int32, [1]> var_136_axes_0 = const()[name = string("op_136_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1, 448]> var_136_cast_fp16 = expand_dims(axes = var_136_axes_0, x = kv_cache_update_mask)[name = string("op_136_cast_fp16")]; |
|
tensor<int32, [1]> var_137_axes_0 = const()[name = string("op_137_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1, 1, 448]> var_137_cast_fp16 = expand_dims(axes = var_137_axes_0, x = var_136_cast_fp16)[name = string("op_137_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_139_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_137_cast_fp16)[name = string("op_139_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_1_cast_fp16 = add(x = var_49_cast_fp16_0, y = var_139_cast_fp16)[name = string("key_1_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_141_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_137_cast_fp16)[name = string("op_141_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_1_cast_fp16 = add(x = var_56_cast_fp16_0, y = var_141_cast_fp16)[name = string("value_1_cast_fp16")]; |
|
tensor<int32, [4]> var_144 = const()[name = string("op_144"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_144, x = query_1_cast_fp16)[name = string("mh_q_1_cast_fp16")]; |
|
fp16 var_146_to_fp16 = const()[name = string("op_146_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_147_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_146_to_fp16)[name = string("op_147_cast_fp16")]; |
|
tensor<int32, [4]> var_148 = const()[name = string("op_148"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_149_cast_fp16 = reshape(shape = var_148, x = key_1_cast_fp16)[name = string("op_149_cast_fp16")]; |
|
bool mh_w_1_transpose_x_0 = const()[name = string("mh_w_1_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_1_transpose_y_0 = const()[name = string("mh_w_1_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_147_cast_fp16, y = var_149_cast_fp16)[name = string("mh_w_1_cast_fp16")]; |
|
tensor<int32, [1]> var_153_axes_0 = const()[name = string("op_153_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1, 448]> var_153_cast_fp16 = expand_dims(axes = var_153_axes_0, x = decoder_key_padding_mask)[name = string("op_153_cast_fp16")]; |
|
tensor<int32, [1]> var_154_axes_0 = const()[name = string("op_154_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1, 1, 448]> var_154_cast_fp16 = expand_dims(axes = var_154_axes_0, x = var_153_cast_fp16)[name = string("op_154_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_154_cast_fp16)[name = string("mh_w_3_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_157_cast_fp16 = softmax(axis = var_76, x = mh_w_3_cast_fp16)[name = string("op_157_cast_fp16")]; |
|
tensor<int32, [4]> var_158 = const()[name = string("op_158"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_159_cast_fp16 = reshape(shape = var_158, x = value_1_cast_fp16)[name = string("op_159_cast_fp16")]; |
|
bool attn_1_transpose_x_0 = const()[name = string("attn_1_transpose_x_0"), val = bool(false)]; |
|
bool attn_1_transpose_y_0 = const()[name = string("attn_1_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_159_cast_fp16, y = var_157_cast_fp16)[name = string("attn_1_cast_fp16")]; |
|
tensor<int32, [4]> var_162 = const()[name = string("op_162"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_1_cast_fp16 = reshape(shape = var_162, x = attn_1_cast_fp16)[name = string("input_1_cast_fp16")]; |
|
string obj_11_pad_type_0 = const()[name = string("obj_11_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_11_strides_0 = const()[name = string("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_11_pad_0 = const()[name = string("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_11_dilations_0 = const()[name = string("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_11_groups_0 = const()[name = string("obj_11_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41065728)))]; |
|
tensor<fp16, [384]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41360704)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_11_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = string("obj_11_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_11_cast_fp16)[name = string("inputs_3_cast_fp16")]; |
|
tensor<int32, [1]> out_3_axes_0 = const()[name = string("out_3_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_184_to_fp16 = const()[name = string("op_184_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_3_cast_fp16 = layer_norm(axes = out_3_axes_0, epsilon = var_184_to_fp16, x = inputs_3_cast_fp16)[name = string("out_3_cast_fp16")]; |
|
tensor<fp16, [384]> obj_13_gamma_0_to_fp16 = const()[name = string("obj_13_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41361536)))]; |
|
tensor<fp16, [384]> obj_13_beta_0_to_fp16 = const()[name = string("obj_13_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41362368)))]; |
|
fp16 obj_13_epsilon_0_to_fp16 = const()[name = string("obj_13_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_13_cast_fp16 = batch_norm(beta = obj_13_beta_0_to_fp16, epsilon = obj_13_epsilon_0_to_fp16, gamma = obj_13_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_3_cast_fp16)[name = string("obj_13_cast_fp16")]; |
|
string query_3_pad_type_0 = const()[name = string("query_3_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_3_strides_0 = const()[name = string("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_3_pad_0 = const()[name = string("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_3_dilations_0 = const()[name = string("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_3_groups_0 = const()[name = string("query_3_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41363200)))]; |
|
tensor<fp16, [384]> layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41658176)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = string("query_3_cast_fp16")]; |
|
tensor<int32, [4]> var_204 = const()[name = string("op_204"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_204, x = query_3_cast_fp16)[name = string("mh_q_3_cast_fp16")]; |
|
fp16 var_206_to_fp16 = const()[name = string("op_206_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_207_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_206_to_fp16)[name = string("op_207_cast_fp16")]; |
|
tensor<int32, [4]> var_208 = const()[name = string("op_208"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_209_cast_fp16 = reshape(shape = var_208, x = obj_17_cast_fp16)[name = string("op_209_cast_fp16")]; |
|
bool mh_w_5_transpose_x_0 = const()[name = string("mh_w_5_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_5_transpose_y_0 = const()[name = string("mh_w_5_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_207_cast_fp16, y = var_209_cast_fp16)[name = string("mh_w_5_cast_fp16")]; |
|
tensor<fp16, [1, 1536]> read_state_4 = read_state(input = encoder_attn_key_padding_mask)[name = string("read_state_4")]; |
|
tensor<int32, [1]> var_213_axes_0 = const()[name = string("op_213_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1, 1536]> var_213_cast_fp16 = expand_dims(axes = var_213_axes_0, x = read_state_4)[name = string("op_213_cast_fp16")]; |
|
tensor<int32, [1]> var_214_axes_0 = const()[name = string("op_214_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_214_cast_fp16 = expand_dims(axes = var_214_axes_0, x = var_213_cast_fp16)[name = string("op_214_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_7_cast_fp16 = add(x = mh_w_5_cast_fp16, y = var_214_cast_fp16)[name = string("mh_w_7_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> obj_23_cast_fp16 = softmax(axis = var_76, x = mh_w_7_cast_fp16)[name = string("obj_23_cast_fp16")]; |
|
tensor<int32, [4]> var_218 = const()[name = string("op_218"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_219_cast_fp16 = reshape(shape = var_218, x = obj_19_cast_fp16)[name = string("op_219_cast_fp16")]; |
|
bool attn_3_transpose_x_0 = const()[name = string("attn_3_transpose_x_0"), val = bool(false)]; |
|
bool attn_3_transpose_y_0 = const()[name = string("attn_3_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_219_cast_fp16, y = obj_23_cast_fp16)[name = string("attn_3_cast_fp16")]; |
|
tensor<int32, [4]> var_222 = const()[name = string("op_222"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_3_cast_fp16 = reshape(shape = var_222, x = attn_3_cast_fp16)[name = string("input_3_cast_fp16")]; |
|
string obj_21_pad_type_0 = const()[name = string("obj_21_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_21_strides_0 = const()[name = string("obj_21_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_21_pad_0 = const()[name = string("obj_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_21_dilations_0 = const()[name = string("obj_21_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_21_groups_0 = const()[name = string("obj_21_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41659008)))]; |
|
tensor<fp16, [384]> layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41953984)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_21_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = string("obj_21_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_21_cast_fp16)[name = string("inputs_5_cast_fp16")]; |
|
tensor<int32, [1]> out_5_axes_0 = const()[name = string("out_5_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_240_to_fp16 = const()[name = string("op_240_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_5_cast_fp16 = layer_norm(axes = out_5_axes_0, epsilon = var_240_to_fp16, x = inputs_5_cast_fp16)[name = string("out_5_cast_fp16")]; |
|
tensor<fp16, [384]> input_5_gamma_0_to_fp16 = const()[name = string("input_5_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41954816)))]; |
|
tensor<fp16, [384]> input_5_beta_0_to_fp16 = const()[name = string("input_5_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41955648)))]; |
|
fp16 input_5_epsilon_0_to_fp16 = const()[name = string("input_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_5_cast_fp16)[name = string("input_5_cast_fp16")]; |
|
string input_7_pad_type_0 = const()[name = string("input_7_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> input_7_strides_0 = const()[name = string("input_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_7_pad_0 = const()[name = string("input_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_7_dilations_0 = const()[name = string("input_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 input_7_groups_0 = const()[name = string("input_7_groups_0"), val = int32(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = string("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(41956480)))]; |
|
tensor<fp16, [1536]> layers_0_fc1_bias_to_fp16 = const()[name = string("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(43136192)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = string("input_7_cast_fp16")]; |
|
string input_9_mode_0 = const()[name = string("input_9_mode_0"), val = string("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = string("input_9_cast_fp16")]; |
|
string hidden_states_3_pad_type_0 = const()[name = string("hidden_states_3_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> hidden_states_3_strides_0 = const()[name = string("hidden_states_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_3_pad_0 = const()[name = string("hidden_states_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_3_dilations_0 = const()[name = string("hidden_states_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 hidden_states_3_groups_0 = const()[name = string("hidden_states_3_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = string("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(43139328)))]; |
|
tensor<fp16, [384]> layers_0_fc2_bias_to_fp16 = const()[name = string("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44319040)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = string("hidden_states_3_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = string("inputs_7_cast_fp16")]; |
|
tensor<int32, [4]> obj_35_begin_0 = const()[name = string("obj_35_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_35_end_0 = const()[name = string("obj_35_end_0"), val = tensor<int32, [4]>([2, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_35_end_mask_0 = const()[name = string("obj_35_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_35_cast_fp16 = slice_by_index(begin = obj_35_begin_0, end = obj_35_end_0, end_mask = obj_35_end_mask_0, x = read_state_2)[name = string("obj_35_cast_fp16")]; |
|
tensor<int32, [4]> obj_37_begin_0 = const()[name = string("obj_37_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_37_end_0 = const()[name = string("obj_37_end_0"), val = tensor<int32, [4]>([2, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_37_end_mask_0 = const()[name = string("obj_37_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_37_cast_fp16 = slice_by_index(begin = obj_37_begin_0, end = obj_37_end_0, end_mask = obj_37_end_mask_0, x = read_state_3)[name = string("obj_37_cast_fp16")]; |
|
int32 var_285 = const()[name = string("op_285"), val = int32(3)]; |
|
tensor<int32, [1]> out_7_axes_0 = const()[name = string("out_7_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_310_to_fp16 = const()[name = string("op_310_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_310_to_fp16, x = inputs_7_cast_fp16)[name = string("out_7_cast_fp16")]; |
|
tensor<fp16, [384]> obj_25_gamma_0_to_fp16 = const()[name = string("obj_25_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44319872)))]; |
|
tensor<fp16, [384]> obj_25_beta_0_to_fp16 = const()[name = string("obj_25_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44320704)))]; |
|
fp16 obj_25_epsilon_0_to_fp16 = const()[name = string("obj_25_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_25_cast_fp16 = batch_norm(beta = obj_25_beta_0_to_fp16, epsilon = obj_25_epsilon_0_to_fp16, gamma = obj_25_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_7_cast_fp16)[name = string("obj_25_cast_fp16")]; |
|
string query_5_pad_type_0 = const()[name = string("query_5_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_5_strides_0 = const()[name = string("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_5_pad_0 = const()[name = string("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_5_dilations_0 = const()[name = string("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_5_groups_0 = const()[name = string("query_5_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44321536)))]; |
|
tensor<fp16, [384]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44616512)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("query_5_cast_fp16")]; |
|
string current_key_3_pad_type_0 = const()[name = string("current_key_3_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_key_3_strides_0 = const()[name = string("current_key_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_3_pad_0 = const()[name = string("current_key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_3_dilations_0 = const()[name = string("current_key_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_key_3_groups_0 = const()[name = string("current_key_3_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44617344)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_3_cast_fp16 = conv(dilations = current_key_3_dilations_0, groups = current_key_3_groups_0, pad = current_key_3_pad_0, pad_type = current_key_3_pad_type_0, strides = current_key_3_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_key_3_cast_fp16")]; |
|
string current_value_3_pad_type_0 = const()[name = string("current_value_3_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_value_3_strides_0 = const()[name = string("current_value_3_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_3_pad_0 = const()[name = string("current_value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_3_dilations_0 = const()[name = string("current_value_3_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_value_3_groups_0 = const()[name = string("current_value_3_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44912320)))]; |
|
tensor<fp16, [384]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45207296)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_3_dilations_0, groups = current_value_3_groups_0, pad = current_value_3_pad_0, pad_type = current_value_3_pad_type_0, strides = current_value_3_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_value_3_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_348_cast_fp16 = mul(x = current_key_3_cast_fp16, y = var_137_cast_fp16)[name = string("op_348_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_3_cast_fp16 = add(x = var_49_cast_fp16_1, y = var_348_cast_fp16)[name = string("key_3_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_350_cast_fp16 = mul(x = current_value_3_cast_fp16, y = var_137_cast_fp16)[name = string("op_350_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_3_cast_fp16 = add(x = var_56_cast_fp16_1, y = var_350_cast_fp16)[name = string("value_3_cast_fp16")]; |
|
tensor<int32, [4]> var_353 = const()[name = string("op_353"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_353, x = query_5_cast_fp16)[name = string("mh_q_5_cast_fp16")]; |
|
fp16 var_355_to_fp16 = const()[name = string("op_355_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_356_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_355_to_fp16)[name = string("op_356_cast_fp16")]; |
|
tensor<int32, [4]> var_357 = const()[name = string("op_357"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_358_cast_fp16 = reshape(shape = var_357, x = key_3_cast_fp16)[name = string("op_358_cast_fp16")]; |
|
bool mh_w_9_transpose_x_0 = const()[name = string("mh_w_9_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_9_transpose_y_0 = const()[name = string("mh_w_9_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_9_cast_fp16 = matmul(transpose_x = mh_w_9_transpose_x_0, transpose_y = mh_w_9_transpose_y_0, x = var_356_cast_fp16, y = var_358_cast_fp16)[name = string("mh_w_9_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_11_cast_fp16 = add(x = mh_w_9_cast_fp16, y = var_154_cast_fp16)[name = string("mh_w_11_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_366_cast_fp16 = softmax(axis = var_285, x = mh_w_11_cast_fp16)[name = string("op_366_cast_fp16")]; |
|
tensor<int32, [4]> var_367 = const()[name = string("op_367"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_368_cast_fp16 = reshape(shape = var_367, x = value_3_cast_fp16)[name = string("op_368_cast_fp16")]; |
|
bool attn_5_transpose_x_0 = const()[name = string("attn_5_transpose_x_0"), val = bool(false)]; |
|
bool attn_5_transpose_y_0 = const()[name = string("attn_5_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_368_cast_fp16, y = var_366_cast_fp16)[name = string("attn_5_cast_fp16")]; |
|
tensor<int32, [4]> var_371 = const()[name = string("op_371"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_11_cast_fp16 = reshape(shape = var_371, x = attn_5_cast_fp16)[name = string("input_11_cast_fp16")]; |
|
string obj_31_pad_type_0 = const()[name = string("obj_31_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_31_strides_0 = const()[name = string("obj_31_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_31_pad_0 = const()[name = string("obj_31_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_31_dilations_0 = const()[name = string("obj_31_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_31_groups_0 = const()[name = string("obj_31_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45208128)))]; |
|
tensor<fp16, [384]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45503104)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_31_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_31_dilations_0, groups = obj_31_groups_0, pad = obj_31_pad_0, pad_type = obj_31_pad_type_0, strides = obj_31_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = string("obj_31_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_31_cast_fp16)[name = string("inputs_9_cast_fp16")]; |
|
tensor<int32, [1]> out_9_axes_0 = const()[name = string("out_9_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_393_to_fp16 = const()[name = string("op_393_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_9_cast_fp16 = layer_norm(axes = out_9_axes_0, epsilon = var_393_to_fp16, x = inputs_9_cast_fp16)[name = string("out_9_cast_fp16")]; |
|
tensor<fp16, [384]> obj_33_gamma_0_to_fp16 = const()[name = string("obj_33_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45503936)))]; |
|
tensor<fp16, [384]> obj_33_beta_0_to_fp16 = const()[name = string("obj_33_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45504768)))]; |
|
fp16 obj_33_epsilon_0_to_fp16 = const()[name = string("obj_33_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_33_cast_fp16 = batch_norm(beta = obj_33_beta_0_to_fp16, epsilon = obj_33_epsilon_0_to_fp16, gamma = obj_33_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_9_cast_fp16)[name = string("obj_33_cast_fp16")]; |
|
string query_7_pad_type_0 = const()[name = string("query_7_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_7_strides_0 = const()[name = string("query_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_7_pad_0 = const()[name = string("query_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_7_dilations_0 = const()[name = string("query_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_7_groups_0 = const()[name = string("query_7_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45505600)))]; |
|
tensor<fp16, [384]> layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45800576)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_7_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_7_dilations_0, groups = query_7_groups_0, pad = query_7_pad_0, pad_type = query_7_pad_type_0, strides = query_7_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_33_cast_fp16)[name = string("query_7_cast_fp16")]; |
|
tensor<int32, [4]> var_413 = const()[name = string("op_413"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_7_cast_fp16 = reshape(shape = var_413, x = query_7_cast_fp16)[name = string("mh_q_7_cast_fp16")]; |
|
fp16 var_415_to_fp16 = const()[name = string("op_415_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_416_cast_fp16 = mul(x = mh_q_7_cast_fp16, y = var_415_to_fp16)[name = string("op_416_cast_fp16")]; |
|
tensor<int32, [4]> var_417 = const()[name = string("op_417"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_418_cast_fp16 = reshape(shape = var_417, x = obj_35_cast_fp16)[name = string("op_418_cast_fp16")]; |
|
bool mh_w_13_transpose_x_0 = const()[name = string("mh_w_13_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_13_transpose_y_0 = const()[name = string("mh_w_13_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_416_cast_fp16, y = var_418_cast_fp16)[name = string("mh_w_13_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_15_cast_fp16 = add(x = mh_w_13_cast_fp16, y = var_214_cast_fp16)[name = string("mh_w_15_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> obj_41_cast_fp16 = softmax(axis = var_285, x = mh_w_15_cast_fp16)[name = string("obj_41_cast_fp16")]; |
|
tensor<int32, [4]> var_427 = const()[name = string("op_427"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_428_cast_fp16 = reshape(shape = var_427, x = obj_37_cast_fp16)[name = string("op_428_cast_fp16")]; |
|
bool attn_7_transpose_x_0 = const()[name = string("attn_7_transpose_x_0"), val = bool(false)]; |
|
bool attn_7_transpose_y_0 = const()[name = string("attn_7_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_7_cast_fp16 = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_428_cast_fp16, y = obj_41_cast_fp16)[name = string("attn_7_cast_fp16")]; |
|
tensor<int32, [4]> var_431 = const()[name = string("op_431"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_13_cast_fp16 = reshape(shape = var_431, x = attn_7_cast_fp16)[name = string("input_13_cast_fp16")]; |
|
string obj_39_pad_type_0 = const()[name = string("obj_39_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_39_strides_0 = const()[name = string("obj_39_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_39_pad_0 = const()[name = string("obj_39_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_39_dilations_0 = const()[name = string("obj_39_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_39_groups_0 = const()[name = string("obj_39_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(45801408)))]; |
|
tensor<fp16, [384]> layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(46096384)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_39_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_39_dilations_0, groups = obj_39_groups_0, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = obj_39_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = string("obj_39_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_39_cast_fp16)[name = string("inputs_11_cast_fp16")]; |
|
tensor<int32, [1]> out_11_axes_0 = const()[name = string("out_11_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_452_to_fp16 = const()[name = string("op_452_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_11_cast_fp16 = layer_norm(axes = out_11_axes_0, epsilon = var_452_to_fp16, x = inputs_11_cast_fp16)[name = string("out_11_cast_fp16")]; |
|
tensor<fp16, [384]> input_15_gamma_0_to_fp16 = const()[name = string("input_15_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(46097216)))]; |
|
tensor<fp16, [384]> input_15_beta_0_to_fp16 = const()[name = string("input_15_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(46098048)))]; |
|
fp16 input_15_epsilon_0_to_fp16 = const()[name = string("input_15_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_11_cast_fp16)[name = string("input_15_cast_fp16")]; |
|
string input_17_pad_type_0 = const()[name = string("input_17_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> input_17_strides_0 = const()[name = string("input_17_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_17_pad_0 = const()[name = string("input_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_17_dilations_0 = const()[name = string("input_17_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 input_17_groups_0 = const()[name = string("input_17_groups_0"), val = int32(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = string("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(46098880)))]; |
|
tensor<fp16, [1536]> layers_1_fc1_bias_to_fp16 = const()[name = string("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(47278592)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = string("input_17_cast_fp16")]; |
|
string input_19_mode_0 = const()[name = string("input_19_mode_0"), val = string("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_19_cast_fp16 = gelu(mode = input_19_mode_0, x = input_17_cast_fp16)[name = string("input_19_cast_fp16")]; |
|
string hidden_states_5_pad_type_0 = const()[name = string("hidden_states_5_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = string("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = string("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = string("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 hidden_states_5_groups_0 = const()[name = string("hidden_states_5_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = string("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(47281728)))]; |
|
tensor<fp16, [384]> layers_1_fc2_bias_to_fp16 = const()[name = string("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(48461440)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_19_cast_fp16)[name = string("hidden_states_5_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_13_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = string("inputs_13_cast_fp16")]; |
|
tensor<int32, [4]> obj_53_begin_0 = const()[name = string("obj_53_begin_0"), val = tensor<int32, [4]>([2, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_53_end_0 = const()[name = string("obj_53_end_0"), val = tensor<int32, [4]>([3, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_53_end_mask_0 = const()[name = string("obj_53_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_53_cast_fp16 = slice_by_index(begin = obj_53_begin_0, end = obj_53_end_0, end_mask = obj_53_end_mask_0, x = read_state_2)[name = string("obj_53_cast_fp16")]; |
|
tensor<int32, [4]> obj_55_begin_0 = const()[name = string("obj_55_begin_0"), val = tensor<int32, [4]>([2, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_55_end_0 = const()[name = string("obj_55_end_0"), val = tensor<int32, [4]>([3, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_55_end_mask_0 = const()[name = string("obj_55_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_55_cast_fp16 = slice_by_index(begin = obj_55_begin_0, end = obj_55_end_0, end_mask = obj_55_end_mask_0, x = read_state_3)[name = string("obj_55_cast_fp16")]; |
|
int32 var_498 = const()[name = string("op_498"), val = int32(3)]; |
|
tensor<int32, [1]> out_13_axes_0 = const()[name = string("out_13_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_523_to_fp16 = const()[name = string("op_523_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_13_cast_fp16 = layer_norm(axes = out_13_axes_0, epsilon = var_523_to_fp16, x = inputs_13_cast_fp16)[name = string("out_13_cast_fp16")]; |
|
tensor<fp16, [384]> obj_43_gamma_0_to_fp16 = const()[name = string("obj_43_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(48462272)))]; |
|
tensor<fp16, [384]> obj_43_beta_0_to_fp16 = const()[name = string("obj_43_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(48463104)))]; |
|
fp16 obj_43_epsilon_0_to_fp16 = const()[name = string("obj_43_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_43_cast_fp16 = batch_norm(beta = obj_43_beta_0_to_fp16, epsilon = obj_43_epsilon_0_to_fp16, gamma = obj_43_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_13_cast_fp16)[name = string("obj_43_cast_fp16")]; |
|
string query_9_pad_type_0 = const()[name = string("query_9_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_9_strides_0 = const()[name = string("query_9_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_9_pad_0 = const()[name = string("query_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_9_dilations_0 = const()[name = string("query_9_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_9_groups_0 = const()[name = string("query_9_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(48463936)))]; |
|
tensor<fp16, [384]> layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(48758912)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_9_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations = query_9_dilations_0, groups = query_9_groups_0, pad = query_9_pad_0, pad_type = query_9_pad_type_0, strides = query_9_strides_0, weight = layers_2_self_attn_q_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = string("query_9_cast_fp16")]; |
|
string current_key_5_pad_type_0 = const()[name = string("current_key_5_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_key_5_strides_0 = const()[name = string("current_key_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_5_pad_0 = const()[name = string("current_key_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_5_dilations_0 = const()[name = string("current_key_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_key_5_groups_0 = const()[name = string("current_key_5_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(48759744)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_5_cast_fp16 = conv(dilations = current_key_5_dilations_0, groups = current_key_5_groups_0, pad = current_key_5_pad_0, pad_type = current_key_5_pad_type_0, strides = current_key_5_strides_0, weight = layers_2_self_attn_k_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = string("current_key_5_cast_fp16")]; |
|
string current_value_5_pad_type_0 = const()[name = string("current_value_5_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_value_5_strides_0 = const()[name = string("current_value_5_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_5_pad_0 = const()[name = string("current_value_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_5_dilations_0 = const()[name = string("current_value_5_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_value_5_groups_0 = const()[name = string("current_value_5_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49054720)))]; |
|
tensor<fp16, [384]> layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49349696)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = current_value_5_dilations_0, groups = current_value_5_groups_0, pad = current_value_5_pad_0, pad_type = current_value_5_pad_type_0, strides = current_value_5_strides_0, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = string("current_value_5_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_561_cast_fp16 = mul(x = current_key_5_cast_fp16, y = var_137_cast_fp16)[name = string("op_561_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_5_cast_fp16 = add(x = var_49_cast_fp16_2, y = var_561_cast_fp16)[name = string("key_5_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_563_cast_fp16 = mul(x = current_value_5_cast_fp16, y = var_137_cast_fp16)[name = string("op_563_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_5_cast_fp16 = add(x = var_56_cast_fp16_2, y = var_563_cast_fp16)[name = string("value_5_cast_fp16")]; |
|
tensor<int32, [4]> var_566 = const()[name = string("op_566"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_9_cast_fp16 = reshape(shape = var_566, x = query_9_cast_fp16)[name = string("mh_q_9_cast_fp16")]; |
|
fp16 var_568_to_fp16 = const()[name = string("op_568_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_569_cast_fp16 = mul(x = mh_q_9_cast_fp16, y = var_568_to_fp16)[name = string("op_569_cast_fp16")]; |
|
tensor<int32, [4]> var_570 = const()[name = string("op_570"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_571_cast_fp16 = reshape(shape = var_570, x = key_5_cast_fp16)[name = string("op_571_cast_fp16")]; |
|
bool mh_w_17_transpose_x_0 = const()[name = string("mh_w_17_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_17_transpose_y_0 = const()[name = string("mh_w_17_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_17_cast_fp16 = matmul(transpose_x = mh_w_17_transpose_x_0, transpose_y = mh_w_17_transpose_y_0, x = var_569_cast_fp16, y = var_571_cast_fp16)[name = string("mh_w_17_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_19_cast_fp16 = add(x = mh_w_17_cast_fp16, y = var_154_cast_fp16)[name = string("mh_w_19_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_579_cast_fp16 = softmax(axis = var_498, x = mh_w_19_cast_fp16)[name = string("op_579_cast_fp16")]; |
|
tensor<int32, [4]> var_580 = const()[name = string("op_580"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_581_cast_fp16 = reshape(shape = var_580, x = value_5_cast_fp16)[name = string("op_581_cast_fp16")]; |
|
bool attn_9_transpose_x_0 = const()[name = string("attn_9_transpose_x_0"), val = bool(false)]; |
|
bool attn_9_transpose_y_0 = const()[name = string("attn_9_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_9_cast_fp16 = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_581_cast_fp16, y = var_579_cast_fp16)[name = string("attn_9_cast_fp16")]; |
|
tensor<int32, [4]> var_584 = const()[name = string("op_584"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_21_cast_fp16 = reshape(shape = var_584, x = attn_9_cast_fp16)[name = string("input_21_cast_fp16")]; |
|
string obj_49_pad_type_0 = const()[name = string("obj_49_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_49_strides_0 = const()[name = string("obj_49_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_49_pad_0 = const()[name = string("obj_49_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_49_dilations_0 = const()[name = string("obj_49_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_49_groups_0 = const()[name = string("obj_49_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49350528)))]; |
|
tensor<fp16, [384]> layers_2_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_2_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49645504)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_49_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations = obj_49_dilations_0, groups = obj_49_groups_0, pad = obj_49_pad_0, pad_type = obj_49_pad_type_0, strides = obj_49_strides_0, weight = layers_2_self_attn_o_proj_weight_to_fp16, x = input_21_cast_fp16)[name = string("obj_49_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_15_cast_fp16 = add(x = inputs_13_cast_fp16, y = obj_49_cast_fp16)[name = string("inputs_15_cast_fp16")]; |
|
tensor<int32, [1]> out_15_axes_0 = const()[name = string("out_15_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_606_to_fp16 = const()[name = string("op_606_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_15_cast_fp16 = layer_norm(axes = out_15_axes_0, epsilon = var_606_to_fp16, x = inputs_15_cast_fp16)[name = string("out_15_cast_fp16")]; |
|
tensor<fp16, [384]> obj_51_gamma_0_to_fp16 = const()[name = string("obj_51_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49646336)))]; |
|
tensor<fp16, [384]> obj_51_beta_0_to_fp16 = const()[name = string("obj_51_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49647168)))]; |
|
fp16 obj_51_epsilon_0_to_fp16 = const()[name = string("obj_51_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_51_cast_fp16 = batch_norm(beta = obj_51_beta_0_to_fp16, epsilon = obj_51_epsilon_0_to_fp16, gamma = obj_51_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_15_cast_fp16)[name = string("obj_51_cast_fp16")]; |
|
string query_11_pad_type_0 = const()[name = string("query_11_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_11_strides_0 = const()[name = string("query_11_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_11_pad_0 = const()[name = string("query_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_11_dilations_0 = const()[name = string("query_11_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_11_groups_0 = const()[name = string("query_11_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_2_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49648000)))]; |
|
tensor<fp16, [384]> layers_2_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_2_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49942976)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_11_cast_fp16 = conv(bias = layers_2_encoder_attn_q_proj_bias_to_fp16, dilations = query_11_dilations_0, groups = query_11_groups_0, pad = query_11_pad_0, pad_type = query_11_pad_type_0, strides = query_11_strides_0, weight = layers_2_encoder_attn_q_proj_weight_to_fp16, x = obj_51_cast_fp16)[name = string("query_11_cast_fp16")]; |
|
tensor<int32, [4]> var_626 = const()[name = string("op_626"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_11_cast_fp16 = reshape(shape = var_626, x = query_11_cast_fp16)[name = string("mh_q_11_cast_fp16")]; |
|
fp16 var_628_to_fp16 = const()[name = string("op_628_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_629_cast_fp16 = mul(x = mh_q_11_cast_fp16, y = var_628_to_fp16)[name = string("op_629_cast_fp16")]; |
|
tensor<int32, [4]> var_630 = const()[name = string("op_630"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_631_cast_fp16 = reshape(shape = var_630, x = obj_53_cast_fp16)[name = string("op_631_cast_fp16")]; |
|
bool mh_w_21_transpose_x_0 = const()[name = string("mh_w_21_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_21_transpose_y_0 = const()[name = string("mh_w_21_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_21_cast_fp16 = matmul(transpose_x = mh_w_21_transpose_x_0, transpose_y = mh_w_21_transpose_y_0, x = var_629_cast_fp16, y = var_631_cast_fp16)[name = string("mh_w_21_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_23_cast_fp16 = add(x = mh_w_21_cast_fp16, y = var_214_cast_fp16)[name = string("mh_w_23_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> obj_59_cast_fp16 = softmax(axis = var_498, x = mh_w_23_cast_fp16)[name = string("obj_59_cast_fp16")]; |
|
tensor<int32, [4]> var_640 = const()[name = string("op_640"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_641_cast_fp16 = reshape(shape = var_640, x = obj_55_cast_fp16)[name = string("op_641_cast_fp16")]; |
|
bool attn_11_transpose_x_0 = const()[name = string("attn_11_transpose_x_0"), val = bool(false)]; |
|
bool attn_11_transpose_y_0 = const()[name = string("attn_11_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_11_cast_fp16 = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_641_cast_fp16, y = obj_59_cast_fp16)[name = string("attn_11_cast_fp16")]; |
|
tensor<int32, [4]> var_644 = const()[name = string("op_644"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_23_cast_fp16 = reshape(shape = var_644, x = attn_11_cast_fp16)[name = string("input_23_cast_fp16")]; |
|
string obj_57_pad_type_0 = const()[name = string("obj_57_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_57_strides_0 = const()[name = string("obj_57_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_57_pad_0 = const()[name = string("obj_57_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_57_dilations_0 = const()[name = string("obj_57_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_57_groups_0 = const()[name = string("obj_57_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_2_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_2_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(49943808)))]; |
|
tensor<fp16, [384]> layers_2_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_2_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(50238784)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_57_cast_fp16 = conv(bias = layers_2_encoder_attn_o_proj_bias_to_fp16, dilations = obj_57_dilations_0, groups = obj_57_groups_0, pad = obj_57_pad_0, pad_type = obj_57_pad_type_0, strides = obj_57_strides_0, weight = layers_2_encoder_attn_o_proj_weight_to_fp16, x = input_23_cast_fp16)[name = string("obj_57_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_17_cast_fp16 = add(x = inputs_15_cast_fp16, y = obj_57_cast_fp16)[name = string("inputs_17_cast_fp16")]; |
|
tensor<int32, [1]> out_17_axes_0 = const()[name = string("out_17_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_665_to_fp16 = const()[name = string("op_665_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_17_cast_fp16 = layer_norm(axes = out_17_axes_0, epsilon = var_665_to_fp16, x = inputs_17_cast_fp16)[name = string("out_17_cast_fp16")]; |
|
tensor<fp16, [384]> input_25_gamma_0_to_fp16 = const()[name = string("input_25_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(50239616)))]; |
|
tensor<fp16, [384]> input_25_beta_0_to_fp16 = const()[name = string("input_25_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(50240448)))]; |
|
fp16 input_25_epsilon_0_to_fp16 = const()[name = string("input_25_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_25_cast_fp16 = batch_norm(beta = input_25_beta_0_to_fp16, epsilon = input_25_epsilon_0_to_fp16, gamma = input_25_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_17_cast_fp16)[name = string("input_25_cast_fp16")]; |
|
string input_27_pad_type_0 = const()[name = string("input_27_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> input_27_strides_0 = const()[name = string("input_27_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_27_pad_0 = const()[name = string("input_27_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_27_dilations_0 = const()[name = string("input_27_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 input_27_groups_0 = const()[name = string("input_27_groups_0"), val = int32(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_2_fc1_weight_to_fp16 = const()[name = string("layers_2_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(50241280)))]; |
|
tensor<fp16, [1536]> layers_2_fc1_bias_to_fp16 = const()[name = string("layers_2_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(51420992)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_27_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations = input_27_dilations_0, groups = input_27_groups_0, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = input_27_strides_0, weight = layers_2_fc1_weight_to_fp16, x = input_25_cast_fp16)[name = string("input_27_cast_fp16")]; |
|
string input_29_mode_0 = const()[name = string("input_29_mode_0"), val = string("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_29_cast_fp16 = gelu(mode = input_29_mode_0, x = input_27_cast_fp16)[name = string("input_29_cast_fp16")]; |
|
string hidden_states_7_pad_type_0 = const()[name = string("hidden_states_7_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> hidden_states_7_strides_0 = const()[name = string("hidden_states_7_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_7_pad_0 = const()[name = string("hidden_states_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_7_dilations_0 = const()[name = string("hidden_states_7_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 hidden_states_7_groups_0 = const()[name = string("hidden_states_7_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_2_fc2_weight_to_fp16 = const()[name = string("layers_2_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(51424128)))]; |
|
tensor<fp16, [384]> layers_2_fc2_bias_to_fp16 = const()[name = string("layers_2_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(52603840)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_7_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations = hidden_states_7_dilations_0, groups = hidden_states_7_groups_0, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = hidden_states_7_strides_0, weight = layers_2_fc2_weight_to_fp16, x = input_29_cast_fp16)[name = string("hidden_states_7_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_19_cast_fp16 = add(x = inputs_17_cast_fp16, y = hidden_states_7_cast_fp16)[name = string("inputs_19_cast_fp16")]; |
|
tensor<int32, [4]> obj_71_begin_0 = const()[name = string("obj_71_begin_0"), val = tensor<int32, [4]>([3, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_71_end_0 = const()[name = string("obj_71_end_0"), val = tensor<int32, [4]>([4, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_71_end_mask_0 = const()[name = string("obj_71_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_71_cast_fp16 = slice_by_index(begin = obj_71_begin_0, end = obj_71_end_0, end_mask = obj_71_end_mask_0, x = read_state_2)[name = string("obj_71_cast_fp16")]; |
|
tensor<int32, [4]> obj_73_begin_0 = const()[name = string("obj_73_begin_0"), val = tensor<int32, [4]>([3, 0, 0, 0])]; |
|
tensor<int32, [4]> obj_73_end_0 = const()[name = string("obj_73_end_0"), val = tensor<int32, [4]>([4, 384, 1, 1536])]; |
|
tensor<bool, [4]> obj_73_end_mask_0 = const()[name = string("obj_73_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])]; |
|
tensor<fp16, [1, 384, 1, 1536]> obj_73_cast_fp16 = slice_by_index(begin = obj_73_begin_0, end = obj_73_end_0, end_mask = obj_73_end_mask_0, x = read_state_3)[name = string("obj_73_cast_fp16")]; |
|
int32 var_711 = const()[name = string("op_711"), val = int32(3)]; |
|
tensor<int32, [1]> out_19_axes_0 = const()[name = string("out_19_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_736_to_fp16 = const()[name = string("op_736_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_19_cast_fp16 = layer_norm(axes = out_19_axes_0, epsilon = var_736_to_fp16, x = inputs_19_cast_fp16)[name = string("out_19_cast_fp16")]; |
|
tensor<fp16, [384]> obj_61_gamma_0_to_fp16 = const()[name = string("obj_61_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(52604672)))]; |
|
tensor<fp16, [384]> obj_61_beta_0_to_fp16 = const()[name = string("obj_61_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(52605504)))]; |
|
fp16 obj_61_epsilon_0_to_fp16 = const()[name = string("obj_61_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_61_cast_fp16 = batch_norm(beta = obj_61_beta_0_to_fp16, epsilon = obj_61_epsilon_0_to_fp16, gamma = obj_61_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_19_cast_fp16)[name = string("obj_61_cast_fp16")]; |
|
string query_13_pad_type_0 = const()[name = string("query_13_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_13_strides_0 = const()[name = string("query_13_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_13_pad_0 = const()[name = string("query_13_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_13_dilations_0 = const()[name = string("query_13_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_13_groups_0 = const()[name = string("query_13_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(52606336)))]; |
|
tensor<fp16, [384]> layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(52901312)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_13_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations = query_13_dilations_0, groups = query_13_groups_0, pad = query_13_pad_0, pad_type = query_13_pad_type_0, strides = query_13_strides_0, weight = layers_3_self_attn_q_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = string("query_13_cast_fp16")]; |
|
string current_key_pad_type_0 = const()[name = string("current_key_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_key_strides_0 = const()[name = string("current_key_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_key_pad_0 = const()[name = string("current_key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_key_dilations_0 = const()[name = string("current_key_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_key_groups_0 = const()[name = string("current_key_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(52902144)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_3_self_attn_k_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = string("current_key_cast_fp16")]; |
|
string current_value_pad_type_0 = const()[name = string("current_value_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> current_value_strides_0 = const()[name = string("current_value_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> current_value_pad_0 = const()[name = string("current_value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> current_value_dilations_0 = const()[name = string("current_value_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 current_value_groups_0 = const()[name = string("current_value_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53197120)))]; |
|
tensor<fp16, [384]> layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53492096)))]; |
|
tensor<fp16, [1, 384, 1, 1]> current_value_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = string("current_value_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_774_cast_fp16 = mul(x = current_key_cast_fp16, y = var_137_cast_fp16)[name = string("op_774_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> key_cast_fp16 = add(x = var_49_cast_fp16_3, y = var_774_cast_fp16)[name = string("key_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> var_776_cast_fp16 = mul(x = current_value_cast_fp16, y = var_137_cast_fp16)[name = string("op_776_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 448]> value_cast_fp16 = add(x = var_56_cast_fp16_3, y = var_776_cast_fp16)[name = string("value_cast_fp16")]; |
|
tensor<int32, [4]> var_779 = const()[name = string("op_779"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_13_cast_fp16 = reshape(shape = var_779, x = query_13_cast_fp16)[name = string("mh_q_13_cast_fp16")]; |
|
fp16 var_781_to_fp16 = const()[name = string("op_781_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_782_cast_fp16 = mul(x = mh_q_13_cast_fp16, y = var_781_to_fp16)[name = string("op_782_cast_fp16")]; |
|
tensor<int32, [4]> var_783 = const()[name = string("op_783"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_784_cast_fp16 = reshape(shape = var_783, x = key_cast_fp16)[name = string("op_784_cast_fp16")]; |
|
bool mh_w_25_transpose_x_0 = const()[name = string("mh_w_25_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_25_transpose_y_0 = const()[name = string("mh_w_25_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_25_cast_fp16 = matmul(transpose_x = mh_w_25_transpose_x_0, transpose_y = mh_w_25_transpose_y_0, x = var_782_cast_fp16, y = var_784_cast_fp16)[name = string("mh_w_25_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> mh_w_27_cast_fp16 = add(x = mh_w_25_cast_fp16, y = var_154_cast_fp16)[name = string("mh_w_27_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 448]> var_792_cast_fp16 = softmax(axis = var_711, x = mh_w_27_cast_fp16)[name = string("op_792_cast_fp16")]; |
|
tensor<int32, [4]> var_793 = const()[name = string("op_793"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 448]> var_794_cast_fp16 = reshape(shape = var_793, x = value_cast_fp16)[name = string("op_794_cast_fp16")]; |
|
bool attn_13_transpose_x_0 = const()[name = string("attn_13_transpose_x_0"), val = bool(false)]; |
|
bool attn_13_transpose_y_0 = const()[name = string("attn_13_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_13_cast_fp16 = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_794_cast_fp16, y = var_792_cast_fp16)[name = string("attn_13_cast_fp16")]; |
|
tensor<int32, [4]> var_797 = const()[name = string("op_797"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_31_cast_fp16 = reshape(shape = var_797, x = attn_13_cast_fp16)[name = string("input_31_cast_fp16")]; |
|
string obj_67_pad_type_0 = const()[name = string("obj_67_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_67_strides_0 = const()[name = string("obj_67_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_67_pad_0 = const()[name = string("obj_67_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_67_dilations_0 = const()[name = string("obj_67_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_67_groups_0 = const()[name = string("obj_67_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53492928)))]; |
|
tensor<fp16, [384]> layers_3_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_3_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53787904)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_67_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations = obj_67_dilations_0, groups = obj_67_groups_0, pad = obj_67_pad_0, pad_type = obj_67_pad_type_0, strides = obj_67_strides_0, weight = layers_3_self_attn_o_proj_weight_to_fp16, x = input_31_cast_fp16)[name = string("obj_67_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_21_cast_fp16 = add(x = inputs_19_cast_fp16, y = obj_67_cast_fp16)[name = string("inputs_21_cast_fp16")]; |
|
tensor<int32, [1]> out_21_axes_0 = const()[name = string("out_21_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_819_to_fp16 = const()[name = string("op_819_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_21_cast_fp16 = layer_norm(axes = out_21_axes_0, epsilon = var_819_to_fp16, x = inputs_21_cast_fp16)[name = string("out_21_cast_fp16")]; |
|
tensor<fp16, [384]> obj_69_gamma_0_to_fp16 = const()[name = string("obj_69_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53788736)))]; |
|
tensor<fp16, [384]> obj_69_beta_0_to_fp16 = const()[name = string("obj_69_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53789568)))]; |
|
fp16 obj_69_epsilon_0_to_fp16 = const()[name = string("obj_69_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_69_cast_fp16 = batch_norm(beta = obj_69_beta_0_to_fp16, epsilon = obj_69_epsilon_0_to_fp16, gamma = obj_69_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_21_cast_fp16)[name = string("obj_69_cast_fp16")]; |
|
string query_pad_type_0 = const()[name = string("query_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> query_strides_0 = const()[name = string("query_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> query_pad_0 = const()[name = string("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> query_dilations_0 = const()[name = string("query_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 query_groups_0 = const()[name = string("query_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_3_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53790400)))]; |
|
tensor<fp16, [384]> layers_3_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_3_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54085376)))]; |
|
tensor<fp16, [1, 384, 1, 1]> query_cast_fp16 = conv(bias = layers_3_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_3_encoder_attn_q_proj_weight_to_fp16, x = obj_69_cast_fp16)[name = string("query_cast_fp16")]; |
|
tensor<int32, [4]> var_839 = const()[name = string("op_839"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_839, x = query_cast_fp16)[name = string("mh_q_cast_fp16")]; |
|
fp16 var_841_to_fp16 = const()[name = string("op_841_to_fp16"), val = fp16(0x1p-3)]; |
|
tensor<fp16, [1, 6, 64, 1]> var_842_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_841_to_fp16)[name = string("op_842_cast_fp16")]; |
|
tensor<int32, [4]> var_843 = const()[name = string("op_843"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_844_cast_fp16 = reshape(shape = var_843, x = obj_71_cast_fp16)[name = string("op_844_cast_fp16")]; |
|
bool mh_w_29_transpose_x_0 = const()[name = string("mh_w_29_transpose_x_0"), val = bool(true)]; |
|
bool mh_w_29_transpose_y_0 = const()[name = string("mh_w_29_transpose_y_0"), val = bool(false)]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_29_cast_fp16 = matmul(transpose_x = mh_w_29_transpose_x_0, transpose_y = mh_w_29_transpose_y_0, x = var_842_cast_fp16, y = var_844_cast_fp16)[name = string("mh_w_29_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> mh_w_cast_fp16 = add(x = mh_w_29_cast_fp16, y = var_214_cast_fp16)[name = string("mh_w_cast_fp16")]; |
|
tensor<fp16, [1, 6, 1, 1536]> obj_77_cast_fp16 = softmax(axis = var_711, x = mh_w_cast_fp16)[name = string("obj_77_cast_fp16")]; |
|
tensor<int32, [4]> var_853 = const()[name = string("op_853"), val = tensor<int32, [4]>([1, 6, 64, -1])]; |
|
tensor<fp16, [1, 6, 64, 1536]> var_854_cast_fp16 = reshape(shape = var_853, x = obj_73_cast_fp16)[name = string("op_854_cast_fp16")]; |
|
bool attn_transpose_x_0 = const()[name = string("attn_transpose_x_0"), val = bool(false)]; |
|
bool attn_transpose_y_0 = const()[name = string("attn_transpose_y_0"), val = bool(true)]; |
|
tensor<fp16, [1, 6, 64, 1]> attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_854_cast_fp16, y = obj_77_cast_fp16)[name = string("attn_cast_fp16")]; |
|
tensor<int32, [4]> var_857 = const()[name = string("op_857"), val = tensor<int32, [4]>([1, 384, 1, -1])]; |
|
tensor<fp16, [1, 384, 1, 1]> input_33_cast_fp16 = reshape(shape = var_857, x = attn_cast_fp16)[name = string("input_33_cast_fp16")]; |
|
string obj_75_pad_type_0 = const()[name = string("obj_75_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> obj_75_strides_0 = const()[name = string("obj_75_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> obj_75_pad_0 = const()[name = string("obj_75_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> obj_75_dilations_0 = const()[name = string("obj_75_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 obj_75_groups_0 = const()[name = string("obj_75_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 384, 1, 1]> layers_3_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_3_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54086208)))]; |
|
tensor<fp16, [384]> layers_3_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_3_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54381184)))]; |
|
tensor<fp16, [1, 384, 1, 1]> obj_75_cast_fp16 = conv(bias = layers_3_encoder_attn_o_proj_bias_to_fp16, dilations = obj_75_dilations_0, groups = obj_75_groups_0, pad = obj_75_pad_0, pad_type = obj_75_pad_type_0, strides = obj_75_strides_0, weight = layers_3_encoder_attn_o_proj_weight_to_fp16, x = input_33_cast_fp16)[name = string("obj_75_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_23_cast_fp16 = add(x = inputs_21_cast_fp16, y = obj_75_cast_fp16)[name = string("inputs_23_cast_fp16")]; |
|
tensor<int32, [1]> out_23_axes_0 = const()[name = string("out_23_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_878_to_fp16 = const()[name = string("op_878_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_23_cast_fp16 = layer_norm(axes = out_23_axes_0, epsilon = var_878_to_fp16, x = inputs_23_cast_fp16)[name = string("out_23_cast_fp16")]; |
|
tensor<fp16, [384]> input_35_gamma_0_to_fp16 = const()[name = string("input_35_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54382016)))]; |
|
tensor<fp16, [384]> input_35_beta_0_to_fp16 = const()[name = string("input_35_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54382848)))]; |
|
fp16 input_35_epsilon_0_to_fp16 = const()[name = string("input_35_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> input_35_cast_fp16 = batch_norm(beta = input_35_beta_0_to_fp16, epsilon = input_35_epsilon_0_to_fp16, gamma = input_35_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_23_cast_fp16)[name = string("input_35_cast_fp16")]; |
|
string input_37_pad_type_0 = const()[name = string("input_37_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> input_37_strides_0 = const()[name = string("input_37_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> input_37_pad_0 = const()[name = string("input_37_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> input_37_dilations_0 = const()[name = string("input_37_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 input_37_groups_0 = const()[name = string("input_37_groups_0"), val = int32(1)]; |
|
tensor<fp16, [1536, 384, 1, 1]> layers_3_fc1_weight_to_fp16 = const()[name = string("layers_3_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54383680)))]; |
|
tensor<fp16, [1536]> layers_3_fc1_bias_to_fp16 = const()[name = string("layers_3_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55563392)))]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_37_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations = input_37_dilations_0, groups = input_37_groups_0, pad = input_37_pad_0, pad_type = input_37_pad_type_0, strides = input_37_strides_0, weight = layers_3_fc1_weight_to_fp16, x = input_35_cast_fp16)[name = string("input_37_cast_fp16")]; |
|
string input_mode_0 = const()[name = string("input_mode_0"), val = string("EXACT")]; |
|
tensor<fp16, [1, 1536, 1, 1]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_37_cast_fp16)[name = string("input_cast_fp16")]; |
|
string hidden_states_9_pad_type_0 = const()[name = string("hidden_states_9_pad_type_0"), val = string("valid")]; |
|
tensor<int32, [2]> hidden_states_9_strides_0 = const()[name = string("hidden_states_9_strides_0"), val = tensor<int32, [2]>([1, 1])]; |
|
tensor<int32, [4]> hidden_states_9_pad_0 = const()[name = string("hidden_states_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [2]> hidden_states_9_dilations_0 = const()[name = string("hidden_states_9_dilations_0"), val = tensor<int32, [2]>([1, 1])]; |
|
int32 hidden_states_9_groups_0 = const()[name = string("hidden_states_9_groups_0"), val = int32(1)]; |
|
tensor<fp16, [384, 1536, 1, 1]> layers_3_fc2_weight_to_fp16 = const()[name = string("layers_3_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55566528)))]; |
|
tensor<fp16, [384]> layers_3_fc2_bias_to_fp16 = const()[name = string("layers_3_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56746240)))]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_9_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations = hidden_states_9_dilations_0, groups = hidden_states_9_groups_0, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = hidden_states_9_strides_0, weight = layers_3_fc2_weight_to_fp16, x = input_cast_fp16)[name = string("hidden_states_9_cast_fp16")]; |
|
tensor<fp16, [1, 384, 1, 1]> inputs_cast_fp16 = add(x = inputs_23_cast_fp16, y = hidden_states_9_cast_fp16)[name = string("inputs_cast_fp16")]; |
|
tensor<int32, [1]> out_axes_0 = const()[name = string("out_axes_0"), val = tensor<int32, [1]>([1])]; |
|
fp16 var_921_to_fp16 = const()[name = string("op_921_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> out_cast_fp16 = layer_norm(axes = out_axes_0, epsilon = var_921_to_fp16, x = inputs_cast_fp16)[name = string("out_cast_fp16")]; |
|
tensor<fp16, [384]> hidden_states_gamma_0_to_fp16 = const()[name = string("hidden_states_gamma_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56747072)))]; |
|
tensor<fp16, [384]> hidden_states_beta_0_to_fp16 = const()[name = string("hidden_states_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56747904)))]; |
|
fp16 hidden_states_epsilon_0_to_fp16 = const()[name = string("hidden_states_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)]; |
|
tensor<fp16, [1, 384, 1, 1]> hidden_states_cast_fp16 = batch_norm(beta = hidden_states_beta_0_to_fp16, epsilon = hidden_states_epsilon_0_to_fp16, gamma = hidden_states_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_cast_fp16)[name = string("hidden_states_cast_fp16")]; |
|
tensor<int32, [1]> var_932_axes_0 = const()[name = string("op_932_axes_0"), val = tensor<int32, [1]>([2])]; |
|
tensor<fp16, [1, 384, 1]> var_932_cast_fp16 = squeeze(axes = var_932_axes_0, x = hidden_states_cast_fp16)[name = string("op_932_cast_fp16")]; |
|
tensor<int32, [3]> var_935_perm_0 = const()[name = string("op_935_perm_0"), val = tensor<int32, [3]>([0, 2, 1])]; |
|
tensor<fp16, [51864]> linear_0_bias_0_to_fp16 = const()[name = string("linear_0_bias_0_to_fp16"), val = tensor<fp16, [51864]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56748736)))]; |
|
tensor<fp16, [1, 1, 384]> var_935_cast_fp16 = transpose(perm = var_935_perm_0, x = var_932_cast_fp16)[name = string("transpose_0")]; |
|
tensor<fp16, [1, 1, 51864]> logits = linear(bias = linear_0_bias_0_to_fp16, weight = embed_tokens_weight_to_fp16, x = var_935_cast_fp16)[name = string("linear_0_cast_fp16")]; |
|
int32 var_939 = const()[name = string("op_939"), val = int32(1)]; |
|
bool obj_81_interleave_0 = const()[name = string("obj_81_interleave_0"), val = bool(false)]; |
|
tensor<fp16, [1, 1536, 1, 1]> key_cache_updates = concat(axis = var_939, interleave = obj_81_interleave_0, values = (current_key_1_cast_fp16, current_key_3_cast_fp16, current_key_5_cast_fp16, current_key_cast_fp16))[name = string("obj_81_cast_fp16")]; |
|
int32 var_942 = const()[name = string("op_942"), val = int32(1)]; |
|
bool obj_83_interleave_0 = const()[name = string("obj_83_interleave_0"), val = bool(false)]; |
|
tensor<fp16, [1, 1536, 1, 1]> value_cache_updates = concat(axis = var_942, interleave = obj_83_interleave_0, values = (current_value_1_cast_fp16, current_value_3_cast_fp16, current_value_5_cast_fp16, current_value_cast_fp16))[name = string("obj_83_cast_fp16")]; |
|
tensor<int32, [4]> var_953_begin_0 = const()[name = string("op_953_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_953_end_0 = const()[name = string("op_953_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_953_end_mask_0 = const()[name = string("op_953_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_953_cast_fp16 = slice_by_index(begin = var_953_begin_0, end = var_953_end_0, end_mask = var_953_end_mask_0, x = obj_41_cast_fp16)[name = string("op_953_cast_fp16")]; |
|
tensor<int32, [4]> var_956_begin_0 = const()[name = string("op_956_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_956_end_0 = const()[name = string("op_956_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_956_end_mask_0 = const()[name = string("op_956_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_956_squeeze_mask_0 = const()[name = string("op_956_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_956_cast_fp16 = slice_by_index(begin = var_956_begin_0, end = var_956_end_0, end_mask = var_956_end_mask_0, squeeze_mask = var_956_squeeze_mask_0, x = var_953_cast_fp16)[name = string("op_956_cast_fp16")]; |
|
tensor<int32, [4]> var_971_begin_0 = const()[name = string("op_971_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_971_end_0 = const()[name = string("op_971_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_971_end_mask_0 = const()[name = string("op_971_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_971_cast_fp16 = slice_by_index(begin = var_971_begin_0, end = var_971_end_0, end_mask = var_971_end_mask_0, x = obj_59_cast_fp16)[name = string("op_971_cast_fp16")]; |
|
tensor<int32, [4]> var_974_begin_0 = const()[name = string("op_974_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_974_end_0 = const()[name = string("op_974_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_974_end_mask_0 = const()[name = string("op_974_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_974_squeeze_mask_0 = const()[name = string("op_974_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_974_cast_fp16 = slice_by_index(begin = var_974_begin_0, end = var_974_end_0, end_mask = var_974_end_mask_0, squeeze_mask = var_974_squeeze_mask_0, x = var_971_cast_fp16)[name = string("op_974_cast_fp16")]; |
|
tensor<int32, [4]> var_989_begin_0 = const()[name = string("op_989_begin_0"), val = tensor<int32, [4]>([0, 5, 0, 0])]; |
|
tensor<int32, [4]> var_989_end_0 = const()[name = string("op_989_end_0"), val = tensor<int32, [4]>([1, 6, 1, 1536])]; |
|
tensor<bool, [4]> var_989_end_mask_0 = const()[name = string("op_989_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_989_cast_fp16 = slice_by_index(begin = var_989_begin_0, end = var_989_end_0, end_mask = var_989_end_mask_0, x = obj_59_cast_fp16)[name = string("op_989_cast_fp16")]; |
|
tensor<int32, [4]> var_992_begin_0 = const()[name = string("op_992_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_992_end_0 = const()[name = string("op_992_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_992_end_mask_0 = const()[name = string("op_992_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_992_squeeze_mask_0 = const()[name = string("op_992_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_992_cast_fp16 = slice_by_index(begin = var_992_begin_0, end = var_992_end_0, end_mask = var_992_end_mask_0, squeeze_mask = var_992_squeeze_mask_0, x = var_989_cast_fp16)[name = string("op_992_cast_fp16")]; |
|
tensor<int32, [4]> var_1007_begin_0 = const()[name = string("op_1007_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1007_end_0 = const()[name = string("op_1007_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_1007_end_mask_0 = const()[name = string("op_1007_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_1007_cast_fp16 = slice_by_index(begin = var_1007_begin_0, end = var_1007_end_0, end_mask = var_1007_end_mask_0, x = obj_77_cast_fp16)[name = string("op_1007_cast_fp16")]; |
|
tensor<int32, [4]> var_1010_begin_0 = const()[name = string("op_1010_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1010_end_0 = const()[name = string("op_1010_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_1010_end_mask_0 = const()[name = string("op_1010_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1010_squeeze_mask_0 = const()[name = string("op_1010_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_1010_cast_fp16 = slice_by_index(begin = var_1010_begin_0, end = var_1010_end_0, end_mask = var_1010_end_mask_0, squeeze_mask = var_1010_squeeze_mask_0, x = var_1007_cast_fp16)[name = string("op_1010_cast_fp16")]; |
|
tensor<int32, [4]> var_1025_begin_0 = const()[name = string("op_1025_begin_0"), val = tensor<int32, [4]>([0, 1, 0, 0])]; |
|
tensor<int32, [4]> var_1025_end_0 = const()[name = string("op_1025_end_0"), val = tensor<int32, [4]>([1, 2, 1, 1536])]; |
|
tensor<bool, [4]> var_1025_end_mask_0 = const()[name = string("op_1025_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_1025_cast_fp16 = slice_by_index(begin = var_1025_begin_0, end = var_1025_end_0, end_mask = var_1025_end_mask_0, x = obj_77_cast_fp16)[name = string("op_1025_cast_fp16")]; |
|
tensor<int32, [4]> var_1028_begin_0 = const()[name = string("op_1028_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1028_end_0 = const()[name = string("op_1028_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_1028_end_mask_0 = const()[name = string("op_1028_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1028_squeeze_mask_0 = const()[name = string("op_1028_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_1028_cast_fp16 = slice_by_index(begin = var_1028_begin_0, end = var_1028_end_0, end_mask = var_1028_end_mask_0, squeeze_mask = var_1028_squeeze_mask_0, x = var_1025_cast_fp16)[name = string("op_1028_cast_fp16")]; |
|
tensor<int32, [4]> var_1043_begin_0 = const()[name = string("op_1043_begin_0"), val = tensor<int32, [4]>([0, 2, 0, 0])]; |
|
tensor<int32, [4]> var_1043_end_0 = const()[name = string("op_1043_end_0"), val = tensor<int32, [4]>([1, 3, 1, 1536])]; |
|
tensor<bool, [4]> var_1043_end_mask_0 = const()[name = string("op_1043_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_1043_cast_fp16 = slice_by_index(begin = var_1043_begin_0, end = var_1043_end_0, end_mask = var_1043_end_mask_0, x = obj_77_cast_fp16)[name = string("op_1043_cast_fp16")]; |
|
tensor<int32, [4]> var_1046_begin_0 = const()[name = string("op_1046_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1046_end_0 = const()[name = string("op_1046_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_1046_end_mask_0 = const()[name = string("op_1046_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1046_squeeze_mask_0 = const()[name = string("op_1046_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_1046_cast_fp16 = slice_by_index(begin = var_1046_begin_0, end = var_1046_end_0, end_mask = var_1046_end_mask_0, squeeze_mask = var_1046_squeeze_mask_0, x = var_1043_cast_fp16)[name = string("op_1046_cast_fp16")]; |
|
tensor<int32, [4]> var_1061_begin_0 = const()[name = string("op_1061_begin_0"), val = tensor<int32, [4]>([0, 3, 0, 0])]; |
|
tensor<int32, [4]> var_1061_end_0 = const()[name = string("op_1061_end_0"), val = tensor<int32, [4]>([1, 4, 1, 1536])]; |
|
tensor<bool, [4]> var_1061_end_mask_0 = const()[name = string("op_1061_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_1061_cast_fp16 = slice_by_index(begin = var_1061_begin_0, end = var_1061_end_0, end_mask = var_1061_end_mask_0, x = obj_77_cast_fp16)[name = string("op_1061_cast_fp16")]; |
|
tensor<int32, [4]> var_1064_begin_0 = const()[name = string("op_1064_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1064_end_0 = const()[name = string("op_1064_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_1064_end_mask_0 = const()[name = string("op_1064_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1064_squeeze_mask_0 = const()[name = string("op_1064_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_1064_cast_fp16 = slice_by_index(begin = var_1064_begin_0, end = var_1064_end_0, end_mask = var_1064_end_mask_0, squeeze_mask = var_1064_squeeze_mask_0, x = var_1061_cast_fp16)[name = string("op_1064_cast_fp16")]; |
|
tensor<int32, [4]> var_1079_begin_0 = const()[name = string("op_1079_begin_0"), val = tensor<int32, [4]>([0, 4, 0, 0])]; |
|
tensor<int32, [4]> var_1079_end_0 = const()[name = string("op_1079_end_0"), val = tensor<int32, [4]>([1, 5, 1, 1536])]; |
|
tensor<bool, [4]> var_1079_end_mask_0 = const()[name = string("op_1079_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])]; |
|
tensor<fp16, [1, 1, 1, 1536]> var_1079_cast_fp16 = slice_by_index(begin = var_1079_begin_0, end = var_1079_end_0, end_mask = var_1079_end_mask_0, x = obj_77_cast_fp16)[name = string("op_1079_cast_fp16")]; |
|
tensor<int32, [4]> var_1082_begin_0 = const()[name = string("op_1082_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])]; |
|
tensor<int32, [4]> var_1082_end_0 = const()[name = string("op_1082_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])]; |
|
tensor<bool, [4]> var_1082_end_mask_0 = const()[name = string("op_1082_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])]; |
|
tensor<bool, [4]> var_1082_squeeze_mask_0 = const()[name = string("op_1082_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])]; |
|
tensor<fp16, [1, 1, 1536]> var_1082_cast_fp16 = slice_by_index(begin = var_1082_begin_0, end = var_1082_end_0, end_mask = var_1082_end_mask_0, squeeze_mask = var_1082_squeeze_mask_0, x = var_1079_cast_fp16)[name = string("op_1082_cast_fp16")]; |
|
int32 var_1089 = const()[name = string("op_1089"), val = int32(1)]; |
|
bool var_1090_interleave_0 = const()[name = string("op_1090_interleave_0"), val = bool(false)]; |
|
tensor<fp16, [1, 8, 1536]> var_1090_cast_fp16 = concat(axis = var_1089, interleave = var_1090_interleave_0, values = (var_956_cast_fp16, var_974_cast_fp16, var_992_cast_fp16, var_1010_cast_fp16, var_1028_cast_fp16, var_1046_cast_fp16, var_1064_cast_fp16, var_1082_cast_fp16))[name = string("op_1090_cast_fp16")]; |
|
bool var_1093 = const()[name = string("op_1093"), val = bool(false)]; |
|
tensor<int32, [1]> obj_axes_0 = const()[name = string("obj_axes_0"), val = tensor<int32, [1]>([1])]; |
|
tensor<fp16, [1, 1536]> alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_1093, x = var_1090_cast_fp16)[name = string("obj_cast_fp16")]; |
|
} -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights); |
|
} |
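
A minimal sketch of driving this stateful decoder for one step from Python, assuming a compiled .mlpackage and coremltools 8's stateful-prediction interface. The package path and the contents of the feed dict are placeholders, not part of this file; the actual input names and shapes should be taken from the model's own input description (printed below), while the output names and shapes follow the return tuple of main() above (logits, key_cache_updates, value_cache_updates, alignment_heads_weights).

    import numpy as np
    import coremltools as ct  # assumed: coremltools >= 8.0 for make_state()/stateful predict

    DECODER_PATH = "TextDecoder.mlpackage"  # placeholder path to the compiled package


    def decode_step(mlmodel, state, inputs):
        """Run one decoder step and unpack the four outputs declared by main()."""
        out = mlmodel.predict(inputs, state=state)
        logits = out["logits"]                      # [1, 1, 51864] vocabulary scores
        key_upd = out["key_cache_updates"]          # [1, 1536, 1, 1] new self-attn keys (4 layers x 384)
        value_upd = out["value_cache_updates"]      # [1, 1536, 1, 1] new self-attn values
        align = out["alignment_heads_weights"]      # [1, 1536] mean cross-attention weights
        next_token = int(np.argmax(logits[0, -1]))  # greedy pick for this step
        return next_token, key_upd, value_upd, align


    if __name__ == "__main__":
        mlmodel = ct.models.MLModel(DECODER_PATH)
        state = mlmodel.make_state()  # allocates the key/value cache state tensors
        # Print the declared inputs; the feed dict passed to decode_step must use
        # exactly these names and dtypes (filled in by the caller, omitted here).
        for inp in mlmodel.get_spec().description.input:
            print(inp.name)

As a usage note, alignment_heads_weights is simply the mean over the eight cross-attention rows sliced and concatenated at the end of main(), so it can be collected per generated token to estimate word-level timestamps alongside the greedy token choice.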