program(1.3)
[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
{
func main<ios18>(tensor<int32, [1]> cache_length, tensor<fp16, [1, 448]> decoder_key_padding_mask, state<tensor<fp16, [6, 512, 1, 1536]>> encoder_attn_key_cache, state<tensor<fp16, [1, 1536]>> encoder_attn_key_padding_mask, state<tensor<fp16, [6, 512, 1, 1536]>> encoder_attn_value_cache, tensor<int32, [1]> input_ids, tensor<fp16, [1, 448]> kv_cache_update_mask, state<tensor<fp16, [6, 512, 1, 448]>> self_attn_key_cache, state<tensor<fp16, [6, 512, 1, 448]>> self_attn_value_cache) {
int32 var_30_axis_0 = const()[name = string("op_30_axis_0"), val = int32(0)];
int32 var_30_batch_dims_0 = const()[name = string("op_30_batch_dims_0"), val = int32(0)];
bool var_30_validate_indices_0 = const()[name = string("op_30_validate_indices_0"), val = bool(false)];
tensor<fp16, [51864, 512]> embed_tokens_weight_to_fp16 = const()[name = string("embed_tokens_weight_to_fp16"), val = tensor<fp16, [51864, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
tensor<fp16, [1, 512]> var_30_cast_fp16 = gather(axis = var_30_axis_0, batch_dims = var_30_batch_dims_0, indices = input_ids, validate_indices = var_30_validate_indices_0, x = embed_tokens_weight_to_fp16)[name = string("op_30_cast_fp16")];
int32 var_34_axis_0 = const()[name = string("op_34_axis_0"), val = int32(0)];
int32 var_34_batch_dims_0 = const()[name = string("op_34_batch_dims_0"), val = int32(0)];
bool var_34_validate_indices_0 = const()[name = string("op_34_validate_indices_0"), val = bool(false)];
tensor<fp16, [448, 512]> embed_positions_weight_to_fp16 = const()[name = string("embed_positions_weight_to_fp16"), val = tensor<fp16, [448, 512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53108864)))];
string cache_length_to_uint16_dtype_0 = const()[name = string("cache_length_to_uint16_dtype_0"), val = string("uint16")];
tensor<uint16, [1]> cache_length_to_uint16 = cast(dtype = cache_length_to_uint16_dtype_0, x = cache_length)[name = string("cast_99")];
tensor<fp16, [1, 512]> var_34_cast_fp16_cast_uint16 = gather(axis = var_34_axis_0, batch_dims = var_34_batch_dims_0, indices = cache_length_to_uint16, validate_indices = var_34_validate_indices_0, x = embed_positions_weight_to_fp16)[name = string("op_34_cast_fp16_cast_uint16")];
tensor<fp16, [1, 512]> hidden_states_1_cast_fp16 = add(x = var_30_cast_fp16, y = var_34_cast_fp16_cast_uint16)[name = string("hidden_states_1_cast_fp16")];
tensor<int32, [1]> var_48_axes_0 = const()[name = string("op_48_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 512, 1]> var_48_cast_fp16 = expand_dims(axes = var_48_axes_0, x = hidden_states_1_cast_fp16)[name = string("op_48_cast_fp16")];
tensor<int32, [1]> inputs_1_axes_0 = const()[name = string("inputs_1_axes_0"), val = tensor<int32, [1]>([3])];
tensor<fp16, [1, 512, 1, 1]> inputs_1_cast_fp16 = expand_dims(axes = inputs_1_axes_0, x = var_48_cast_fp16)[name = string("inputs_1_cast_fp16")];
tensor<fp16, [6, 512, 1, 448]> read_state_0 = read_state(input = self_attn_key_cache)[name = string("read_state_0")];
tensor<int32, [6]> tile_0 = const()[name = string("tile_0"), val = tensor<int32, [6]>([1, 1, 1, 1, 1, 1])];
int32 var_53_axis_0 = const()[name = string("op_53_axis_0"), val = int32(0)];
tensor<fp16, [1, 512, 1, 448]> var_53_cast_fp16_0, tensor<fp16, [1, 512, 1, 448]> var_53_cast_fp16_1, tensor<fp16, [1, 512, 1, 448]> var_53_cast_fp16_2, tensor<fp16, [1, 512, 1, 448]> var_53_cast_fp16_3, tensor<fp16, [1, 512, 1, 448]> var_53_cast_fp16_4, tensor<fp16, [1, 512, 1, 448]> var_53_cast_fp16_5 = split(axis = var_53_axis_0, split_sizes = tile_0, x = read_state_0)[name = string("op_53_cast_fp16")];
tensor<fp16, [6, 512, 1, 448]> read_state_1 = read_state(input = self_attn_value_cache)[name = string("read_state_1")];
tensor<int32, [6]> tile_1 = const()[name = string("tile_1"), val = tensor<int32, [6]>([1, 1, 1, 1, 1, 1])];
int32 var_62_axis_0 = const()[name = string("op_62_axis_0"), val = int32(0)];
tensor<fp16, [1, 512, 1, 448]> var_62_cast_fp16_0, tensor<fp16, [1, 512, 1, 448]> var_62_cast_fp16_1, tensor<fp16, [1, 512, 1, 448]> var_62_cast_fp16_2, tensor<fp16, [1, 512, 1, 448]> var_62_cast_fp16_3, tensor<fp16, [1, 512, 1, 448]> var_62_cast_fp16_4, tensor<fp16, [1, 512, 1, 448]> var_62_cast_fp16_5 = split(axis = var_62_axis_0, split_sizes = tile_1, x = read_state_1)[name = string("op_62_cast_fp16")];
tensor<fp16, [6, 512, 1, 1536]> read_state_2 = read_state(input = encoder_attn_key_cache)[name = string("read_state_2")];
tensor<int32, [4]> obj_17_begin_0 = const()[name = string("obj_17_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> obj_17_end_0 = const()[name = string("obj_17_end_0"), val = tensor<int32, [4]>([1, 512, 1, 1536])];
tensor<bool, [4]> obj_17_end_mask_0 = const()[name = string("obj_17_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_17_cast_fp16 = slice_by_index(begin = obj_17_begin_0, end = obj_17_end_0, end_mask = obj_17_end_mask_0, x = read_state_2)[name = string("obj_17_cast_fp16")];
tensor<fp16, [6, 512, 1, 1536]> read_state_3 = read_state(input = encoder_attn_value_cache)[name = string("read_state_3")];
tensor<int32, [4]> obj_19_begin_0 = const()[name = string("obj_19_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> obj_19_end_0 = const()[name = string("obj_19_end_0"), val = tensor<int32, [4]>([1, 512, 1, 1536])];
tensor<bool, [4]> obj_19_end_mask_0 = const()[name = string("obj_19_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_19_cast_fp16 = slice_by_index(begin = obj_19_begin_0, end = obj_19_end_0, end_mask = obj_19_end_mask_0, x = read_state_3)[name = string("obj_19_cast_fp16")];
int32 var_84 = const()[name = string("op_84"), val = int32(3)];
tensor<int32, [1]> out_1_axes_0 = const()[name = string("out_1_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_109_to_fp16 = const()[name = string("op_109_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_1_cast_fp16 = layer_norm(axes = out_1_axes_0, epsilon = var_109_to_fp16, x = inputs_1_cast_fp16)[name = string("out_1_cast_fp16")];
tensor<fp16, [512]> obj_5_mean_0_to_fp16 = const()[name = string("obj_5_mean_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53567680)))];
tensor<fp16, [512]> obj_5_variance_0_to_fp16 = const()[name = string("obj_5_variance_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53568768)))];
tensor<fp16, [512]> obj_5_gamma_0_to_fp16 = const()[name = string("obj_5_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53569856)))];
tensor<fp16, [512]> obj_5_beta_0_to_fp16 = const()[name = string("obj_5_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53570944)))];
fp16 obj_5_epsilon_0_to_fp16 = const()[name = string("obj_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_5_cast_fp16 = batch_norm(beta = obj_5_beta_0_to_fp16, epsilon = obj_5_epsilon_0_to_fp16, gamma = obj_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_1_cast_fp16)[name = string("obj_5_cast_fp16")];
string query_1_pad_type_0 = const()[name = string("query_1_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_1_strides_0 = const()[name = string("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_1_pad_0 = const()[name = string("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_1_dilations_0 = const()[name = string("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_1_groups_0 = const()[name = string("query_1_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(53572032)))];
tensor<fp16, [512]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54096384)))];
tensor<fp16, [1, 512, 1, 1]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("query_1_cast_fp16")];
string current_key_1_pad_type_0 = const()[name = string("current_key_1_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_key_1_strides_0 = const()[name = string("current_key_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_key_1_pad_0 = const()[name = string("current_key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_key_1_dilations_0 = const()[name = string("current_key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_key_1_groups_0 = const()[name = string("current_key_1_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54097472)))];
tensor<fp16, [1, 512, 1, 1]> current_key_1_cast_fp16 = conv(dilations = current_key_1_dilations_0, groups = current_key_1_groups_0, pad = current_key_1_pad_0, pad_type = current_key_1_pad_type_0, strides = current_key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_key_1_cast_fp16")];
string current_value_1_pad_type_0 = const()[name = string("current_value_1_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_value_1_strides_0 = const()[name = string("current_value_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_value_1_pad_0 = const()[name = string("current_value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_value_1_dilations_0 = const()[name = string("current_value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_value_1_groups_0 = const()[name = string("current_value_1_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(54621824)))];
tensor<fp16, [512]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55146176)))];
tensor<fp16, [1, 512, 1, 1]> current_value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = current_value_1_dilations_0, groups = current_value_1_groups_0, pad = current_value_1_pad_0, pad_type = current_value_1_pad_type_0, strides = current_value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = string("current_value_1_cast_fp16")];
tensor<int32, [1]> var_144_axes_0 = const()[name = string("op_144_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 448]> var_144_cast_fp16 = expand_dims(axes = var_144_axes_0, x = kv_cache_update_mask)[name = string("op_144_cast_fp16")];
tensor<int32, [1]> var_145_axes_0 = const()[name = string("op_145_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 1, 1, 448]> var_145_cast_fp16 = expand_dims(axes = var_145_axes_0, x = var_144_cast_fp16)[name = string("op_145_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_147_cast_fp16 = mul(x = current_key_1_cast_fp16, y = var_145_cast_fp16)[name = string("op_147_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> key_1_cast_fp16 = add(x = var_53_cast_fp16_0, y = var_147_cast_fp16)[name = string("key_1_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_149_cast_fp16 = mul(x = current_value_1_cast_fp16, y = var_145_cast_fp16)[name = string("op_149_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> value_1_cast_fp16 = add(x = var_62_cast_fp16_0, y = var_149_cast_fp16)[name = string("value_1_cast_fp16")];
tensor<int32, [4]> var_152 = const()[name = string("op_152"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_1_cast_fp16 = reshape(shape = var_152, x = query_1_cast_fp16)[name = string("mh_q_1_cast_fp16")];
fp16 var_154_to_fp16 = const()[name = string("op_154_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_155_cast_fp16 = mul(x = mh_q_1_cast_fp16, y = var_154_to_fp16)[name = string("op_155_cast_fp16")];
tensor<int32, [4]> var_156 = const()[name = string("op_156"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_157_cast_fp16 = reshape(shape = var_156, x = key_1_cast_fp16)[name = string("op_157_cast_fp16")];
bool mh_w_1_transpose_x_0 = const()[name = string("mh_w_1_transpose_x_0"), val = bool(true)];
bool mh_w_1_transpose_y_0 = const()[name = string("mh_w_1_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 448]> mh_w_1_cast_fp16 = matmul(transpose_x = mh_w_1_transpose_x_0, transpose_y = mh_w_1_transpose_y_0, x = var_155_cast_fp16, y = var_157_cast_fp16)[name = string("mh_w_1_cast_fp16")];
tensor<int32, [1]> var_161_axes_0 = const()[name = string("op_161_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 448]> var_161_cast_fp16 = expand_dims(axes = var_161_axes_0, x = decoder_key_padding_mask)[name = string("op_161_cast_fp16")];
tensor<int32, [1]> var_162_axes_0 = const()[name = string("op_162_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 1, 1, 448]> var_162_cast_fp16 = expand_dims(axes = var_162_axes_0, x = var_161_cast_fp16)[name = string("op_162_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> mh_w_3_cast_fp16 = add(x = mh_w_1_cast_fp16, y = var_162_cast_fp16)[name = string("mh_w_3_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> var_165_cast_fp16 = softmax(axis = var_84, x = mh_w_3_cast_fp16)[name = string("op_165_cast_fp16")];
tensor<int32, [4]> var_166 = const()[name = string("op_166"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_167_cast_fp16 = reshape(shape = var_166, x = value_1_cast_fp16)[name = string("op_167_cast_fp16")];
bool attn_1_transpose_x_0 = const()[name = string("attn_1_transpose_x_0"), val = bool(false)];
bool attn_1_transpose_y_0 = const()[name = string("attn_1_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_1_cast_fp16 = matmul(transpose_x = attn_1_transpose_x_0, transpose_y = attn_1_transpose_y_0, x = var_167_cast_fp16, y = var_165_cast_fp16)[name = string("attn_1_cast_fp16")];
tensor<int32, [4]> var_170 = const()[name = string("op_170"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_1_cast_fp16 = reshape(shape = var_170, x = attn_1_cast_fp16)[name = string("input_1_cast_fp16")];
string obj_11_pad_type_0 = const()[name = string("obj_11_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_11_strides_0 = const()[name = string("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_11_pad_0 = const()[name = string("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_11_dilations_0 = const()[name = string("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_11_groups_0 = const()[name = string("obj_11_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55147264)))];
tensor<fp16, [512]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55671616)))];
tensor<fp16, [1, 512, 1, 1]> obj_11_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = string("obj_11_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_11_cast_fp16)[name = string("inputs_3_cast_fp16")];
tensor<int32, [1]> out_3_axes_0 = const()[name = string("out_3_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_192_to_fp16 = const()[name = string("op_192_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_3_cast_fp16 = layer_norm(axes = out_3_axes_0, epsilon = var_192_to_fp16, x = inputs_3_cast_fp16)[name = string("out_3_cast_fp16")];
tensor<fp16, [512]> obj_13_gamma_0_to_fp16 = const()[name = string("obj_13_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55672704)))];
tensor<fp16, [512]> obj_13_beta_0_to_fp16 = const()[name = string("obj_13_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55673792)))];
fp16 obj_13_epsilon_0_to_fp16 = const()[name = string("obj_13_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_13_cast_fp16 = batch_norm(beta = obj_13_beta_0_to_fp16, epsilon = obj_13_epsilon_0_to_fp16, gamma = obj_13_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_3_cast_fp16)[name = string("obj_13_cast_fp16")];
string query_3_pad_type_0 = const()[name = string("query_3_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_3_strides_0 = const()[name = string("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_3_pad_0 = const()[name = string("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_3_dilations_0 = const()[name = string("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_3_groups_0 = const()[name = string("query_3_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_0_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(55674880)))];
tensor<fp16, [512]> layers_0_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56199232)))];
tensor<fp16, [1, 512, 1, 1]> query_3_cast_fp16 = conv(bias = layers_0_encoder_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_0_encoder_attn_q_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = string("query_3_cast_fp16")];
tensor<int32, [4]> var_212 = const()[name = string("op_212"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_3_cast_fp16 = reshape(shape = var_212, x = query_3_cast_fp16)[name = string("mh_q_3_cast_fp16")];
fp16 var_214_to_fp16 = const()[name = string("op_214_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_215_cast_fp16 = mul(x = mh_q_3_cast_fp16, y = var_214_to_fp16)[name = string("op_215_cast_fp16")];
tensor<int32, [4]> var_216 = const()[name = string("op_216"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_217_cast_fp16 = reshape(shape = var_216, x = obj_17_cast_fp16)[name = string("op_217_cast_fp16")];
bool mh_w_5_transpose_x_0 = const()[name = string("mh_w_5_transpose_x_0"), val = bool(true)];
bool mh_w_5_transpose_y_0 = const()[name = string("mh_w_5_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 1536]> mh_w_5_cast_fp16 = matmul(transpose_x = mh_w_5_transpose_x_0, transpose_y = mh_w_5_transpose_y_0, x = var_215_cast_fp16, y = var_217_cast_fp16)[name = string("mh_w_5_cast_fp16")];
tensor<fp16, [1, 1536]> read_state_4 = read_state(input = encoder_attn_key_padding_mask)[name = string("read_state_4")];
tensor<int32, [1]> var_221_axes_0 = const()[name = string("op_221_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1, 1536]> var_221_cast_fp16 = expand_dims(axes = var_221_axes_0, x = read_state_4)[name = string("op_221_cast_fp16")];
tensor<int32, [1]> var_222_axes_0 = const()[name = string("op_222_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 1, 1, 1536]> var_222_cast_fp16 = expand_dims(axes = var_222_axes_0, x = var_221_cast_fp16)[name = string("op_222_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> mh_w_7_cast_fp16 = add(x = mh_w_5_cast_fp16, y = var_222_cast_fp16)[name = string("mh_w_7_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> obj_23_cast_fp16 = softmax(axis = var_84, x = mh_w_7_cast_fp16)[name = string("obj_23_cast_fp16")];
tensor<int32, [4]> var_226 = const()[name = string("op_226"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_227_cast_fp16 = reshape(shape = var_226, x = obj_19_cast_fp16)[name = string("op_227_cast_fp16")];
bool attn_3_transpose_x_0 = const()[name = string("attn_3_transpose_x_0"), val = bool(false)];
bool attn_3_transpose_y_0 = const()[name = string("attn_3_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_3_cast_fp16 = matmul(transpose_x = attn_3_transpose_x_0, transpose_y = attn_3_transpose_y_0, x = var_227_cast_fp16, y = obj_23_cast_fp16)[name = string("attn_3_cast_fp16")];
tensor<int32, [4]> var_230 = const()[name = string("op_230"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_3_cast_fp16 = reshape(shape = var_230, x = attn_3_cast_fp16)[name = string("input_3_cast_fp16")];
string obj_21_pad_type_0 = const()[name = string("obj_21_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_21_strides_0 = const()[name = string("obj_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_21_pad_0 = const()[name = string("obj_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_21_dilations_0 = const()[name = string("obj_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_21_groups_0 = const()[name = string("obj_21_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_0_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56200320)))];
tensor<fp16, [512]> layers_0_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_0_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56724672)))];
tensor<fp16, [1, 512, 1, 1]> obj_21_cast_fp16 = conv(bias = layers_0_encoder_attn_o_proj_bias_to_fp16, dilations = obj_21_dilations_0, groups = obj_21_groups_0, pad = obj_21_pad_0, pad_type = obj_21_pad_type_0, strides = obj_21_strides_0, weight = layers_0_encoder_attn_o_proj_weight_to_fp16, x = input_3_cast_fp16)[name = string("obj_21_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = obj_21_cast_fp16)[name = string("inputs_5_cast_fp16")];
tensor<int32, [1]> out_5_axes_0 = const()[name = string("out_5_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_248_to_fp16 = const()[name = string("op_248_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_5_cast_fp16 = layer_norm(axes = out_5_axes_0, epsilon = var_248_to_fp16, x = inputs_5_cast_fp16)[name = string("out_5_cast_fp16")];
tensor<fp16, [512]> input_5_gamma_0_to_fp16 = const()[name = string("input_5_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56725760)))];
tensor<fp16, [512]> input_5_beta_0_to_fp16 = const()[name = string("input_5_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56726848)))];
fp16 input_5_epsilon_0_to_fp16 = const()[name = string("input_5_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> input_5_cast_fp16 = batch_norm(beta = input_5_beta_0_to_fp16, epsilon = input_5_epsilon_0_to_fp16, gamma = input_5_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_5_cast_fp16)[name = string("input_5_cast_fp16")];
string input_7_pad_type_0 = const()[name = string("input_7_pad_type_0"), val = string("valid")];
tensor<int32, [2]> input_7_strides_0 = const()[name = string("input_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> input_7_pad_0 = const()[name = string("input_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> input_7_dilations_0 = const()[name = string("input_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 input_7_groups_0 = const()[name = string("input_7_groups_0"), val = int32(1)];
tensor<fp16, [2048, 512, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = string("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [2048, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(56727936)))];
tensor<fp16, [2048]> layers_0_fc1_bias_to_fp16 = const()[name = string("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(58825152)))];
tensor<fp16, [1, 2048, 1, 1]> input_7_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_7_dilations_0, groups = input_7_groups_0, pad = input_7_pad_0, pad_type = input_7_pad_type_0, strides = input_7_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_5_cast_fp16)[name = string("input_7_cast_fp16")];
string input_9_mode_0 = const()[name = string("input_9_mode_0"), val = string("EXACT")];
tensor<fp16, [1, 2048, 1, 1]> input_9_cast_fp16 = gelu(mode = input_9_mode_0, x = input_7_cast_fp16)[name = string("input_9_cast_fp16")];
string hidden_states_3_pad_type_0 = const()[name = string("hidden_states_3_pad_type_0"), val = string("valid")];
tensor<int32, [2]> hidden_states_3_strides_0 = const()[name = string("hidden_states_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> hidden_states_3_pad_0 = const()[name = string("hidden_states_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> hidden_states_3_dilations_0 = const()[name = string("hidden_states_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 hidden_states_3_groups_0 = const()[name = string("hidden_states_3_groups_0"), val = int32(1)];
tensor<fp16, [512, 2048, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = string("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [512, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(58829312)))];
tensor<fp16, [512]> layers_0_fc2_bias_to_fp16 = const()[name = string("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(60926528)))];
tensor<fp16, [1, 512, 1, 1]> hidden_states_3_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_3_dilations_0, groups = hidden_states_3_groups_0, pad = hidden_states_3_pad_0, pad_type = hidden_states_3_pad_type_0, strides = hidden_states_3_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_9_cast_fp16)[name = string("hidden_states_3_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = hidden_states_3_cast_fp16)[name = string("inputs_7_cast_fp16")];
tensor<int32, [4]> obj_35_begin_0 = const()[name = string("obj_35_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])];
tensor<int32, [4]> obj_35_end_0 = const()[name = string("obj_35_end_0"), val = tensor<int32, [4]>([2, 512, 1, 1536])];
tensor<bool, [4]> obj_35_end_mask_0 = const()[name = string("obj_35_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_35_cast_fp16 = slice_by_index(begin = obj_35_begin_0, end = obj_35_end_0, end_mask = obj_35_end_mask_0, x = read_state_2)[name = string("obj_35_cast_fp16")];
tensor<int32, [4]> obj_37_begin_0 = const()[name = string("obj_37_begin_0"), val = tensor<int32, [4]>([1, 0, 0, 0])];
tensor<int32, [4]> obj_37_end_0 = const()[name = string("obj_37_end_0"), val = tensor<int32, [4]>([2, 512, 1, 1536])];
tensor<bool, [4]> obj_37_end_mask_0 = const()[name = string("obj_37_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_37_cast_fp16 = slice_by_index(begin = obj_37_begin_0, end = obj_37_end_0, end_mask = obj_37_end_mask_0, x = read_state_3)[name = string("obj_37_cast_fp16")];
int32 var_293 = const()[name = string("op_293"), val = int32(3)];
tensor<int32, [1]> out_7_axes_0 = const()[name = string("out_7_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_318_to_fp16 = const()[name = string("op_318_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_7_cast_fp16 = layer_norm(axes = out_7_axes_0, epsilon = var_318_to_fp16, x = inputs_7_cast_fp16)[name = string("out_7_cast_fp16")];
tensor<fp16, [512]> obj_25_gamma_0_to_fp16 = const()[name = string("obj_25_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(60927616)))];
tensor<fp16, [512]> obj_25_beta_0_to_fp16 = const()[name = string("obj_25_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(60928704)))];
fp16 obj_25_epsilon_0_to_fp16 = const()[name = string("obj_25_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_25_cast_fp16 = batch_norm(beta = obj_25_beta_0_to_fp16, epsilon = obj_25_epsilon_0_to_fp16, gamma = obj_25_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_7_cast_fp16)[name = string("obj_25_cast_fp16")];
string query_5_pad_type_0 = const()[name = string("query_5_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_5_strides_0 = const()[name = string("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_5_pad_0 = const()[name = string("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_5_dilations_0 = const()[name = string("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_5_groups_0 = const()[name = string("query_5_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(60929792)))];
tensor<fp16, [512]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(61454144)))];
tensor<fp16, [1, 512, 1, 1]> query_5_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("query_5_cast_fp16")];
string current_key_3_pad_type_0 = const()[name = string("current_key_3_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_key_3_strides_0 = const()[name = string("current_key_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_key_3_pad_0 = const()[name = string("current_key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_key_3_dilations_0 = const()[name = string("current_key_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_key_3_groups_0 = const()[name = string("current_key_3_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(61455232)))];
tensor<fp16, [1, 512, 1, 1]> current_key_3_cast_fp16 = conv(dilations = current_key_3_dilations_0, groups = current_key_3_groups_0, pad = current_key_3_pad_0, pad_type = current_key_3_pad_type_0, strides = current_key_3_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_key_3_cast_fp16")];
string current_value_3_pad_type_0 = const()[name = string("current_value_3_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_value_3_strides_0 = const()[name = string("current_value_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_value_3_pad_0 = const()[name = string("current_value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_value_3_dilations_0 = const()[name = string("current_value_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_value_3_groups_0 = const()[name = string("current_value_3_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(61979584)))];
tensor<fp16, [512]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(62503936)))];
tensor<fp16, [1, 512, 1, 1]> current_value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = current_value_3_dilations_0, groups = current_value_3_groups_0, pad = current_value_3_pad_0, pad_type = current_value_3_pad_type_0, strides = current_value_3_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_25_cast_fp16)[name = string("current_value_3_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_356_cast_fp16 = mul(x = current_key_3_cast_fp16, y = var_145_cast_fp16)[name = string("op_356_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> key_3_cast_fp16 = add(x = var_53_cast_fp16_1, y = var_356_cast_fp16)[name = string("key_3_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_358_cast_fp16 = mul(x = current_value_3_cast_fp16, y = var_145_cast_fp16)[name = string("op_358_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> value_3_cast_fp16 = add(x = var_62_cast_fp16_1, y = var_358_cast_fp16)[name = string("value_3_cast_fp16")];
tensor<int32, [4]> var_361 = const()[name = string("op_361"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_5_cast_fp16 = reshape(shape = var_361, x = query_5_cast_fp16)[name = string("mh_q_5_cast_fp16")];
fp16 var_363_to_fp16 = const()[name = string("op_363_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_364_cast_fp16 = mul(x = mh_q_5_cast_fp16, y = var_363_to_fp16)[name = string("op_364_cast_fp16")];
tensor<int32, [4]> var_365 = const()[name = string("op_365"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_366_cast_fp16 = reshape(shape = var_365, x = key_3_cast_fp16)[name = string("op_366_cast_fp16")];
bool mh_w_9_transpose_x_0 = const()[name = string("mh_w_9_transpose_x_0"), val = bool(true)];
bool mh_w_9_transpose_y_0 = const()[name = string("mh_w_9_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 448]> mh_w_9_cast_fp16 = matmul(transpose_x = mh_w_9_transpose_x_0, transpose_y = mh_w_9_transpose_y_0, x = var_364_cast_fp16, y = var_366_cast_fp16)[name = string("mh_w_9_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> mh_w_11_cast_fp16 = add(x = mh_w_9_cast_fp16, y = var_162_cast_fp16)[name = string("mh_w_11_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> var_374_cast_fp16 = softmax(axis = var_293, x = mh_w_11_cast_fp16)[name = string("op_374_cast_fp16")];
tensor<int32, [4]> var_375 = const()[name = string("op_375"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_376_cast_fp16 = reshape(shape = var_375, x = value_3_cast_fp16)[name = string("op_376_cast_fp16")];
bool attn_5_transpose_x_0 = const()[name = string("attn_5_transpose_x_0"), val = bool(false)];
bool attn_5_transpose_y_0 = const()[name = string("attn_5_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_5_cast_fp16 = matmul(transpose_x = attn_5_transpose_x_0, transpose_y = attn_5_transpose_y_0, x = var_376_cast_fp16, y = var_374_cast_fp16)[name = string("attn_5_cast_fp16")];
tensor<int32, [4]> var_379 = const()[name = string("op_379"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_11_cast_fp16 = reshape(shape = var_379, x = attn_5_cast_fp16)[name = string("input_11_cast_fp16")];
string obj_31_pad_type_0 = const()[name = string("obj_31_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_31_strides_0 = const()[name = string("obj_31_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_31_pad_0 = const()[name = string("obj_31_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_31_dilations_0 = const()[name = string("obj_31_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_31_groups_0 = const()[name = string("obj_31_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(62505024)))];
tensor<fp16, [512]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63029376)))];
tensor<fp16, [1, 512, 1, 1]> obj_31_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_31_dilations_0, groups = obj_31_groups_0, pad = obj_31_pad_0, pad_type = obj_31_pad_type_0, strides = obj_31_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_11_cast_fp16)[name = string("obj_31_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = obj_31_cast_fp16)[name = string("inputs_9_cast_fp16")];
tensor<int32, [1]> out_9_axes_0 = const()[name = string("out_9_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_401_to_fp16 = const()[name = string("op_401_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_9_cast_fp16 = layer_norm(axes = out_9_axes_0, epsilon = var_401_to_fp16, x = inputs_9_cast_fp16)[name = string("out_9_cast_fp16")];
tensor<fp16, [512]> obj_33_gamma_0_to_fp16 = const()[name = string("obj_33_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63030464)))];
tensor<fp16, [512]> obj_33_beta_0_to_fp16 = const()[name = string("obj_33_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63031552)))];
fp16 obj_33_epsilon_0_to_fp16 = const()[name = string("obj_33_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_33_cast_fp16 = batch_norm(beta = obj_33_beta_0_to_fp16, epsilon = obj_33_epsilon_0_to_fp16, gamma = obj_33_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_9_cast_fp16)[name = string("obj_33_cast_fp16")];
string query_7_pad_type_0 = const()[name = string("query_7_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_7_strides_0 = const()[name = string("query_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_7_pad_0 = const()[name = string("query_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_7_dilations_0 = const()[name = string("query_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_7_groups_0 = const()[name = string("query_7_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_1_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63032640)))];
tensor<fp16, [512]> layers_1_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63556992)))];
tensor<fp16, [1, 512, 1, 1]> query_7_cast_fp16 = conv(bias = layers_1_encoder_attn_q_proj_bias_to_fp16, dilations = query_7_dilations_0, groups = query_7_groups_0, pad = query_7_pad_0, pad_type = query_7_pad_type_0, strides = query_7_strides_0, weight = layers_1_encoder_attn_q_proj_weight_to_fp16, x = obj_33_cast_fp16)[name = string("query_7_cast_fp16")];
tensor<int32, [4]> var_421 = const()[name = string("op_421"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_7_cast_fp16 = reshape(shape = var_421, x = query_7_cast_fp16)[name = string("mh_q_7_cast_fp16")];
fp16 var_423_to_fp16 = const()[name = string("op_423_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_424_cast_fp16 = mul(x = mh_q_7_cast_fp16, y = var_423_to_fp16)[name = string("op_424_cast_fp16")];
tensor<int32, [4]> var_425 = const()[name = string("op_425"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_426_cast_fp16 = reshape(shape = var_425, x = obj_35_cast_fp16)[name = string("op_426_cast_fp16")];
bool mh_w_13_transpose_x_0 = const()[name = string("mh_w_13_transpose_x_0"), val = bool(true)];
bool mh_w_13_transpose_y_0 = const()[name = string("mh_w_13_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 1536]> mh_w_13_cast_fp16 = matmul(transpose_x = mh_w_13_transpose_x_0, transpose_y = mh_w_13_transpose_y_0, x = var_424_cast_fp16, y = var_426_cast_fp16)[name = string("mh_w_13_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> mh_w_15_cast_fp16 = add(x = mh_w_13_cast_fp16, y = var_222_cast_fp16)[name = string("mh_w_15_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> obj_41_cast_fp16 = softmax(axis = var_293, x = mh_w_15_cast_fp16)[name = string("obj_41_cast_fp16")];
tensor<int32, [4]> var_435 = const()[name = string("op_435"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_436_cast_fp16 = reshape(shape = var_435, x = obj_37_cast_fp16)[name = string("op_436_cast_fp16")];
bool attn_7_transpose_x_0 = const()[name = string("attn_7_transpose_x_0"), val = bool(false)];
bool attn_7_transpose_y_0 = const()[name = string("attn_7_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_7_cast_fp16 = matmul(transpose_x = attn_7_transpose_x_0, transpose_y = attn_7_transpose_y_0, x = var_436_cast_fp16, y = obj_41_cast_fp16)[name = string("attn_7_cast_fp16")];
tensor<int32, [4]> var_439 = const()[name = string("op_439"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_13_cast_fp16 = reshape(shape = var_439, x = attn_7_cast_fp16)[name = string("input_13_cast_fp16")];
string obj_39_pad_type_0 = const()[name = string("obj_39_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_39_strides_0 = const()[name = string("obj_39_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_39_pad_0 = const()[name = string("obj_39_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_39_dilations_0 = const()[name = string("obj_39_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_39_groups_0 = const()[name = string("obj_39_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_1_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(63558080)))];
tensor<fp16, [512]> layers_1_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_1_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64082432)))];
tensor<fp16, [1, 512, 1, 1]> obj_39_cast_fp16 = conv(bias = layers_1_encoder_attn_o_proj_bias_to_fp16, dilations = obj_39_dilations_0, groups = obj_39_groups_0, pad = obj_39_pad_0, pad_type = obj_39_pad_type_0, strides = obj_39_strides_0, weight = layers_1_encoder_attn_o_proj_weight_to_fp16, x = input_13_cast_fp16)[name = string("obj_39_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_39_cast_fp16)[name = string("inputs_11_cast_fp16")];
tensor<int32, [1]> out_11_axes_0 = const()[name = string("out_11_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_457_to_fp16 = const()[name = string("op_457_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_11_cast_fp16 = layer_norm(axes = out_11_axes_0, epsilon = var_457_to_fp16, x = inputs_11_cast_fp16)[name = string("out_11_cast_fp16")];
tensor<fp16, [512]> input_15_gamma_0_to_fp16 = const()[name = string("input_15_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64083520)))];
tensor<fp16, [512]> input_15_beta_0_to_fp16 = const()[name = string("input_15_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64084608)))];
fp16 input_15_epsilon_0_to_fp16 = const()[name = string("input_15_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> input_15_cast_fp16 = batch_norm(beta = input_15_beta_0_to_fp16, epsilon = input_15_epsilon_0_to_fp16, gamma = input_15_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_11_cast_fp16)[name = string("input_15_cast_fp16")];
string input_17_pad_type_0 = const()[name = string("input_17_pad_type_0"), val = string("valid")];
tensor<int32, [2]> input_17_strides_0 = const()[name = string("input_17_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> input_17_pad_0 = const()[name = string("input_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> input_17_dilations_0 = const()[name = string("input_17_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 input_17_groups_0 = const()[name = string("input_17_groups_0"), val = int32(1)];
tensor<fp16, [2048, 512, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = string("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [2048, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64085696)))];
tensor<fp16, [2048]> layers_1_fc1_bias_to_fp16 = const()[name = string("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(66182912)))];
tensor<fp16, [1, 2048, 1, 1]> input_17_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_17_dilations_0, groups = input_17_groups_0, pad = input_17_pad_0, pad_type = input_17_pad_type_0, strides = input_17_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_15_cast_fp16)[name = string("input_17_cast_fp16")];
string input_19_mode_0 = const()[name = string("input_19_mode_0"), val = string("EXACT")];
tensor<fp16, [1, 2048, 1, 1]> input_19_cast_fp16 = gelu(mode = input_19_mode_0, x = input_17_cast_fp16)[name = string("input_19_cast_fp16")];
string hidden_states_5_pad_type_0 = const()[name = string("hidden_states_5_pad_type_0"), val = string("valid")];
tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = string("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = string("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = string("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 hidden_states_5_groups_0 = const()[name = string("hidden_states_5_groups_0"), val = int32(1)];
tensor<fp16, [512, 2048, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = string("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [512, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(66187072)))];
tensor<fp16, [512]> layers_1_fc2_bias_to_fp16 = const()[name = string("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68284288)))];
tensor<fp16, [1, 512, 1, 1]> hidden_states_5_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_19_cast_fp16)[name = string("hidden_states_5_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_13_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_5_cast_fp16)[name = string("inputs_13_cast_fp16")];
tensor<int32, [4]> obj_53_begin_0 = const()[name = string("obj_53_begin_0"), val = tensor<int32, [4]>([2, 0, 0, 0])];
tensor<int32, [4]> obj_53_end_0 = const()[name = string("obj_53_end_0"), val = tensor<int32, [4]>([3, 512, 1, 1536])];
tensor<bool, [4]> obj_53_end_mask_0 = const()[name = string("obj_53_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_53_cast_fp16 = slice_by_index(begin = obj_53_begin_0, end = obj_53_end_0, end_mask = obj_53_end_mask_0, x = read_state_2)[name = string("obj_53_cast_fp16")];
tensor<int32, [4]> obj_55_begin_0 = const()[name = string("obj_55_begin_0"), val = tensor<int32, [4]>([2, 0, 0, 0])];
tensor<int32, [4]> obj_55_end_0 = const()[name = string("obj_55_end_0"), val = tensor<int32, [4]>([3, 512, 1, 1536])];
tensor<bool, [4]> obj_55_end_mask_0 = const()[name = string("obj_55_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_55_cast_fp16 = slice_by_index(begin = obj_55_begin_0, end = obj_55_end_0, end_mask = obj_55_end_mask_0, x = read_state_3)[name = string("obj_55_cast_fp16")];
int32 var_502 = const()[name = string("op_502"), val = int32(3)];
tensor<int32, [1]> out_13_axes_0 = const()[name = string("out_13_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_527_to_fp16 = const()[name = string("op_527_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_13_cast_fp16 = layer_norm(axes = out_13_axes_0, epsilon = var_527_to_fp16, x = inputs_13_cast_fp16)[name = string("out_13_cast_fp16")];
tensor<fp16, [512]> obj_43_gamma_0_to_fp16 = const()[name = string("obj_43_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68285376)))];
tensor<fp16, [512]> obj_43_beta_0_to_fp16 = const()[name = string("obj_43_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68286464)))];
fp16 obj_43_epsilon_0_to_fp16 = const()[name = string("obj_43_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_43_cast_fp16 = batch_norm(beta = obj_43_beta_0_to_fp16, epsilon = obj_43_epsilon_0_to_fp16, gamma = obj_43_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_13_cast_fp16)[name = string("obj_43_cast_fp16")];
string query_9_pad_type_0 = const()[name = string("query_9_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_9_strides_0 = const()[name = string("query_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_9_pad_0 = const()[name = string("query_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_9_dilations_0 = const()[name = string("query_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_9_groups_0 = const()[name = string("query_9_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68287552)))];
tensor<fp16, [512]> layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68811904)))];
tensor<fp16, [1, 512, 1, 1]> query_9_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations = query_9_dilations_0, groups = query_9_groups_0, pad = query_9_pad_0, pad_type = query_9_pad_type_0, strides = query_9_strides_0, weight = layers_2_self_attn_q_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = string("query_9_cast_fp16")];
string current_key_5_pad_type_0 = const()[name = string("current_key_5_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_key_5_strides_0 = const()[name = string("current_key_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_key_5_pad_0 = const()[name = string("current_key_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_key_5_dilations_0 = const()[name = string("current_key_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_key_5_groups_0 = const()[name = string("current_key_5_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(68812992)))];
tensor<fp16, [1, 512, 1, 1]> current_key_5_cast_fp16 = conv(dilations = current_key_5_dilations_0, groups = current_key_5_groups_0, pad = current_key_5_pad_0, pad_type = current_key_5_pad_type_0, strides = current_key_5_strides_0, weight = layers_2_self_attn_k_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = string("current_key_5_cast_fp16")];
string current_value_5_pad_type_0 = const()[name = string("current_value_5_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_value_5_strides_0 = const()[name = string("current_value_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_value_5_pad_0 = const()[name = string("current_value_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_value_5_dilations_0 = const()[name = string("current_value_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_value_5_groups_0 = const()[name = string("current_value_5_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(69337344)))];
tensor<fp16, [512]> layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(69861696)))];
tensor<fp16, [1, 512, 1, 1]> current_value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = current_value_5_dilations_0, groups = current_value_5_groups_0, pad = current_value_5_pad_0, pad_type = current_value_5_pad_type_0, strides = current_value_5_strides_0, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_43_cast_fp16)[name = string("current_value_5_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_565_cast_fp16 = mul(x = current_key_5_cast_fp16, y = var_145_cast_fp16)[name = string("op_565_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> key_5_cast_fp16 = add(x = var_53_cast_fp16_2, y = var_565_cast_fp16)[name = string("key_5_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_567_cast_fp16 = mul(x = current_value_5_cast_fp16, y = var_145_cast_fp16)[name = string("op_567_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> value_5_cast_fp16 = add(x = var_62_cast_fp16_2, y = var_567_cast_fp16)[name = string("value_5_cast_fp16")];
tensor<int32, [4]> var_570 = const()[name = string("op_570"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_9_cast_fp16 = reshape(shape = var_570, x = query_9_cast_fp16)[name = string("mh_q_9_cast_fp16")];
fp16 var_572_to_fp16 = const()[name = string("op_572_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_573_cast_fp16 = mul(x = mh_q_9_cast_fp16, y = var_572_to_fp16)[name = string("op_573_cast_fp16")];
tensor<int32, [4]> var_574 = const()[name = string("op_574"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_575_cast_fp16 = reshape(shape = var_574, x = key_5_cast_fp16)[name = string("op_575_cast_fp16")];
bool mh_w_17_transpose_x_0 = const()[name = string("mh_w_17_transpose_x_0"), val = bool(true)];
bool mh_w_17_transpose_y_0 = const()[name = string("mh_w_17_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 448]> mh_w_17_cast_fp16 = matmul(transpose_x = mh_w_17_transpose_x_0, transpose_y = mh_w_17_transpose_y_0, x = var_573_cast_fp16, y = var_575_cast_fp16)[name = string("mh_w_17_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> mh_w_19_cast_fp16 = add(x = mh_w_17_cast_fp16, y = var_162_cast_fp16)[name = string("mh_w_19_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> var_583_cast_fp16 = softmax(axis = var_502, x = mh_w_19_cast_fp16)[name = string("op_583_cast_fp16")];
tensor<int32, [4]> var_584 = const()[name = string("op_584"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_585_cast_fp16 = reshape(shape = var_584, x = value_5_cast_fp16)[name = string("op_585_cast_fp16")];
bool attn_9_transpose_x_0 = const()[name = string("attn_9_transpose_x_0"), val = bool(false)];
bool attn_9_transpose_y_0 = const()[name = string("attn_9_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_9_cast_fp16 = matmul(transpose_x = attn_9_transpose_x_0, transpose_y = attn_9_transpose_y_0, x = var_585_cast_fp16, y = var_583_cast_fp16)[name = string("attn_9_cast_fp16")];
tensor<int32, [4]> var_588 = const()[name = string("op_588"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_21_cast_fp16 = reshape(shape = var_588, x = attn_9_cast_fp16)[name = string("input_21_cast_fp16")];
string obj_49_pad_type_0 = const()[name = string("obj_49_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_49_strides_0 = const()[name = string("obj_49_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_49_pad_0 = const()[name = string("obj_49_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_49_dilations_0 = const()[name = string("obj_49_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_49_groups_0 = const()[name = string("obj_49_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_2_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_2_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(69862784)))];
tensor<fp16, [512]> layers_2_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_2_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(70387136)))];
tensor<fp16, [1, 512, 1, 1]> obj_49_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations = obj_49_dilations_0, groups = obj_49_groups_0, pad = obj_49_pad_0, pad_type = obj_49_pad_type_0, strides = obj_49_strides_0, weight = layers_2_self_attn_o_proj_weight_to_fp16, x = input_21_cast_fp16)[name = string("obj_49_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_15_cast_fp16 = add(x = inputs_13_cast_fp16, y = obj_49_cast_fp16)[name = string("inputs_15_cast_fp16")];
tensor<int32, [1]> out_15_axes_0 = const()[name = string("out_15_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_610_to_fp16 = const()[name = string("op_610_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_15_cast_fp16 = layer_norm(axes = out_15_axes_0, epsilon = var_610_to_fp16, x = inputs_15_cast_fp16)[name = string("out_15_cast_fp16")];
tensor<fp16, [512]> obj_51_gamma_0_to_fp16 = const()[name = string("obj_51_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(70388224)))];
tensor<fp16, [512]> obj_51_beta_0_to_fp16 = const()[name = string("obj_51_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(70389312)))];
fp16 obj_51_epsilon_0_to_fp16 = const()[name = string("obj_51_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_51_cast_fp16 = batch_norm(beta = obj_51_beta_0_to_fp16, epsilon = obj_51_epsilon_0_to_fp16, gamma = obj_51_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_15_cast_fp16)[name = string("obj_51_cast_fp16")];
string query_11_pad_type_0 = const()[name = string("query_11_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_11_strides_0 = const()[name = string("query_11_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_11_pad_0 = const()[name = string("query_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_11_dilations_0 = const()[name = string("query_11_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_11_groups_0 = const()[name = string("query_11_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_2_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_2_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(70390400)))];
tensor<fp16, [512]> layers_2_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_2_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(70914752)))];
tensor<fp16, [1, 512, 1, 1]> query_11_cast_fp16 = conv(bias = layers_2_encoder_attn_q_proj_bias_to_fp16, dilations = query_11_dilations_0, groups = query_11_groups_0, pad = query_11_pad_0, pad_type = query_11_pad_type_0, strides = query_11_strides_0, weight = layers_2_encoder_attn_q_proj_weight_to_fp16, x = obj_51_cast_fp16)[name = string("query_11_cast_fp16")];
tensor<int32, [4]> var_630 = const()[name = string("op_630"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_11_cast_fp16 = reshape(shape = var_630, x = query_11_cast_fp16)[name = string("mh_q_11_cast_fp16")];
fp16 var_632_to_fp16 = const()[name = string("op_632_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_633_cast_fp16 = mul(x = mh_q_11_cast_fp16, y = var_632_to_fp16)[name = string("op_633_cast_fp16")];
tensor<int32, [4]> var_634 = const()[name = string("op_634"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_635_cast_fp16 = reshape(shape = var_634, x = obj_53_cast_fp16)[name = string("op_635_cast_fp16")];
bool mh_w_21_transpose_x_0 = const()[name = string("mh_w_21_transpose_x_0"), val = bool(true)];
bool mh_w_21_transpose_y_0 = const()[name = string("mh_w_21_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 1536]> mh_w_21_cast_fp16 = matmul(transpose_x = mh_w_21_transpose_x_0, transpose_y = mh_w_21_transpose_y_0, x = var_633_cast_fp16, y = var_635_cast_fp16)[name = string("mh_w_21_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> mh_w_23_cast_fp16 = add(x = mh_w_21_cast_fp16, y = var_222_cast_fp16)[name = string("mh_w_23_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> obj_59_cast_fp16 = softmax(axis = var_502, x = mh_w_23_cast_fp16)[name = string("obj_59_cast_fp16")];
tensor<int32, [4]> var_644 = const()[name = string("op_644"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_645_cast_fp16 = reshape(shape = var_644, x = obj_55_cast_fp16)[name = string("op_645_cast_fp16")];
bool attn_11_transpose_x_0 = const()[name = string("attn_11_transpose_x_0"), val = bool(false)];
bool attn_11_transpose_y_0 = const()[name = string("attn_11_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_11_cast_fp16 = matmul(transpose_x = attn_11_transpose_x_0, transpose_y = attn_11_transpose_y_0, x = var_645_cast_fp16, y = obj_59_cast_fp16)[name = string("attn_11_cast_fp16")];
tensor<int32, [4]> var_648 = const()[name = string("op_648"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_23_cast_fp16 = reshape(shape = var_648, x = attn_11_cast_fp16)[name = string("input_23_cast_fp16")];
string obj_57_pad_type_0 = const()[name = string("obj_57_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_57_strides_0 = const()[name = string("obj_57_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_57_pad_0 = const()[name = string("obj_57_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_57_dilations_0 = const()[name = string("obj_57_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_57_groups_0 = const()[name = string("obj_57_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_2_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_2_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(70915840)))];
tensor<fp16, [512]> layers_2_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_2_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(71440192)))];
tensor<fp16, [1, 512, 1, 1]> obj_57_cast_fp16 = conv(bias = layers_2_encoder_attn_o_proj_bias_to_fp16, dilations = obj_57_dilations_0, groups = obj_57_groups_0, pad = obj_57_pad_0, pad_type = obj_57_pad_type_0, strides = obj_57_strides_0, weight = layers_2_encoder_attn_o_proj_weight_to_fp16, x = input_23_cast_fp16)[name = string("obj_57_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_17_cast_fp16 = add(x = inputs_15_cast_fp16, y = obj_57_cast_fp16)[name = string("inputs_17_cast_fp16")];
tensor<int32, [1]> out_17_axes_0 = const()[name = string("out_17_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_666_to_fp16 = const()[name = string("op_666_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_17_cast_fp16 = layer_norm(axes = out_17_axes_0, epsilon = var_666_to_fp16, x = inputs_17_cast_fp16)[name = string("out_17_cast_fp16")];
tensor<fp16, [512]> input_25_gamma_0_to_fp16 = const()[name = string("input_25_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(71441280)))];
tensor<fp16, [512]> input_25_beta_0_to_fp16 = const()[name = string("input_25_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(71442368)))];
fp16 input_25_epsilon_0_to_fp16 = const()[name = string("input_25_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> input_25_cast_fp16 = batch_norm(beta = input_25_beta_0_to_fp16, epsilon = input_25_epsilon_0_to_fp16, gamma = input_25_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_17_cast_fp16)[name = string("input_25_cast_fp16")];
string input_27_pad_type_0 = const()[name = string("input_27_pad_type_0"), val = string("valid")];
tensor<int32, [2]> input_27_strides_0 = const()[name = string("input_27_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> input_27_pad_0 = const()[name = string("input_27_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> input_27_dilations_0 = const()[name = string("input_27_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 input_27_groups_0 = const()[name = string("input_27_groups_0"), val = int32(1)];
tensor<fp16, [2048, 512, 1, 1]> layers_2_fc1_weight_to_fp16 = const()[name = string("layers_2_fc1_weight_to_fp16"), val = tensor<fp16, [2048, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(71443456)))];
tensor<fp16, [2048]> layers_2_fc1_bias_to_fp16 = const()[name = string("layers_2_fc1_bias_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(73540672)))];
tensor<fp16, [1, 2048, 1, 1]> input_27_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations = input_27_dilations_0, groups = input_27_groups_0, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = input_27_strides_0, weight = layers_2_fc1_weight_to_fp16, x = input_25_cast_fp16)[name = string("input_27_cast_fp16")];
string input_29_mode_0 = const()[name = string("input_29_mode_0"), val = string("EXACT")];
tensor<fp16, [1, 2048, 1, 1]> input_29_cast_fp16 = gelu(mode = input_29_mode_0, x = input_27_cast_fp16)[name = string("input_29_cast_fp16")];
string hidden_states_7_pad_type_0 = const()[name = string("hidden_states_7_pad_type_0"), val = string("valid")];
tensor<int32, [2]> hidden_states_7_strides_0 = const()[name = string("hidden_states_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> hidden_states_7_pad_0 = const()[name = string("hidden_states_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> hidden_states_7_dilations_0 = const()[name = string("hidden_states_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 hidden_states_7_groups_0 = const()[name = string("hidden_states_7_groups_0"), val = int32(1)];
tensor<fp16, [512, 2048, 1, 1]> layers_2_fc2_weight_to_fp16 = const()[name = string("layers_2_fc2_weight_to_fp16"), val = tensor<fp16, [512, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(73544832)))];
tensor<fp16, [512]> layers_2_fc2_bias_to_fp16 = const()[name = string("layers_2_fc2_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(75642048)))];
tensor<fp16, [1, 512, 1, 1]> hidden_states_7_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations = hidden_states_7_dilations_0, groups = hidden_states_7_groups_0, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = hidden_states_7_strides_0, weight = layers_2_fc2_weight_to_fp16, x = input_29_cast_fp16)[name = string("hidden_states_7_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_19_cast_fp16 = add(x = inputs_17_cast_fp16, y = hidden_states_7_cast_fp16)[name = string("inputs_19_cast_fp16")];
tensor<int32, [4]> obj_71_begin_0 = const()[name = string("obj_71_begin_0"), val = tensor<int32, [4]>([3, 0, 0, 0])];
tensor<int32, [4]> obj_71_end_0 = const()[name = string("obj_71_end_0"), val = tensor<int32, [4]>([4, 512, 1, 1536])];
tensor<bool, [4]> obj_71_end_mask_0 = const()[name = string("obj_71_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_71_cast_fp16 = slice_by_index(begin = obj_71_begin_0, end = obj_71_end_0, end_mask = obj_71_end_mask_0, x = read_state_2)[name = string("obj_71_cast_fp16")];
tensor<int32, [4]> obj_73_begin_0 = const()[name = string("obj_73_begin_0"), val = tensor<int32, [4]>([3, 0, 0, 0])];
tensor<int32, [4]> obj_73_end_0 = const()[name = string("obj_73_end_0"), val = tensor<int32, [4]>([4, 512, 1, 1536])];
tensor<bool, [4]> obj_73_end_mask_0 = const()[name = string("obj_73_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_73_cast_fp16 = slice_by_index(begin = obj_73_begin_0, end = obj_73_end_0, end_mask = obj_73_end_mask_0, x = read_state_3)[name = string("obj_73_cast_fp16")];
int32 var_711 = const()[name = string("op_711"), val = int32(3)];
tensor<int32, [1]> out_19_axes_0 = const()[name = string("out_19_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_736_to_fp16 = const()[name = string("op_736_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_19_cast_fp16 = layer_norm(axes = out_19_axes_0, epsilon = var_736_to_fp16, x = inputs_19_cast_fp16)[name = string("out_19_cast_fp16")];
tensor<fp16, [512]> obj_61_gamma_0_to_fp16 = const()[name = string("obj_61_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(75643136)))];
tensor<fp16, [512]> obj_61_beta_0_to_fp16 = const()[name = string("obj_61_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(75644224)))];
fp16 obj_61_epsilon_0_to_fp16 = const()[name = string("obj_61_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_61_cast_fp16 = batch_norm(beta = obj_61_beta_0_to_fp16, epsilon = obj_61_epsilon_0_to_fp16, gamma = obj_61_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_19_cast_fp16)[name = string("obj_61_cast_fp16")];
string query_13_pad_type_0 = const()[name = string("query_13_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_13_strides_0 = const()[name = string("query_13_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_13_pad_0 = const()[name = string("query_13_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_13_dilations_0 = const()[name = string("query_13_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_13_groups_0 = const()[name = string("query_13_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(75645312)))];
tensor<fp16, [512]> layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(76169664)))];
tensor<fp16, [1, 512, 1, 1]> query_13_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations = query_13_dilations_0, groups = query_13_groups_0, pad = query_13_pad_0, pad_type = query_13_pad_type_0, strides = query_13_strides_0, weight = layers_3_self_attn_q_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = string("query_13_cast_fp16")];
string current_key_7_pad_type_0 = const()[name = string("current_key_7_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_key_7_strides_0 = const()[name = string("current_key_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_key_7_pad_0 = const()[name = string("current_key_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_key_7_dilations_0 = const()[name = string("current_key_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_key_7_groups_0 = const()[name = string("current_key_7_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(76170752)))];
tensor<fp16, [1, 512, 1, 1]> current_key_7_cast_fp16 = conv(dilations = current_key_7_dilations_0, groups = current_key_7_groups_0, pad = current_key_7_pad_0, pad_type = current_key_7_pad_type_0, strides = current_key_7_strides_0, weight = layers_3_self_attn_k_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = string("current_key_7_cast_fp16")];
string current_value_7_pad_type_0 = const()[name = string("current_value_7_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_value_7_strides_0 = const()[name = string("current_value_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_value_7_pad_0 = const()[name = string("current_value_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_value_7_dilations_0 = const()[name = string("current_value_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_value_7_groups_0 = const()[name = string("current_value_7_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(76695104)))];
tensor<fp16, [512]> layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(77219456)))];
tensor<fp16, [1, 512, 1, 1]> current_value_7_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = current_value_7_dilations_0, groups = current_value_7_groups_0, pad = current_value_7_pad_0, pad_type = current_value_7_pad_type_0, strides = current_value_7_strides_0, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_61_cast_fp16)[name = string("current_value_7_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_774_cast_fp16 = mul(x = current_key_7_cast_fp16, y = var_145_cast_fp16)[name = string("op_774_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> key_7_cast_fp16 = add(x = var_53_cast_fp16_3, y = var_774_cast_fp16)[name = string("key_7_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_776_cast_fp16 = mul(x = current_value_7_cast_fp16, y = var_145_cast_fp16)[name = string("op_776_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> value_7_cast_fp16 = add(x = var_62_cast_fp16_3, y = var_776_cast_fp16)[name = string("value_7_cast_fp16")];
tensor<int32, [4]> var_779 = const()[name = string("op_779"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_13_cast_fp16 = reshape(shape = var_779, x = query_13_cast_fp16)[name = string("mh_q_13_cast_fp16")];
fp16 var_781_to_fp16 = const()[name = string("op_781_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_782_cast_fp16 = mul(x = mh_q_13_cast_fp16, y = var_781_to_fp16)[name = string("op_782_cast_fp16")];
tensor<int32, [4]> var_783 = const()[name = string("op_783"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_784_cast_fp16 = reshape(shape = var_783, x = key_7_cast_fp16)[name = string("op_784_cast_fp16")];
bool mh_w_25_transpose_x_0 = const()[name = string("mh_w_25_transpose_x_0"), val = bool(true)];
bool mh_w_25_transpose_y_0 = const()[name = string("mh_w_25_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 448]> mh_w_25_cast_fp16 = matmul(transpose_x = mh_w_25_transpose_x_0, transpose_y = mh_w_25_transpose_y_0, x = var_782_cast_fp16, y = var_784_cast_fp16)[name = string("mh_w_25_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> mh_w_27_cast_fp16 = add(x = mh_w_25_cast_fp16, y = var_162_cast_fp16)[name = string("mh_w_27_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> var_792_cast_fp16 = softmax(axis = var_711, x = mh_w_27_cast_fp16)[name = string("op_792_cast_fp16")];
tensor<int32, [4]> var_793 = const()[name = string("op_793"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_794_cast_fp16 = reshape(shape = var_793, x = value_7_cast_fp16)[name = string("op_794_cast_fp16")];
bool attn_13_transpose_x_0 = const()[name = string("attn_13_transpose_x_0"), val = bool(false)];
bool attn_13_transpose_y_0 = const()[name = string("attn_13_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_13_cast_fp16 = matmul(transpose_x = attn_13_transpose_x_0, transpose_y = attn_13_transpose_y_0, x = var_794_cast_fp16, y = var_792_cast_fp16)[name = string("attn_13_cast_fp16")];
tensor<int32, [4]> var_797 = const()[name = string("op_797"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_31_cast_fp16 = reshape(shape = var_797, x = attn_13_cast_fp16)[name = string("input_31_cast_fp16")];
string obj_67_pad_type_0 = const()[name = string("obj_67_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_67_strides_0 = const()[name = string("obj_67_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_67_pad_0 = const()[name = string("obj_67_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_67_dilations_0 = const()[name = string("obj_67_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_67_groups_0 = const()[name = string("obj_67_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_3_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_3_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(77220544)))];
tensor<fp16, [512]> layers_3_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_3_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(77744896)))];
tensor<fp16, [1, 512, 1, 1]> obj_67_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations = obj_67_dilations_0, groups = obj_67_groups_0, pad = obj_67_pad_0, pad_type = obj_67_pad_type_0, strides = obj_67_strides_0, weight = layers_3_self_attn_o_proj_weight_to_fp16, x = input_31_cast_fp16)[name = string("obj_67_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_21_cast_fp16 = add(x = inputs_19_cast_fp16, y = obj_67_cast_fp16)[name = string("inputs_21_cast_fp16")];
tensor<int32, [1]> out_21_axes_0 = const()[name = string("out_21_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_819_to_fp16 = const()[name = string("op_819_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_21_cast_fp16 = layer_norm(axes = out_21_axes_0, epsilon = var_819_to_fp16, x = inputs_21_cast_fp16)[name = string("out_21_cast_fp16")];
tensor<fp16, [512]> obj_69_gamma_0_to_fp16 = const()[name = string("obj_69_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(77745984)))];
tensor<fp16, [512]> obj_69_beta_0_to_fp16 = const()[name = string("obj_69_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(77747072)))];
fp16 obj_69_epsilon_0_to_fp16 = const()[name = string("obj_69_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_69_cast_fp16 = batch_norm(beta = obj_69_beta_0_to_fp16, epsilon = obj_69_epsilon_0_to_fp16, gamma = obj_69_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_21_cast_fp16)[name = string("obj_69_cast_fp16")];
string query_15_pad_type_0 = const()[name = string("query_15_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_15_strides_0 = const()[name = string("query_15_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_15_pad_0 = const()[name = string("query_15_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_15_dilations_0 = const()[name = string("query_15_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_15_groups_0 = const()[name = string("query_15_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_3_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_3_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(77748160)))];
tensor<fp16, [512]> layers_3_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_3_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78272512)))];
tensor<fp16, [1, 512, 1, 1]> query_15_cast_fp16 = conv(bias = layers_3_encoder_attn_q_proj_bias_to_fp16, dilations = query_15_dilations_0, groups = query_15_groups_0, pad = query_15_pad_0, pad_type = query_15_pad_type_0, strides = query_15_strides_0, weight = layers_3_encoder_attn_q_proj_weight_to_fp16, x = obj_69_cast_fp16)[name = string("query_15_cast_fp16")];
tensor<int32, [4]> var_839 = const()[name = string("op_839"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_15_cast_fp16 = reshape(shape = var_839, x = query_15_cast_fp16)[name = string("mh_q_15_cast_fp16")];
fp16 var_841_to_fp16 = const()[name = string("op_841_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_842_cast_fp16 = mul(x = mh_q_15_cast_fp16, y = var_841_to_fp16)[name = string("op_842_cast_fp16")];
tensor<int32, [4]> var_843 = const()[name = string("op_843"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_844_cast_fp16 = reshape(shape = var_843, x = obj_71_cast_fp16)[name = string("op_844_cast_fp16")];
bool mh_w_29_transpose_x_0 = const()[name = string("mh_w_29_transpose_x_0"), val = bool(true)];
bool mh_w_29_transpose_y_0 = const()[name = string("mh_w_29_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 1536]> mh_w_29_cast_fp16 = matmul(transpose_x = mh_w_29_transpose_x_0, transpose_y = mh_w_29_transpose_y_0, x = var_842_cast_fp16, y = var_844_cast_fp16)[name = string("mh_w_29_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> mh_w_31_cast_fp16 = add(x = mh_w_29_cast_fp16, y = var_222_cast_fp16)[name = string("mh_w_31_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> obj_77_cast_fp16 = softmax(axis = var_711, x = mh_w_31_cast_fp16)[name = string("obj_77_cast_fp16")];
tensor<int32, [4]> var_853 = const()[name = string("op_853"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_854_cast_fp16 = reshape(shape = var_853, x = obj_73_cast_fp16)[name = string("op_854_cast_fp16")];
bool attn_15_transpose_x_0 = const()[name = string("attn_15_transpose_x_0"), val = bool(false)];
bool attn_15_transpose_y_0 = const()[name = string("attn_15_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_15_cast_fp16 = matmul(transpose_x = attn_15_transpose_x_0, transpose_y = attn_15_transpose_y_0, x = var_854_cast_fp16, y = obj_77_cast_fp16)[name = string("attn_15_cast_fp16")];
tensor<int32, [4]> var_857 = const()[name = string("op_857"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_33_cast_fp16 = reshape(shape = var_857, x = attn_15_cast_fp16)[name = string("input_33_cast_fp16")];
string obj_75_pad_type_0 = const()[name = string("obj_75_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_75_strides_0 = const()[name = string("obj_75_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_75_pad_0 = const()[name = string("obj_75_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_75_dilations_0 = const()[name = string("obj_75_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_75_groups_0 = const()[name = string("obj_75_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_3_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_3_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78273600)))];
tensor<fp16, [512]> layers_3_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_3_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78797952)))];
tensor<fp16, [1, 512, 1, 1]> obj_75_cast_fp16 = conv(bias = layers_3_encoder_attn_o_proj_bias_to_fp16, dilations = obj_75_dilations_0, groups = obj_75_groups_0, pad = obj_75_pad_0, pad_type = obj_75_pad_type_0, strides = obj_75_strides_0, weight = layers_3_encoder_attn_o_proj_weight_to_fp16, x = input_33_cast_fp16)[name = string("obj_75_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_23_cast_fp16 = add(x = inputs_21_cast_fp16, y = obj_75_cast_fp16)[name = string("inputs_23_cast_fp16")];
tensor<int32, [1]> out_23_axes_0 = const()[name = string("out_23_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_878_to_fp16 = const()[name = string("op_878_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_23_cast_fp16 = layer_norm(axes = out_23_axes_0, epsilon = var_878_to_fp16, x = inputs_23_cast_fp16)[name = string("out_23_cast_fp16")];
tensor<fp16, [512]> input_35_gamma_0_to_fp16 = const()[name = string("input_35_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78799040)))];
tensor<fp16, [512]> input_35_beta_0_to_fp16 = const()[name = string("input_35_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78800128)))];
fp16 input_35_epsilon_0_to_fp16 = const()[name = string("input_35_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> input_35_cast_fp16 = batch_norm(beta = input_35_beta_0_to_fp16, epsilon = input_35_epsilon_0_to_fp16, gamma = input_35_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_23_cast_fp16)[name = string("input_35_cast_fp16")];
string input_37_pad_type_0 = const()[name = string("input_37_pad_type_0"), val = string("valid")];
tensor<int32, [2]> input_37_strides_0 = const()[name = string("input_37_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> input_37_pad_0 = const()[name = string("input_37_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> input_37_dilations_0 = const()[name = string("input_37_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 input_37_groups_0 = const()[name = string("input_37_groups_0"), val = int32(1)];
tensor<fp16, [2048, 512, 1, 1]> layers_3_fc1_weight_to_fp16 = const()[name = string("layers_3_fc1_weight_to_fp16"), val = tensor<fp16, [2048, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(78801216)))];
tensor<fp16, [2048]> layers_3_fc1_bias_to_fp16 = const()[name = string("layers_3_fc1_bias_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(80898432)))];
tensor<fp16, [1, 2048, 1, 1]> input_37_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations = input_37_dilations_0, groups = input_37_groups_0, pad = input_37_pad_0, pad_type = input_37_pad_type_0, strides = input_37_strides_0, weight = layers_3_fc1_weight_to_fp16, x = input_35_cast_fp16)[name = string("input_37_cast_fp16")];
string input_39_mode_0 = const()[name = string("input_39_mode_0"), val = string("EXACT")];
tensor<fp16, [1, 2048, 1, 1]> input_39_cast_fp16 = gelu(mode = input_39_mode_0, x = input_37_cast_fp16)[name = string("input_39_cast_fp16")];
string hidden_states_9_pad_type_0 = const()[name = string("hidden_states_9_pad_type_0"), val = string("valid")];
tensor<int32, [2]> hidden_states_9_strides_0 = const()[name = string("hidden_states_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> hidden_states_9_pad_0 = const()[name = string("hidden_states_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> hidden_states_9_dilations_0 = const()[name = string("hidden_states_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 hidden_states_9_groups_0 = const()[name = string("hidden_states_9_groups_0"), val = int32(1)];
tensor<fp16, [512, 2048, 1, 1]> layers_3_fc2_weight_to_fp16 = const()[name = string("layers_3_fc2_weight_to_fp16"), val = tensor<fp16, [512, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(80902592)))];
tensor<fp16, [512]> layers_3_fc2_bias_to_fp16 = const()[name = string("layers_3_fc2_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(82999808)))];
tensor<fp16, [1, 512, 1, 1]> hidden_states_9_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations = hidden_states_9_dilations_0, groups = hidden_states_9_groups_0, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = hidden_states_9_strides_0, weight = layers_3_fc2_weight_to_fp16, x = input_39_cast_fp16)[name = string("hidden_states_9_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_25_cast_fp16 = add(x = inputs_23_cast_fp16, y = hidden_states_9_cast_fp16)[name = string("inputs_25_cast_fp16")];
tensor<int32, [4]> obj_89_begin_0 = const()[name = string("obj_89_begin_0"), val = tensor<int32, [4]>([4, 0, 0, 0])];
tensor<int32, [4]> obj_89_end_0 = const()[name = string("obj_89_end_0"), val = tensor<int32, [4]>([5, 512, 1, 1536])];
tensor<bool, [4]> obj_89_end_mask_0 = const()[name = string("obj_89_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_89_cast_fp16 = slice_by_index(begin = obj_89_begin_0, end = obj_89_end_0, end_mask = obj_89_end_mask_0, x = read_state_2)[name = string("obj_89_cast_fp16")];
tensor<int32, [4]> obj_91_begin_0 = const()[name = string("obj_91_begin_0"), val = tensor<int32, [4]>([4, 0, 0, 0])];
tensor<int32, [4]> obj_91_end_0 = const()[name = string("obj_91_end_0"), val = tensor<int32, [4]>([5, 512, 1, 1536])];
tensor<bool, [4]> obj_91_end_mask_0 = const()[name = string("obj_91_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_91_cast_fp16 = slice_by_index(begin = obj_91_begin_0, end = obj_91_end_0, end_mask = obj_91_end_mask_0, x = read_state_3)[name = string("obj_91_cast_fp16")];
int32 var_924 = const()[name = string("op_924"), val = int32(3)];
tensor<int32, [1]> out_25_axes_0 = const()[name = string("out_25_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_949_to_fp16 = const()[name = string("op_949_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_25_cast_fp16 = layer_norm(axes = out_25_axes_0, epsilon = var_949_to_fp16, x = inputs_25_cast_fp16)[name = string("out_25_cast_fp16")];
tensor<fp16, [512]> obj_79_gamma_0_to_fp16 = const()[name = string("obj_79_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(83000896)))];
tensor<fp16, [512]> obj_79_beta_0_to_fp16 = const()[name = string("obj_79_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(83001984)))];
fp16 obj_79_epsilon_0_to_fp16 = const()[name = string("obj_79_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_79_cast_fp16 = batch_norm(beta = obj_79_beta_0_to_fp16, epsilon = obj_79_epsilon_0_to_fp16, gamma = obj_79_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_25_cast_fp16)[name = string("obj_79_cast_fp16")];
string query_17_pad_type_0 = const()[name = string("query_17_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_17_strides_0 = const()[name = string("query_17_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_17_pad_0 = const()[name = string("query_17_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_17_dilations_0 = const()[name = string("query_17_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_17_groups_0 = const()[name = string("query_17_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_4_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_4_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(83003072)))];
tensor<fp16, [512]> layers_4_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_4_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(83527424)))];
tensor<fp16, [1, 512, 1, 1]> query_17_cast_fp16 = conv(bias = layers_4_self_attn_q_proj_bias_to_fp16, dilations = query_17_dilations_0, groups = query_17_groups_0, pad = query_17_pad_0, pad_type = query_17_pad_type_0, strides = query_17_strides_0, weight = layers_4_self_attn_q_proj_weight_to_fp16, x = obj_79_cast_fp16)[name = string("query_17_cast_fp16")];
string current_key_9_pad_type_0 = const()[name = string("current_key_9_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_key_9_strides_0 = const()[name = string("current_key_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_key_9_pad_0 = const()[name = string("current_key_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_key_9_dilations_0 = const()[name = string("current_key_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_key_9_groups_0 = const()[name = string("current_key_9_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_4_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_4_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(83528512)))];
tensor<fp16, [1, 512, 1, 1]> current_key_9_cast_fp16 = conv(dilations = current_key_9_dilations_0, groups = current_key_9_groups_0, pad = current_key_9_pad_0, pad_type = current_key_9_pad_type_0, strides = current_key_9_strides_0, weight = layers_4_self_attn_k_proj_weight_to_fp16, x = obj_79_cast_fp16)[name = string("current_key_9_cast_fp16")];
string current_value_9_pad_type_0 = const()[name = string("current_value_9_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_value_9_strides_0 = const()[name = string("current_value_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_value_9_pad_0 = const()[name = string("current_value_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_value_9_dilations_0 = const()[name = string("current_value_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_value_9_groups_0 = const()[name = string("current_value_9_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_4_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_4_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(84052864)))];
tensor<fp16, [512]> layers_4_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_4_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(84577216)))];
tensor<fp16, [1, 512, 1, 1]> current_value_9_cast_fp16 = conv(bias = layers_4_self_attn_v_proj_bias_to_fp16, dilations = current_value_9_dilations_0, groups = current_value_9_groups_0, pad = current_value_9_pad_0, pad_type = current_value_9_pad_type_0, strides = current_value_9_strides_0, weight = layers_4_self_attn_v_proj_weight_to_fp16, x = obj_79_cast_fp16)[name = string("current_value_9_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_987_cast_fp16 = mul(x = current_key_9_cast_fp16, y = var_145_cast_fp16)[name = string("op_987_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> key_9_cast_fp16 = add(x = var_53_cast_fp16_4, y = var_987_cast_fp16)[name = string("key_9_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_989_cast_fp16 = mul(x = current_value_9_cast_fp16, y = var_145_cast_fp16)[name = string("op_989_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> value_9_cast_fp16 = add(x = var_62_cast_fp16_4, y = var_989_cast_fp16)[name = string("value_9_cast_fp16")];
tensor<int32, [4]> var_992 = const()[name = string("op_992"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_17_cast_fp16 = reshape(shape = var_992, x = query_17_cast_fp16)[name = string("mh_q_17_cast_fp16")];
fp16 var_994_to_fp16 = const()[name = string("op_994_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_995_cast_fp16 = mul(x = mh_q_17_cast_fp16, y = var_994_to_fp16)[name = string("op_995_cast_fp16")];
tensor<int32, [4]> var_996 = const()[name = string("op_996"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_997_cast_fp16 = reshape(shape = var_996, x = key_9_cast_fp16)[name = string("op_997_cast_fp16")];
bool mh_w_33_transpose_x_0 = const()[name = string("mh_w_33_transpose_x_0"), val = bool(true)];
bool mh_w_33_transpose_y_0 = const()[name = string("mh_w_33_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 448]> mh_w_33_cast_fp16 = matmul(transpose_x = mh_w_33_transpose_x_0, transpose_y = mh_w_33_transpose_y_0, x = var_995_cast_fp16, y = var_997_cast_fp16)[name = string("mh_w_33_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> mh_w_35_cast_fp16 = add(x = mh_w_33_cast_fp16, y = var_162_cast_fp16)[name = string("mh_w_35_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> var_1005_cast_fp16 = softmax(axis = var_924, x = mh_w_35_cast_fp16)[name = string("op_1005_cast_fp16")];
tensor<int32, [4]> var_1006 = const()[name = string("op_1006"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_1007_cast_fp16 = reshape(shape = var_1006, x = value_9_cast_fp16)[name = string("op_1007_cast_fp16")];
bool attn_17_transpose_x_0 = const()[name = string("attn_17_transpose_x_0"), val = bool(false)];
bool attn_17_transpose_y_0 = const()[name = string("attn_17_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_17_cast_fp16 = matmul(transpose_x = attn_17_transpose_x_0, transpose_y = attn_17_transpose_y_0, x = var_1007_cast_fp16, y = var_1005_cast_fp16)[name = string("attn_17_cast_fp16")];
tensor<int32, [4]> var_1010 = const()[name = string("op_1010"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_41_cast_fp16 = reshape(shape = var_1010, x = attn_17_cast_fp16)[name = string("input_41_cast_fp16")];
string obj_85_pad_type_0 = const()[name = string("obj_85_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_85_strides_0 = const()[name = string("obj_85_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_85_pad_0 = const()[name = string("obj_85_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_85_dilations_0 = const()[name = string("obj_85_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_85_groups_0 = const()[name = string("obj_85_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_4_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_4_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(84578304)))];
tensor<fp16, [512]> layers_4_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_4_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(85102656)))];
tensor<fp16, [1, 512, 1, 1]> obj_85_cast_fp16 = conv(bias = layers_4_self_attn_o_proj_bias_to_fp16, dilations = obj_85_dilations_0, groups = obj_85_groups_0, pad = obj_85_pad_0, pad_type = obj_85_pad_type_0, strides = obj_85_strides_0, weight = layers_4_self_attn_o_proj_weight_to_fp16, x = input_41_cast_fp16)[name = string("obj_85_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_27_cast_fp16 = add(x = inputs_25_cast_fp16, y = obj_85_cast_fp16)[name = string("inputs_27_cast_fp16")];
tensor<int32, [1]> out_27_axes_0 = const()[name = string("out_27_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_1032_to_fp16 = const()[name = string("op_1032_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_27_cast_fp16 = layer_norm(axes = out_27_axes_0, epsilon = var_1032_to_fp16, x = inputs_27_cast_fp16)[name = string("out_27_cast_fp16")];
tensor<fp16, [512]> obj_87_gamma_0_to_fp16 = const()[name = string("obj_87_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(85103744)))];
tensor<fp16, [512]> obj_87_beta_0_to_fp16 = const()[name = string("obj_87_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(85104832)))];
fp16 obj_87_epsilon_0_to_fp16 = const()[name = string("obj_87_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_87_cast_fp16 = batch_norm(beta = obj_87_beta_0_to_fp16, epsilon = obj_87_epsilon_0_to_fp16, gamma = obj_87_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_27_cast_fp16)[name = string("obj_87_cast_fp16")];
string query_19_pad_type_0 = const()[name = string("query_19_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_19_strides_0 = const()[name = string("query_19_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_19_pad_0 = const()[name = string("query_19_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_19_dilations_0 = const()[name = string("query_19_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_19_groups_0 = const()[name = string("query_19_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_4_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_4_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(85105920)))];
tensor<fp16, [512]> layers_4_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_4_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(85630272)))];
tensor<fp16, [1, 512, 1, 1]> query_19_cast_fp16 = conv(bias = layers_4_encoder_attn_q_proj_bias_to_fp16, dilations = query_19_dilations_0, groups = query_19_groups_0, pad = query_19_pad_0, pad_type = query_19_pad_type_0, strides = query_19_strides_0, weight = layers_4_encoder_attn_q_proj_weight_to_fp16, x = obj_87_cast_fp16)[name = string("query_19_cast_fp16")];
tensor<int32, [4]> var_1052 = const()[name = string("op_1052"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_19_cast_fp16 = reshape(shape = var_1052, x = query_19_cast_fp16)[name = string("mh_q_19_cast_fp16")];
fp16 var_1054_to_fp16 = const()[name = string("op_1054_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_1055_cast_fp16 = mul(x = mh_q_19_cast_fp16, y = var_1054_to_fp16)[name = string("op_1055_cast_fp16")];
tensor<int32, [4]> var_1056 = const()[name = string("op_1056"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_1057_cast_fp16 = reshape(shape = var_1056, x = obj_89_cast_fp16)[name = string("op_1057_cast_fp16")];
bool mh_w_37_transpose_x_0 = const()[name = string("mh_w_37_transpose_x_0"), val = bool(true)];
bool mh_w_37_transpose_y_0 = const()[name = string("mh_w_37_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 1536]> mh_w_37_cast_fp16 = matmul(transpose_x = mh_w_37_transpose_x_0, transpose_y = mh_w_37_transpose_y_0, x = var_1055_cast_fp16, y = var_1057_cast_fp16)[name = string("mh_w_37_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> mh_w_39_cast_fp16 = add(x = mh_w_37_cast_fp16, y = var_222_cast_fp16)[name = string("mh_w_39_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> obj_95_cast_fp16 = softmax(axis = var_924, x = mh_w_39_cast_fp16)[name = string("obj_95_cast_fp16")];
tensor<int32, [4]> var_1066 = const()[name = string("op_1066"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_1067_cast_fp16 = reshape(shape = var_1066, x = obj_91_cast_fp16)[name = string("op_1067_cast_fp16")];
bool attn_19_transpose_x_0 = const()[name = string("attn_19_transpose_x_0"), val = bool(false)];
bool attn_19_transpose_y_0 = const()[name = string("attn_19_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_19_cast_fp16 = matmul(transpose_x = attn_19_transpose_x_0, transpose_y = attn_19_transpose_y_0, x = var_1067_cast_fp16, y = obj_95_cast_fp16)[name = string("attn_19_cast_fp16")];
tensor<int32, [4]> var_1070 = const()[name = string("op_1070"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_43_cast_fp16 = reshape(shape = var_1070, x = attn_19_cast_fp16)[name = string("input_43_cast_fp16")];
string obj_93_pad_type_0 = const()[name = string("obj_93_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_93_strides_0 = const()[name = string("obj_93_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_93_pad_0 = const()[name = string("obj_93_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_93_dilations_0 = const()[name = string("obj_93_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_93_groups_0 = const()[name = string("obj_93_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_4_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_4_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(85631360)))];
tensor<fp16, [512]> layers_4_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_4_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(86155712)))];
tensor<fp16, [1, 512, 1, 1]> obj_93_cast_fp16 = conv(bias = layers_4_encoder_attn_o_proj_bias_to_fp16, dilations = obj_93_dilations_0, groups = obj_93_groups_0, pad = obj_93_pad_0, pad_type = obj_93_pad_type_0, strides = obj_93_strides_0, weight = layers_4_encoder_attn_o_proj_weight_to_fp16, x = input_43_cast_fp16)[name = string("obj_93_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_29_cast_fp16 = add(x = inputs_27_cast_fp16, y = obj_93_cast_fp16)[name = string("inputs_29_cast_fp16")];
tensor<int32, [1]> out_29_axes_0 = const()[name = string("out_29_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_1091_to_fp16 = const()[name = string("op_1091_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_29_cast_fp16 = layer_norm(axes = out_29_axes_0, epsilon = var_1091_to_fp16, x = inputs_29_cast_fp16)[name = string("out_29_cast_fp16")];
tensor<fp16, [512]> input_45_gamma_0_to_fp16 = const()[name = string("input_45_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(86156800)))];
tensor<fp16, [512]> input_45_beta_0_to_fp16 = const()[name = string("input_45_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(86157888)))];
fp16 input_45_epsilon_0_to_fp16 = const()[name = string("input_45_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> input_45_cast_fp16 = batch_norm(beta = input_45_beta_0_to_fp16, epsilon = input_45_epsilon_0_to_fp16, gamma = input_45_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_29_cast_fp16)[name = string("input_45_cast_fp16")];
string input_47_pad_type_0 = const()[name = string("input_47_pad_type_0"), val = string("valid")];
tensor<int32, [2]> input_47_strides_0 = const()[name = string("input_47_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> input_47_pad_0 = const()[name = string("input_47_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> input_47_dilations_0 = const()[name = string("input_47_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 input_47_groups_0 = const()[name = string("input_47_groups_0"), val = int32(1)];
tensor<fp16, [2048, 512, 1, 1]> layers_4_fc1_weight_to_fp16 = const()[name = string("layers_4_fc1_weight_to_fp16"), val = tensor<fp16, [2048, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(86158976)))];
tensor<fp16, [2048]> layers_4_fc1_bias_to_fp16 = const()[name = string("layers_4_fc1_bias_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(88256192)))];
tensor<fp16, [1, 2048, 1, 1]> input_47_cast_fp16 = conv(bias = layers_4_fc1_bias_to_fp16, dilations = input_47_dilations_0, groups = input_47_groups_0, pad = input_47_pad_0, pad_type = input_47_pad_type_0, strides = input_47_strides_0, weight = layers_4_fc1_weight_to_fp16, x = input_45_cast_fp16)[name = string("input_47_cast_fp16")];
string input_49_mode_0 = const()[name = string("input_49_mode_0"), val = string("EXACT")];
tensor<fp16, [1, 2048, 1, 1]> input_49_cast_fp16 = gelu(mode = input_49_mode_0, x = input_47_cast_fp16)[name = string("input_49_cast_fp16")];
string hidden_states_11_pad_type_0 = const()[name = string("hidden_states_11_pad_type_0"), val = string("valid")];
tensor<int32, [2]> hidden_states_11_strides_0 = const()[name = string("hidden_states_11_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> hidden_states_11_pad_0 = const()[name = string("hidden_states_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> hidden_states_11_dilations_0 = const()[name = string("hidden_states_11_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 hidden_states_11_groups_0 = const()[name = string("hidden_states_11_groups_0"), val = int32(1)];
tensor<fp16, [512, 2048, 1, 1]> layers_4_fc2_weight_to_fp16 = const()[name = string("layers_4_fc2_weight_to_fp16"), val = tensor<fp16, [512, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(88260352)))];
tensor<fp16, [512]> layers_4_fc2_bias_to_fp16 = const()[name = string("layers_4_fc2_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(90357568)))];
tensor<fp16, [1, 512, 1, 1]> hidden_states_11_cast_fp16 = conv(bias = layers_4_fc2_bias_to_fp16, dilations = hidden_states_11_dilations_0, groups = hidden_states_11_groups_0, pad = hidden_states_11_pad_0, pad_type = hidden_states_11_pad_type_0, strides = hidden_states_11_strides_0, weight = layers_4_fc2_weight_to_fp16, x = input_49_cast_fp16)[name = string("hidden_states_11_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_31_cast_fp16 = add(x = inputs_29_cast_fp16, y = hidden_states_11_cast_fp16)[name = string("inputs_31_cast_fp16")];
tensor<int32, [4]> obj_107_begin_0 = const()[name = string("obj_107_begin_0"), val = tensor<int32, [4]>([5, 0, 0, 0])];
tensor<int32, [4]> obj_107_end_0 = const()[name = string("obj_107_end_0"), val = tensor<int32, [4]>([6, 512, 1, 1536])];
tensor<bool, [4]> obj_107_end_mask_0 = const()[name = string("obj_107_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_107_cast_fp16 = slice_by_index(begin = obj_107_begin_0, end = obj_107_end_0, end_mask = obj_107_end_mask_0, x = read_state_2)[name = string("obj_107_cast_fp16")];
tensor<int32, [4]> obj_109_begin_0 = const()[name = string("obj_109_begin_0"), val = tensor<int32, [4]>([5, 0, 0, 0])];
tensor<int32, [4]> obj_109_end_0 = const()[name = string("obj_109_end_0"), val = tensor<int32, [4]>([6, 512, 1, 1536])];
tensor<bool, [4]> obj_109_end_mask_0 = const()[name = string("obj_109_end_mask_0"), val = tensor<bool, [4]>([false, true, true, true])];
tensor<fp16, [1, 512, 1, 1536]> obj_109_cast_fp16 = slice_by_index(begin = obj_109_begin_0, end = obj_109_end_0, end_mask = obj_109_end_mask_0, x = read_state_3)[name = string("obj_109_cast_fp16")];
int32 var_1137 = const()[name = string("op_1137"), val = int32(3)];
tensor<int32, [1]> out_31_axes_0 = const()[name = string("out_31_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_1162_to_fp16 = const()[name = string("op_1162_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_31_cast_fp16 = layer_norm(axes = out_31_axes_0, epsilon = var_1162_to_fp16, x = inputs_31_cast_fp16)[name = string("out_31_cast_fp16")];
tensor<fp16, [512]> obj_97_gamma_0_to_fp16 = const()[name = string("obj_97_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(90358656)))];
tensor<fp16, [512]> obj_97_beta_0_to_fp16 = const()[name = string("obj_97_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(90359744)))];
fp16 obj_97_epsilon_0_to_fp16 = const()[name = string("obj_97_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_97_cast_fp16 = batch_norm(beta = obj_97_beta_0_to_fp16, epsilon = obj_97_epsilon_0_to_fp16, gamma = obj_97_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_31_cast_fp16)[name = string("obj_97_cast_fp16")];
string query_21_pad_type_0 = const()[name = string("query_21_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_21_strides_0 = const()[name = string("query_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_21_pad_0 = const()[name = string("query_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_21_dilations_0 = const()[name = string("query_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_21_groups_0 = const()[name = string("query_21_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_5_self_attn_q_proj_weight_to_fp16 = const()[name = string("layers_5_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(90360832)))];
tensor<fp16, [512]> layers_5_self_attn_q_proj_bias_to_fp16 = const()[name = string("layers_5_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(90885184)))];
tensor<fp16, [1, 512, 1, 1]> query_21_cast_fp16 = conv(bias = layers_5_self_attn_q_proj_bias_to_fp16, dilations = query_21_dilations_0, groups = query_21_groups_0, pad = query_21_pad_0, pad_type = query_21_pad_type_0, strides = query_21_strides_0, weight = layers_5_self_attn_q_proj_weight_to_fp16, x = obj_97_cast_fp16)[name = string("query_21_cast_fp16")];
string current_key_pad_type_0 = const()[name = string("current_key_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_key_strides_0 = const()[name = string("current_key_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_key_pad_0 = const()[name = string("current_key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_key_dilations_0 = const()[name = string("current_key_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_key_groups_0 = const()[name = string("current_key_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_5_self_attn_k_proj_weight_to_fp16 = const()[name = string("layers_5_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(90886272)))];
tensor<fp16, [1, 512, 1, 1]> current_key_cast_fp16 = conv(dilations = current_key_dilations_0, groups = current_key_groups_0, pad = current_key_pad_0, pad_type = current_key_pad_type_0, strides = current_key_strides_0, weight = layers_5_self_attn_k_proj_weight_to_fp16, x = obj_97_cast_fp16)[name = string("current_key_cast_fp16")];
string current_value_pad_type_0 = const()[name = string("current_value_pad_type_0"), val = string("valid")];
tensor<int32, [2]> current_value_strides_0 = const()[name = string("current_value_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> current_value_pad_0 = const()[name = string("current_value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> current_value_dilations_0 = const()[name = string("current_value_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 current_value_groups_0 = const()[name = string("current_value_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_5_self_attn_v_proj_weight_to_fp16 = const()[name = string("layers_5_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(91410624)))];
tensor<fp16, [512]> layers_5_self_attn_v_proj_bias_to_fp16 = const()[name = string("layers_5_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(91934976)))];
tensor<fp16, [1, 512, 1, 1]> current_value_cast_fp16 = conv(bias = layers_5_self_attn_v_proj_bias_to_fp16, dilations = current_value_dilations_0, groups = current_value_groups_0, pad = current_value_pad_0, pad_type = current_value_pad_type_0, strides = current_value_strides_0, weight = layers_5_self_attn_v_proj_weight_to_fp16, x = obj_97_cast_fp16)[name = string("current_value_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_1200_cast_fp16 = mul(x = current_key_cast_fp16, y = var_145_cast_fp16)[name = string("op_1200_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> key_cast_fp16 = add(x = var_53_cast_fp16_5, y = var_1200_cast_fp16)[name = string("key_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> var_1202_cast_fp16 = mul(x = current_value_cast_fp16, y = var_145_cast_fp16)[name = string("op_1202_cast_fp16")];
tensor<fp16, [1, 512, 1, 448]> value_cast_fp16 = add(x = var_62_cast_fp16_5, y = var_1202_cast_fp16)[name = string("value_cast_fp16")];
tensor<int32, [4]> var_1205 = const()[name = string("op_1205"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_21_cast_fp16 = reshape(shape = var_1205, x = query_21_cast_fp16)[name = string("mh_q_21_cast_fp16")];
fp16 var_1207_to_fp16 = const()[name = string("op_1207_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_1208_cast_fp16 = mul(x = mh_q_21_cast_fp16, y = var_1207_to_fp16)[name = string("op_1208_cast_fp16")];
tensor<int32, [4]> var_1209 = const()[name = string("op_1209"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_1210_cast_fp16 = reshape(shape = var_1209, x = key_cast_fp16)[name = string("op_1210_cast_fp16")];
bool mh_w_41_transpose_x_0 = const()[name = string("mh_w_41_transpose_x_0"), val = bool(true)];
bool mh_w_41_transpose_y_0 = const()[name = string("mh_w_41_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 448]> mh_w_41_cast_fp16 = matmul(transpose_x = mh_w_41_transpose_x_0, transpose_y = mh_w_41_transpose_y_0, x = var_1208_cast_fp16, y = var_1210_cast_fp16)[name = string("mh_w_41_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> mh_w_43_cast_fp16 = add(x = mh_w_41_cast_fp16, y = var_162_cast_fp16)[name = string("mh_w_43_cast_fp16")];
tensor<fp16, [1, 8, 1, 448]> var_1218_cast_fp16 = softmax(axis = var_1137, x = mh_w_43_cast_fp16)[name = string("op_1218_cast_fp16")];
tensor<int32, [4]> var_1219 = const()[name = string("op_1219"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 448]> var_1220_cast_fp16 = reshape(shape = var_1219, x = value_cast_fp16)[name = string("op_1220_cast_fp16")];
bool attn_21_transpose_x_0 = const()[name = string("attn_21_transpose_x_0"), val = bool(false)];
bool attn_21_transpose_y_0 = const()[name = string("attn_21_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_21_cast_fp16 = matmul(transpose_x = attn_21_transpose_x_0, transpose_y = attn_21_transpose_y_0, x = var_1220_cast_fp16, y = var_1218_cast_fp16)[name = string("attn_21_cast_fp16")];
tensor<int32, [4]> var_1223 = const()[name = string("op_1223"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_51_cast_fp16 = reshape(shape = var_1223, x = attn_21_cast_fp16)[name = string("input_51_cast_fp16")];
string obj_103_pad_type_0 = const()[name = string("obj_103_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_103_strides_0 = const()[name = string("obj_103_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_103_pad_0 = const()[name = string("obj_103_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_103_dilations_0 = const()[name = string("obj_103_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_103_groups_0 = const()[name = string("obj_103_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_5_self_attn_o_proj_weight_to_fp16 = const()[name = string("layers_5_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(91936064)))];
tensor<fp16, [512]> layers_5_self_attn_o_proj_bias_to_fp16 = const()[name = string("layers_5_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(92460416)))];
tensor<fp16, [1, 512, 1, 1]> obj_103_cast_fp16 = conv(bias = layers_5_self_attn_o_proj_bias_to_fp16, dilations = obj_103_dilations_0, groups = obj_103_groups_0, pad = obj_103_pad_0, pad_type = obj_103_pad_type_0, strides = obj_103_strides_0, weight = layers_5_self_attn_o_proj_weight_to_fp16, x = input_51_cast_fp16)[name = string("obj_103_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_33_cast_fp16 = add(x = inputs_31_cast_fp16, y = obj_103_cast_fp16)[name = string("inputs_33_cast_fp16")];
tensor<int32, [1]> out_33_axes_0 = const()[name = string("out_33_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_1245_to_fp16 = const()[name = string("op_1245_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_33_cast_fp16 = layer_norm(axes = out_33_axes_0, epsilon = var_1245_to_fp16, x = inputs_33_cast_fp16)[name = string("out_33_cast_fp16")];
tensor<fp16, [512]> obj_105_gamma_0_to_fp16 = const()[name = string("obj_105_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(92461504)))];
tensor<fp16, [512]> obj_105_beta_0_to_fp16 = const()[name = string("obj_105_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(92462592)))];
fp16 obj_105_epsilon_0_to_fp16 = const()[name = string("obj_105_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> obj_105_cast_fp16 = batch_norm(beta = obj_105_beta_0_to_fp16, epsilon = obj_105_epsilon_0_to_fp16, gamma = obj_105_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_33_cast_fp16)[name = string("obj_105_cast_fp16")];
string query_pad_type_0 = const()[name = string("query_pad_type_0"), val = string("valid")];
tensor<int32, [2]> query_strides_0 = const()[name = string("query_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> query_pad_0 = const()[name = string("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> query_dilations_0 = const()[name = string("query_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 query_groups_0 = const()[name = string("query_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_5_encoder_attn_q_proj_weight_to_fp16 = const()[name = string("layers_5_encoder_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(92463680)))];
tensor<fp16, [512]> layers_5_encoder_attn_q_proj_bias_to_fp16 = const()[name = string("layers_5_encoder_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(92988032)))];
tensor<fp16, [1, 512, 1, 1]> query_cast_fp16 = conv(bias = layers_5_encoder_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_5_encoder_attn_q_proj_weight_to_fp16, x = obj_105_cast_fp16)[name = string("query_cast_fp16")];
tensor<int32, [4]> var_1265 = const()[name = string("op_1265"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1]> mh_q_cast_fp16 = reshape(shape = var_1265, x = query_cast_fp16)[name = string("mh_q_cast_fp16")];
fp16 var_1267_to_fp16 = const()[name = string("op_1267_to_fp16"), val = fp16(0x1p-3)];
tensor<fp16, [1, 8, 64, 1]> var_1268_cast_fp16 = mul(x = mh_q_cast_fp16, y = var_1267_to_fp16)[name = string("op_1268_cast_fp16")];
tensor<int32, [4]> var_1269 = const()[name = string("op_1269"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_1270_cast_fp16 = reshape(shape = var_1269, x = obj_107_cast_fp16)[name = string("op_1270_cast_fp16")];
bool mh_w_45_transpose_x_0 = const()[name = string("mh_w_45_transpose_x_0"), val = bool(true)];
bool mh_w_45_transpose_y_0 = const()[name = string("mh_w_45_transpose_y_0"), val = bool(false)];
tensor<fp16, [1, 8, 1, 1536]> mh_w_45_cast_fp16 = matmul(transpose_x = mh_w_45_transpose_x_0, transpose_y = mh_w_45_transpose_y_0, x = var_1268_cast_fp16, y = var_1270_cast_fp16)[name = string("mh_w_45_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> mh_w_cast_fp16 = add(x = mh_w_45_cast_fp16, y = var_222_cast_fp16)[name = string("mh_w_cast_fp16")];
tensor<fp16, [1, 8, 1, 1536]> obj_113_cast_fp16 = softmax(axis = var_1137, x = mh_w_cast_fp16)[name = string("obj_113_cast_fp16")];
tensor<int32, [4]> var_1279 = const()[name = string("op_1279"), val = tensor<int32, [4]>([1, 8, 64, -1])];
tensor<fp16, [1, 8, 64, 1536]> var_1280_cast_fp16 = reshape(shape = var_1279, x = obj_109_cast_fp16)[name = string("op_1280_cast_fp16")];
bool attn_transpose_x_0 = const()[name = string("attn_transpose_x_0"), val = bool(false)];
bool attn_transpose_y_0 = const()[name = string("attn_transpose_y_0"), val = bool(true)];
tensor<fp16, [1, 8, 64, 1]> attn_cast_fp16 = matmul(transpose_x = attn_transpose_x_0, transpose_y = attn_transpose_y_0, x = var_1280_cast_fp16, y = obj_113_cast_fp16)[name = string("attn_cast_fp16")];
tensor<int32, [4]> var_1283 = const()[name = string("op_1283"), val = tensor<int32, [4]>([1, 512, 1, -1])];
tensor<fp16, [1, 512, 1, 1]> input_53_cast_fp16 = reshape(shape = var_1283, x = attn_cast_fp16)[name = string("input_53_cast_fp16")];
string obj_111_pad_type_0 = const()[name = string("obj_111_pad_type_0"), val = string("valid")];
tensor<int32, [2]> obj_111_strides_0 = const()[name = string("obj_111_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> obj_111_pad_0 = const()[name = string("obj_111_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> obj_111_dilations_0 = const()[name = string("obj_111_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 obj_111_groups_0 = const()[name = string("obj_111_groups_0"), val = int32(1)];
tensor<fp16, [512, 512, 1, 1]> layers_5_encoder_attn_o_proj_weight_to_fp16 = const()[name = string("layers_5_encoder_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [512, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(92989120)))];
tensor<fp16, [512]> layers_5_encoder_attn_o_proj_bias_to_fp16 = const()[name = string("layers_5_encoder_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(93513472)))];
tensor<fp16, [1, 512, 1, 1]> obj_111_cast_fp16 = conv(bias = layers_5_encoder_attn_o_proj_bias_to_fp16, dilations = obj_111_dilations_0, groups = obj_111_groups_0, pad = obj_111_pad_0, pad_type = obj_111_pad_type_0, strides = obj_111_strides_0, weight = layers_5_encoder_attn_o_proj_weight_to_fp16, x = input_53_cast_fp16)[name = string("obj_111_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_35_cast_fp16 = add(x = inputs_33_cast_fp16, y = obj_111_cast_fp16)[name = string("inputs_35_cast_fp16")];
tensor<int32, [1]> out_35_axes_0 = const()[name = string("out_35_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_1304_to_fp16 = const()[name = string("op_1304_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_35_cast_fp16 = layer_norm(axes = out_35_axes_0, epsilon = var_1304_to_fp16, x = inputs_35_cast_fp16)[name = string("out_35_cast_fp16")];
tensor<fp16, [512]> input_55_gamma_0_to_fp16 = const()[name = string("input_55_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(93514560)))];
tensor<fp16, [512]> input_55_beta_0_to_fp16 = const()[name = string("input_55_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(93515648)))];
fp16 input_55_epsilon_0_to_fp16 = const()[name = string("input_55_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> input_55_cast_fp16 = batch_norm(beta = input_55_beta_0_to_fp16, epsilon = input_55_epsilon_0_to_fp16, gamma = input_55_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_35_cast_fp16)[name = string("input_55_cast_fp16")];
string input_57_pad_type_0 = const()[name = string("input_57_pad_type_0"), val = string("valid")];
tensor<int32, [2]> input_57_strides_0 = const()[name = string("input_57_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> input_57_pad_0 = const()[name = string("input_57_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> input_57_dilations_0 = const()[name = string("input_57_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 input_57_groups_0 = const()[name = string("input_57_groups_0"), val = int32(1)];
tensor<fp16, [2048, 512, 1, 1]> layers_5_fc1_weight_to_fp16 = const()[name = string("layers_5_fc1_weight_to_fp16"), val = tensor<fp16, [2048, 512, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(93516736)))];
tensor<fp16, [2048]> layers_5_fc1_bias_to_fp16 = const()[name = string("layers_5_fc1_bias_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(95613952)))];
tensor<fp16, [1, 2048, 1, 1]> input_57_cast_fp16 = conv(bias = layers_5_fc1_bias_to_fp16, dilations = input_57_dilations_0, groups = input_57_groups_0, pad = input_57_pad_0, pad_type = input_57_pad_type_0, strides = input_57_strides_0, weight = layers_5_fc1_weight_to_fp16, x = input_55_cast_fp16)[name = string("input_57_cast_fp16")];
string input_mode_0 = const()[name = string("input_mode_0"), val = string("EXACT")];
tensor<fp16, [1, 2048, 1, 1]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_57_cast_fp16)[name = string("input_cast_fp16")];
string hidden_states_13_pad_type_0 = const()[name = string("hidden_states_13_pad_type_0"), val = string("valid")];
tensor<int32, [2]> hidden_states_13_strides_0 = const()[name = string("hidden_states_13_strides_0"), val = tensor<int32, [2]>([1, 1])];
tensor<int32, [4]> hidden_states_13_pad_0 = const()[name = string("hidden_states_13_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [2]> hidden_states_13_dilations_0 = const()[name = string("hidden_states_13_dilations_0"), val = tensor<int32, [2]>([1, 1])];
int32 hidden_states_13_groups_0 = const()[name = string("hidden_states_13_groups_0"), val = int32(1)];
tensor<fp16, [512, 2048, 1, 1]> layers_5_fc2_weight_to_fp16 = const()[name = string("layers_5_fc2_weight_to_fp16"), val = tensor<fp16, [512, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(95618112)))];
tensor<fp16, [512]> layers_5_fc2_bias_to_fp16 = const()[name = string("layers_5_fc2_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(97715328)))];
tensor<fp16, [1, 512, 1, 1]> hidden_states_13_cast_fp16 = conv(bias = layers_5_fc2_bias_to_fp16, dilations = hidden_states_13_dilations_0, groups = hidden_states_13_groups_0, pad = hidden_states_13_pad_0, pad_type = hidden_states_13_pad_type_0, strides = hidden_states_13_strides_0, weight = layers_5_fc2_weight_to_fp16, x = input_cast_fp16)[name = string("hidden_states_13_cast_fp16")];
tensor<fp16, [1, 512, 1, 1]> inputs_cast_fp16 = add(x = inputs_35_cast_fp16, y = hidden_states_13_cast_fp16)[name = string("inputs_cast_fp16")];
tensor<int32, [1]> out_axes_0 = const()[name = string("out_axes_0"), val = tensor<int32, [1]>([1])];
fp16 var_1347_to_fp16 = const()[name = string("op_1347_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> out_cast_fp16 = layer_norm(axes = out_axes_0, epsilon = var_1347_to_fp16, x = inputs_cast_fp16)[name = string("out_cast_fp16")];
tensor<fp16, [512]> hidden_states_gamma_0_to_fp16 = const()[name = string("hidden_states_gamma_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(97716416)))];
tensor<fp16, [512]> hidden_states_beta_0_to_fp16 = const()[name = string("hidden_states_beta_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(97717504)))];
fp16 hidden_states_epsilon_0_to_fp16 = const()[name = string("hidden_states_epsilon_0_to_fp16"), val = fp16(0x1.5p-17)];
tensor<fp16, [1, 512, 1, 1]> hidden_states_cast_fp16 = batch_norm(beta = hidden_states_beta_0_to_fp16, epsilon = hidden_states_epsilon_0_to_fp16, gamma = hidden_states_gamma_0_to_fp16, mean = obj_5_mean_0_to_fp16, variance = obj_5_variance_0_to_fp16, x = out_cast_fp16)[name = string("hidden_states_cast_fp16")];
tensor<int32, [1]> var_1358_axes_0 = const()[name = string("op_1358_axes_0"), val = tensor<int32, [1]>([2])];
tensor<fp16, [1, 512, 1]> var_1358_cast_fp16 = squeeze(axes = var_1358_axes_0, x = hidden_states_cast_fp16)[name = string("op_1358_cast_fp16")];
tensor<int32, [3]> var_1361_perm_0 = const()[name = string("op_1361_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
tensor<fp16, [51864]> linear_0_bias_0_to_fp16 = const()[name = string("linear_0_bias_0_to_fp16"), val = tensor<fp16, [51864]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(97718592)))];
tensor<fp16, [1, 1, 512]> var_1361_cast_fp16 = transpose(perm = var_1361_perm_0, x = var_1358_cast_fp16)[name = string("transpose_0")];
tensor<fp16, [1, 1, 51864]> logits = linear(bias = linear_0_bias_0_to_fp16, weight = embed_tokens_weight_to_fp16, x = var_1361_cast_fp16)[name = string("linear_0_cast_fp16")];
int32 var_1365 = const()[name = string("op_1365"), val = int32(1)];
bool obj_117_interleave_0 = const()[name = string("obj_117_interleave_0"), val = bool(false)];
tensor<fp16, [1, 3072, 1, 1]> key_cache_updates = concat(axis = var_1365, interleave = obj_117_interleave_0, values = (current_key_1_cast_fp16, current_key_3_cast_fp16, current_key_5_cast_fp16, current_key_7_cast_fp16, current_key_9_cast_fp16, current_key_cast_fp16))[name = string("obj_117_cast_fp16")];
int32 var_1368 = const()[name = string("op_1368"), val = int32(1)];
bool obj_119_interleave_0 = const()[name = string("obj_119_interleave_0"), val = bool(false)];
tensor<fp16, [1, 3072, 1, 1]> value_cache_updates = concat(axis = var_1368, interleave = obj_119_interleave_0, values = (current_value_1_cast_fp16, current_value_3_cast_fp16, current_value_5_cast_fp16, current_value_7_cast_fp16, current_value_9_cast_fp16, current_value_cast_fp16))[name = string("obj_119_cast_fp16")];
tensor<int32, [4]> var_1379_begin_0 = const()[name = string("op_1379_begin_0"), val = tensor<int32, [4]>([0, 3, 0, 0])];
tensor<int32, [4]> var_1379_end_0 = const()[name = string("op_1379_end_0"), val = tensor<int32, [4]>([1, 4, 1, 1536])];
tensor<bool, [4]> var_1379_end_mask_0 = const()[name = string("op_1379_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
tensor<fp16, [1, 1, 1, 1536]> var_1379_cast_fp16 = slice_by_index(begin = var_1379_begin_0, end = var_1379_end_0, end_mask = var_1379_end_mask_0, x = obj_77_cast_fp16)[name = string("op_1379_cast_fp16")];
tensor<int32, [4]> var_1382_begin_0 = const()[name = string("op_1382_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_1382_end_0 = const()[name = string("op_1382_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
tensor<bool, [4]> var_1382_end_mask_0 = const()[name = string("op_1382_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<bool, [4]> var_1382_squeeze_mask_0 = const()[name = string("op_1382_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
tensor<fp16, [1, 1, 1536]> var_1382_cast_fp16 = slice_by_index(begin = var_1382_begin_0, end = var_1382_end_0, end_mask = var_1382_end_mask_0, squeeze_mask = var_1382_squeeze_mask_0, x = var_1379_cast_fp16)[name = string("op_1382_cast_fp16")];
tensor<int32, [4]> var_1397_begin_0 = const()[name = string("op_1397_begin_0"), val = tensor<int32, [4]>([0, 7, 0, 0])];
tensor<int32, [4]> var_1397_end_0 = const()[name = string("op_1397_end_0"), val = tensor<int32, [4]>([1, 8, 1, 1536])];
tensor<bool, [4]> var_1397_end_mask_0 = const()[name = string("op_1397_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
tensor<fp16, [1, 1, 1, 1536]> var_1397_cast_fp16 = slice_by_index(begin = var_1397_begin_0, end = var_1397_end_0, end_mask = var_1397_end_mask_0, x = obj_95_cast_fp16)[name = string("op_1397_cast_fp16")];
tensor<int32, [4]> var_1400_begin_0 = const()[name = string("op_1400_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_1400_end_0 = const()[name = string("op_1400_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
tensor<bool, [4]> var_1400_end_mask_0 = const()[name = string("op_1400_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<bool, [4]> var_1400_squeeze_mask_0 = const()[name = string("op_1400_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
tensor<fp16, [1, 1, 1536]> var_1400_cast_fp16 = slice_by_index(begin = var_1400_begin_0, end = var_1400_end_0, end_mask = var_1400_end_mask_0, squeeze_mask = var_1400_squeeze_mask_0, x = var_1397_cast_fp16)[name = string("op_1400_cast_fp16")];
tensor<int32, [4]> var_1415_begin_0 = const()[name = string("op_1415_begin_0"), val = tensor<int32, [4]>([0, 1, 0, 0])];
tensor<int32, [4]> var_1415_end_0 = const()[name = string("op_1415_end_0"), val = tensor<int32, [4]>([1, 2, 1, 1536])];
tensor<bool, [4]> var_1415_end_mask_0 = const()[name = string("op_1415_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
tensor<fp16, [1, 1, 1, 1536]> var_1415_cast_fp16 = slice_by_index(begin = var_1415_begin_0, end = var_1415_end_0, end_mask = var_1415_end_mask_0, x = obj_113_cast_fp16)[name = string("op_1415_cast_fp16")];
tensor<int32, [4]> var_1418_begin_0 = const()[name = string("op_1418_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_1418_end_0 = const()[name = string("op_1418_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
tensor<bool, [4]> var_1418_end_mask_0 = const()[name = string("op_1418_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<bool, [4]> var_1418_squeeze_mask_0 = const()[name = string("op_1418_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
tensor<fp16, [1, 1, 1536]> var_1418_cast_fp16 = slice_by_index(begin = var_1418_begin_0, end = var_1418_end_0, end_mask = var_1418_end_mask_0, squeeze_mask = var_1418_squeeze_mask_0, x = var_1415_cast_fp16)[name = string("op_1418_cast_fp16")];
tensor<int32, [4]> var_1433_begin_0 = const()[name = string("op_1433_begin_0"), val = tensor<int32, [4]>([0, 5, 0, 0])];
tensor<int32, [4]> var_1433_end_0 = const()[name = string("op_1433_end_0"), val = tensor<int32, [4]>([1, 6, 1, 1536])];
tensor<bool, [4]> var_1433_end_mask_0 = const()[name = string("op_1433_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
tensor<fp16, [1, 1, 1, 1536]> var_1433_cast_fp16 = slice_by_index(begin = var_1433_begin_0, end = var_1433_end_0, end_mask = var_1433_end_mask_0, x = obj_113_cast_fp16)[name = string("op_1433_cast_fp16")];
tensor<int32, [4]> var_1436_begin_0 = const()[name = string("op_1436_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_1436_end_0 = const()[name = string("op_1436_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
tensor<bool, [4]> var_1436_end_mask_0 = const()[name = string("op_1436_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<bool, [4]> var_1436_squeeze_mask_0 = const()[name = string("op_1436_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
tensor<fp16, [1, 1, 1536]> var_1436_cast_fp16 = slice_by_index(begin = var_1436_begin_0, end = var_1436_end_0, end_mask = var_1436_end_mask_0, squeeze_mask = var_1436_squeeze_mask_0, x = var_1433_cast_fp16)[name = string("op_1436_cast_fp16")];
tensor<int32, [4]> var_1451_begin_0 = const()[name = string("op_1451_begin_0"), val = tensor<int32, [4]>([0, 7, 0, 0])];
tensor<int32, [4]> var_1451_end_0 = const()[name = string("op_1451_end_0"), val = tensor<int32, [4]>([1, 8, 1, 1536])];
tensor<bool, [4]> var_1451_end_mask_0 = const()[name = string("op_1451_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
tensor<fp16, [1, 1, 1, 1536]> var_1451_cast_fp16 = slice_by_index(begin = var_1451_begin_0, end = var_1451_end_0, end_mask = var_1451_end_mask_0, x = obj_113_cast_fp16)[name = string("op_1451_cast_fp16")];
tensor<int32, [4]> var_1454_begin_0 = const()[name = string("op_1454_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
tensor<int32, [4]> var_1454_end_0 = const()[name = string("op_1454_end_0"), val = tensor<int32, [4]>([1, 1, 1, 1536])];
tensor<bool, [4]> var_1454_end_mask_0 = const()[name = string("op_1454_end_mask_0"), val = tensor<bool, [4]>([true, true, false, true])];
tensor<bool, [4]> var_1454_squeeze_mask_0 = const()[name = string("op_1454_squeeze_mask_0"), val = tensor<bool, [4]>([false, false, true, false])];
tensor<fp16, [1, 1, 1536]> var_1454_cast_fp16 = slice_by_index(begin = var_1454_begin_0, end = var_1454_end_0, end_mask = var_1454_end_mask_0, squeeze_mask = var_1454_squeeze_mask_0, x = var_1451_cast_fp16)[name = string("op_1454_cast_fp16")];
int32 var_1461 = const()[name = string("op_1461"), val = int32(1)];
bool var_1462_interleave_0 = const()[name = string("op_1462_interleave_0"), val = bool(false)];
tensor<fp16, [1, 5, 1536]> var_1462_cast_fp16 = concat(axis = var_1461, interleave = var_1462_interleave_0, values = (var_1382_cast_fp16, var_1400_cast_fp16, var_1418_cast_fp16, var_1436_cast_fp16, var_1454_cast_fp16))[name = string("op_1462_cast_fp16")];
bool var_1465 = const()[name = string("op_1465"), val = bool(false)];
tensor<int32, [1]> obj_axes_0 = const()[name = string("obj_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp16, [1, 1536]> alignment_heads_weights = reduce_mean(axes = obj_axes_0, keep_dims = var_1465, x = var_1462_cast_fp16)[name = string("obj_cast_fp16")];
} -> (logits, key_cache_updates, value_cache_updates, alignment_heads_weights);
}