lithium0003 committed
Commit 015337e · verified · Parent: 220b489

add converted files

Files changed (35)
  1. ggml-base-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  2. ggml-base-encoder.mlmodelc/coremldata.bin +3 -0
  3. ggml-base-encoder.mlmodelc/metadata.json +66 -0
  4. ggml-base-encoder.mlmodelc/model.mil +384 -0
  5. ggml-base-encoder.mlmodelc/weights/weight.bin +3 -0
  6. ggml-base.bin +3 -0
  7. ggml-large-v3-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  8. ggml-large-v3-encoder.mlmodelc/coremldata.bin +3 -0
  9. ggml-large-v3-encoder.mlmodelc/metadata.json +66 -0
  10. ggml-large-v3-encoder.mlmodelc/model.mil +0 -0
  11. ggml-large-v3-encoder.mlmodelc/weights/weight.bin +3 -0
  12. ggml-large-v3-q8_0.bin +3 -0
  13. ggml-medium-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  14. ggml-medium-encoder.mlmodelc/coremldata.bin +3 -0
  15. ggml-medium-encoder.mlmodelc/metadata.json +66 -0
  16. ggml-medium-encoder.mlmodelc/model.mil +0 -0
  17. ggml-medium-encoder.mlmodelc/weights/weight.bin +3 -0
  18. ggml-medium.bin +3 -0
  19. ggml-small-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  20. ggml-small-encoder.mlmodelc/coremldata.bin +3 -0
  21. ggml-small-encoder.mlmodelc/metadata.json +66 -0
  22. ggml-small-encoder.mlmodelc/model.mil +0 -0
  23. ggml-small-encoder.mlmodelc/weights/weight.bin +3 -0
  24. ggml-small.bin +3 -0
  25. ggml-tiny-encoder.mlmodelc/analytics/coremldata.bin +3 -0
  26. ggml-tiny-encoder.mlmodelc/coremldata.bin +3 -0
  27. ggml-tiny-encoder.mlmodelc/metadata.json +66 -0
  28. ggml-tiny-encoder.mlmodelc/model.mil +268 -0
  29. ggml-tiny-encoder.mlmodelc/weights/weight.bin +3 -0
  30. ggml-tiny.bin +3 -0
  31. index/base +6 -0
  32. index/large-v3 +6 -0
  33. index/medium +6 -0
  34. index/small +6 -0
  35. index/tiny +6 -0
ggml-base-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31fb009a1caa38a49165cb418f454ef1e5d3cd8e7a1bc37a721575d800a8c712
+ size 243
ggml-base-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b353632cc6cb8774fdd42eab32407ce491f6e786a1e15c30fc9c58c7e39cd437
+ size 318
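
The *.bin entries in this commit are Git LFS pointer files, not the binaries themselves: each pointer records the LFS spec version, the sha256 object id, and the byte size of the blob that git lfs pull (or the Hub's resolve endpoint) materializes on checkout. A minimal sketch of reading such a pointer from Python, for illustration only (it assumes the file on disk is still the un-smudged pointer text, e.g. the repo was cloned with GIT_LFS_SKIP_SMUDGE=1):

from pathlib import Path

def read_lfs_pointer(path):
    # A pointer file is three "key value" lines: version, oid, size.
    fields = {}
    for line in Path(path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return {"version": fields["version"],
            "oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"])}

ptr = read_lfs_pointer("ggml-base-encoder.mlmodelc/analytics/coremldata.bin")
print(ptr["oid"][:8], ptr["size"])   # 31fb009a 243 for the pointer above
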
ggml-base-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,66 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1500 × 512)",
+ "shortDescription" : "",
+ "shape" : "[1, 1500, 512]",
+ "name" : "output",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 8,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios17.layerNorm" : 13,
+ "Ios17.reshape" : 24,
+ "Ios17.conv" : 2,
+ "Ios17.linear" : 36,
+ "Ios17.add" : 13,
+ "Ios17.matmul" : 12,
+ "Ios16.gelu" : 8,
+ "Ios16.softmax" : 6,
+ "Ios17.mul" : 12,
+ "Ios17.transpose" : 25
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "availability" : {
+ "macOS" : "14.0",
+ "tvOS" : "17.0",
+ "visionOS" : "1.0",
+ "watchOS" : "10.0",
+ "iOS" : "17.0",
+ "macCatalyst" : "17.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
+ "com.github.apple.coremltools.source" : "torch==2.2.2",
+ "com.github.apple.coremltools.version" : "7.2"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 80 × 3000)",
+ "shortDescription" : "",
+ "shape" : "[1, 80, 3000]",
+ "name" : "logmel_data",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "ggml_base_encoder",
+ "method" : "predict"
+ }
+ ]
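
Per the schema above, the converted encoder exposes a single Float16 MultiArray input named logmel_data of shape [1, 80, 3000] (80 log-mel bins over 3000 frames, i.e. 30 s of audio) and returns a [1, 1500, 512] Float16 array named output. A minimal sketch of calling it directly from Python, assuming coremltools >= 7 (whose CompiledMLModel class can load a compiled .mlmodelc) on one of the OS versions listed under "availability"; whisper.cpp itself picks up the .mlmodelc through its own Core ML path and does not need this:

import numpy as np
import coremltools as ct

# Load the compiled model package added in this commit (prediction runs on macOS only).
model = ct.models.CompiledMLModel("ggml-base-encoder.mlmodelc")

# Dummy input matching the Float16 [1, 80, 3000] schema; real use feeds Whisper log-mel features.
logmel = np.zeros((1, 80, 3000), dtype=np.float16)

out = model.predict({"logmel_data": logmel})
print(out["output"].shape)   # expected: (1, 1500, 512) for the base encoder
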
ggml-base-encoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,384 @@
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.2.2"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
3
+ {
4
+ func main<ios17>(tensor<fp16, [1, 80, 3000]> logmel_data) {
5
+ tensor<int32, []> var_20 = const()[name = tensor<string, []>("op_20"), val = tensor<int32, []>(1)];
6
+ tensor<int32, [1]> var_28 = const()[name = tensor<string, []>("op_28"), val = tensor<int32, [1]>([1])];
7
+ tensor<int32, [1]> var_30 = const()[name = tensor<string, []>("op_30"), val = tensor<int32, [1]>([1])];
8
+ tensor<string, []> var_32_pad_type_0 = const()[name = tensor<string, []>("op_32_pad_type_0"), val = tensor<string, []>("custom")];
9
+ tensor<int32, [2]> var_32_pad_0 = const()[name = tensor<string, []>("op_32_pad_0"), val = tensor<int32, [2]>([1, 1])];
10
+ tensor<fp16, [512, 80, 3]> weight_3_to_fp16 = const()[name = tensor<string, []>("weight_3_to_fp16"), val = tensor<fp16, [512, 80, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
11
+ tensor<fp16, [512]> bias_3_to_fp16 = const()[name = tensor<string, []>("bias_3_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(245888)))];
12
+ tensor<fp16, [1, 512, 3000]> var_32_cast_fp16 = conv(bias = bias_3_to_fp16, dilations = var_30, groups = var_20, pad = var_32_pad_0, pad_type = var_32_pad_type_0, strides = var_28, weight = weight_3_to_fp16, x = logmel_data)[name = tensor<string, []>("op_32_cast_fp16")];
13
+ tensor<string, []> input_1_mode_0 = const()[name = tensor<string, []>("input_1_mode_0"), val = tensor<string, []>("EXACT")];
14
+ tensor<fp16, [1, 512, 3000]> input_1_cast_fp16 = gelu(mode = input_1_mode_0, x = var_32_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
15
+ tensor<int32, []> var_37 = const()[name = tensor<string, []>("op_37"), val = tensor<int32, []>(1)];
16
+ tensor<int32, [1]> var_46 = const()[name = tensor<string, []>("op_46"), val = tensor<int32, [1]>([2])];
17
+ tensor<int32, [1]> var_48 = const()[name = tensor<string, []>("op_48"), val = tensor<int32, [1]>([1])];
18
+ tensor<string, []> var_50_pad_type_0 = const()[name = tensor<string, []>("op_50_pad_type_0"), val = tensor<string, []>("custom")];
19
+ tensor<int32, [2]> var_50_pad_0 = const()[name = tensor<string, []>("op_50_pad_0"), val = tensor<int32, [2]>([1, 1])];
20
+ tensor<fp16, [512, 512, 3]> weight_7_to_fp16 = const()[name = tensor<string, []>("weight_7_to_fp16"), val = tensor<fp16, [512, 512, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(246976)))];
21
+ tensor<fp16, [512]> bias_7_to_fp16 = const()[name = tensor<string, []>("bias_7_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1819904)))];
22
+ tensor<fp16, [1, 512, 1500]> var_50_cast_fp16 = conv(bias = bias_7_to_fp16, dilations = var_48, groups = var_37, pad = var_50_pad_0, pad_type = var_50_pad_type_0, strides = var_46, weight = weight_7_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_50_cast_fp16")];
23
+ tensor<string, []> x_3_mode_0 = const()[name = tensor<string, []>("x_3_mode_0"), val = tensor<string, []>("EXACT")];
24
+ tensor<fp16, [1, 512, 1500]> x_3_cast_fp16 = gelu(mode = x_3_mode_0, x = var_50_cast_fp16)[name = tensor<string, []>("x_3_cast_fp16")];
25
+ tensor<int32, [3]> var_56 = const()[name = tensor<string, []>("op_56"), val = tensor<int32, [3]>([0, 2, 1])];
26
+ tensor<fp16, [1500, 512]> positional_embedding_to_fp16 = const()[name = tensor<string, []>("positional_embedding_to_fp16"), val = tensor<fp16, [1500, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1820992)))];
27
+ tensor<fp16, [1, 1500, 512]> transpose_60 = transpose(perm = var_56, x = x_3_cast_fp16)[name = tensor<string, []>("transpose_60")];
28
+ tensor<fp16, [1, 1500, 512]> var_59_cast_fp16 = add(x = transpose_60, y = positional_embedding_to_fp16)[name = tensor<string, []>("op_59_cast_fp16")];
29
+ tensor<int32, []> var_72 = const()[name = tensor<string, []>("op_72"), val = tensor<int32, []>(-1)];
30
+ tensor<int32, [1]> var_88_axes_0 = const()[name = tensor<string, []>("op_88_axes_0"), val = tensor<int32, [1]>([-1])];
31
+ tensor<fp16, [512]> blocks_0_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3357056)))];
32
+ tensor<fp16, [512]> blocks_0_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3358144)))];
33
+ tensor<fp16, []> var_78_to_fp16 = const()[name = tensor<string, []>("op_78_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
34
+ tensor<fp16, [1, 1500, 512]> var_88_cast_fp16 = layer_norm(axes = var_88_axes_0, beta = blocks_0_attn_ln_bias_to_fp16, epsilon = var_78_to_fp16, gamma = blocks_0_attn_ln_weight_to_fp16, x = var_59_cast_fp16)[name = tensor<string, []>("op_88_cast_fp16")];
35
+ tensor<fp16, [512, 512]> var_99_to_fp16 = const()[name = tensor<string, []>("op_99_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3359232)))];
36
+ tensor<fp16, [512]> var_100_to_fp16 = const()[name = tensor<string, []>("op_100_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3883584)))];
37
+ tensor<fp16, [1, 1500, 512]> linear_0_cast_fp16 = linear(bias = var_100_to_fp16, weight = var_99_to_fp16, x = var_88_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
38
+ tensor<fp16, [512, 512]> var_103_to_fp16 = const()[name = tensor<string, []>("op_103_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3884672)))];
39
+ tensor<fp16, [512]> linear_1_bias_0_to_fp16 = const()[name = tensor<string, []>("linear_1_bias_0_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4409024)))];
40
+ tensor<fp16, [1, 1500, 512]> linear_1_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_103_to_fp16, x = var_88_cast_fp16)[name = tensor<string, []>("linear_1_cast_fp16")];
41
+ tensor<fp16, [512, 512]> var_107_to_fp16 = const()[name = tensor<string, []>("op_107_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4410112)))];
42
+ tensor<fp16, [512]> var_108_to_fp16 = const()[name = tensor<string, []>("op_108_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4934464)))];
43
+ tensor<fp16, [1, 1500, 512]> linear_2_cast_fp16 = linear(bias = var_108_to_fp16, weight = var_107_to_fp16, x = var_88_cast_fp16)[name = tensor<string, []>("linear_2_cast_fp16")];
44
+ tensor<int32, [4]> var_116 = const()[name = tensor<string, []>("op_116"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
45
+ tensor<fp16, [1, 1500, 8, 64]> var_117_cast_fp16 = reshape(shape = var_116, x = linear_0_cast_fp16)[name = tensor<string, []>("op_117_cast_fp16")];
46
+ tensor<fp16, [1, 1, 1, 1]> const_42_to_fp16 = const()[name = tensor<string, []>("const_42_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
47
+ tensor<fp16, [1, 1500, 8, 64]> q_3_cast_fp16 = mul(x = var_117_cast_fp16, y = const_42_to_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
48
+ tensor<int32, [4]> var_123 = const()[name = tensor<string, []>("op_123"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
49
+ tensor<fp16, [1, 1500, 8, 64]> var_124_cast_fp16 = reshape(shape = var_123, x = linear_1_cast_fp16)[name = tensor<string, []>("op_124_cast_fp16")];
50
+ tensor<fp16, [1, 1, 1, 1]> const_43_to_fp16 = const()[name = tensor<string, []>("const_43_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
51
+ tensor<fp16, [1, 1500, 8, 64]> k_3_cast_fp16 = mul(x = var_124_cast_fp16, y = const_43_to_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
52
+ tensor<int32, [4]> var_130 = const()[name = tensor<string, []>("op_130"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
53
+ tensor<fp16, [1, 1500, 8, 64]> var_131_cast_fp16 = reshape(shape = var_130, x = linear_2_cast_fp16)[name = tensor<string, []>("op_131_cast_fp16")];
54
+ tensor<int32, [4]> var_132 = const()[name = tensor<string, []>("op_132"), val = tensor<int32, [4]>([0, 2, 1, 3])];
55
+ tensor<bool, []> qk_1_transpose_x_0 = const()[name = tensor<string, []>("qk_1_transpose_x_0"), val = tensor<bool, []>(false)];
56
+ tensor<bool, []> qk_1_transpose_y_0 = const()[name = tensor<string, []>("qk_1_transpose_y_0"), val = tensor<bool, []>(false)];
57
+ tensor<int32, [4]> transpose_24_perm_0 = const()[name = tensor<string, []>("transpose_24_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
58
+ tensor<int32, [4]> transpose_25_perm_0 = const()[name = tensor<string, []>("transpose_25_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
59
+ tensor<fp16, [1, 8, 64, 1500]> transpose_57 = transpose(perm = transpose_25_perm_0, x = k_3_cast_fp16)[name = tensor<string, []>("transpose_57")];
60
+ tensor<fp16, [1, 8, 1500, 64]> transpose_58 = transpose(perm = transpose_24_perm_0, x = q_3_cast_fp16)[name = tensor<string, []>("transpose_58")];
61
+ tensor<fp16, [1, 8, 1500, 1500]> qk_1_cast_fp16 = matmul(transpose_x = qk_1_transpose_x_0, transpose_y = qk_1_transpose_y_0, x = transpose_58, y = transpose_57)[name = tensor<string, []>("qk_1_cast_fp16")];
62
+ tensor<fp16, [1, 8, 1500, 1500]> var_136_cast_fp16 = softmax(axis = var_72, x = qk_1_cast_fp16)[name = tensor<string, []>("op_136_cast_fp16")];
63
+ tensor<bool, []> var_138_transpose_x_0 = const()[name = tensor<string, []>("op_138_transpose_x_0"), val = tensor<bool, []>(false)];
64
+ tensor<bool, []> var_138_transpose_y_0 = const()[name = tensor<string, []>("op_138_transpose_y_0"), val = tensor<bool, []>(false)];
65
+ tensor<fp16, [1, 8, 1500, 64]> transpose_59 = transpose(perm = var_132, x = var_131_cast_fp16)[name = tensor<string, []>("transpose_59")];
66
+ tensor<fp16, [1, 8, 1500, 64]> var_138_cast_fp16 = matmul(transpose_x = var_138_transpose_x_0, transpose_y = var_138_transpose_y_0, x = var_136_cast_fp16, y = transpose_59)[name = tensor<string, []>("op_138_cast_fp16")];
67
+ tensor<int32, [4]> var_139 = const()[name = tensor<string, []>("op_139"), val = tensor<int32, [4]>([0, 2, 1, 3])];
68
+ tensor<int32, [3]> concat_0 = const()[name = tensor<string, []>("concat_0"), val = tensor<int32, [3]>([1, 1500, 512])];
69
+ tensor<fp16, [1, 1500, 8, 64]> transpose_56 = transpose(perm = var_139, x = var_138_cast_fp16)[name = tensor<string, []>("transpose_56")];
70
+ tensor<fp16, [1, 1500, 512]> x_11_cast_fp16 = reshape(shape = concat_0, x = transpose_56)[name = tensor<string, []>("x_11_cast_fp16")];
71
+ tensor<fp16, [512, 512]> var_144_to_fp16 = const()[name = tensor<string, []>("op_144_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4935552)))];
72
+ tensor<fp16, [512]> var_145_to_fp16 = const()[name = tensor<string, []>("op_145_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5459904)))];
73
+ tensor<fp16, [1, 1500, 512]> linear_3_cast_fp16 = linear(bias = var_145_to_fp16, weight = var_144_to_fp16, x = x_11_cast_fp16)[name = tensor<string, []>("linear_3_cast_fp16")];
74
+ tensor<fp16, [1, 1500, 512]> x_13_cast_fp16 = add(x = var_59_cast_fp16, y = linear_3_cast_fp16)[name = tensor<string, []>("x_13_cast_fp16")];
75
+ tensor<int32, [1]> var_152_axes_0 = const()[name = tensor<string, []>("op_152_axes_0"), val = tensor<int32, [1]>([-1])];
76
+ tensor<fp16, [512]> blocks_0_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5460992)))];
77
+ tensor<fp16, [512]> blocks_0_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5462080)))];
78
+ tensor<fp16, [1, 1500, 512]> var_152_cast_fp16 = layer_norm(axes = var_152_axes_0, beta = blocks_0_mlp_ln_bias_to_fp16, epsilon = var_78_to_fp16, gamma = blocks_0_mlp_ln_weight_to_fp16, x = x_13_cast_fp16)[name = tensor<string, []>("op_152_cast_fp16")];
79
+ tensor<fp16, [2048, 512]> var_161_to_fp16 = const()[name = tensor<string, []>("op_161_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5463168)))];
80
+ tensor<fp16, [2048]> var_162_to_fp16 = const()[name = tensor<string, []>("op_162_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7560384)))];
81
+ tensor<fp16, [1, 1500, 2048]> linear_4_cast_fp16 = linear(bias = var_162_to_fp16, weight = var_161_to_fp16, x = var_152_cast_fp16)[name = tensor<string, []>("linear_4_cast_fp16")];
82
+ tensor<string, []> x_17_mode_0 = const()[name = tensor<string, []>("x_17_mode_0"), val = tensor<string, []>("EXACT")];
83
+ tensor<fp16, [1, 1500, 2048]> x_17_cast_fp16 = gelu(mode = x_17_mode_0, x = linear_4_cast_fp16)[name = tensor<string, []>("x_17_cast_fp16")];
84
+ tensor<fp16, [512, 2048]> var_167_to_fp16 = const()[name = tensor<string, []>("op_167_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7564544)))];
85
+ tensor<fp16, [512]> var_168_to_fp16 = const()[name = tensor<string, []>("op_168_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9661760)))];
86
+ tensor<fp16, [1, 1500, 512]> linear_5_cast_fp16 = linear(bias = var_168_to_fp16, weight = var_167_to_fp16, x = x_17_cast_fp16)[name = tensor<string, []>("linear_5_cast_fp16")];
87
+ tensor<fp16, [1, 1500, 512]> x_19_cast_fp16 = add(x = x_13_cast_fp16, y = linear_5_cast_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
88
+ tensor<int32, []> var_178 = const()[name = tensor<string, []>("op_178"), val = tensor<int32, []>(-1)];
89
+ tensor<int32, [1]> var_194_axes_0 = const()[name = tensor<string, []>("op_194_axes_0"), val = tensor<int32, [1]>([-1])];
90
+ tensor<fp16, [512]> blocks_1_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9662848)))];
91
+ tensor<fp16, [512]> blocks_1_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9663936)))];
92
+ tensor<fp16, []> var_184_to_fp16 = const()[name = tensor<string, []>("op_184_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
93
+ tensor<fp16, [1, 1500, 512]> var_194_cast_fp16 = layer_norm(axes = var_194_axes_0, beta = blocks_1_attn_ln_bias_to_fp16, epsilon = var_184_to_fp16, gamma = blocks_1_attn_ln_weight_to_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_194_cast_fp16")];
94
+ tensor<fp16, [512, 512]> var_205_to_fp16 = const()[name = tensor<string, []>("op_205_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9665024)))];
95
+ tensor<fp16, [512]> var_206_to_fp16 = const()[name = tensor<string, []>("op_206_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10189376)))];
96
+ tensor<fp16, [1, 1500, 512]> linear_6_cast_fp16 = linear(bias = var_206_to_fp16, weight = var_205_to_fp16, x = var_194_cast_fp16)[name = tensor<string, []>("linear_6_cast_fp16")];
97
+ tensor<fp16, [512, 512]> var_209_to_fp16 = const()[name = tensor<string, []>("op_209_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10190464)))];
98
+ tensor<fp16, [1, 1500, 512]> linear_7_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_209_to_fp16, x = var_194_cast_fp16)[name = tensor<string, []>("linear_7_cast_fp16")];
99
+ tensor<fp16, [512, 512]> var_213_to_fp16 = const()[name = tensor<string, []>("op_213_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10714816)))];
100
+ tensor<fp16, [512]> var_214_to_fp16 = const()[name = tensor<string, []>("op_214_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11239168)))];
101
+ tensor<fp16, [1, 1500, 512]> linear_8_cast_fp16 = linear(bias = var_214_to_fp16, weight = var_213_to_fp16, x = var_194_cast_fp16)[name = tensor<string, []>("linear_8_cast_fp16")];
102
+ tensor<int32, [4]> var_222 = const()[name = tensor<string, []>("op_222"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
103
+ tensor<fp16, [1, 1500, 8, 64]> var_223_cast_fp16 = reshape(shape = var_222, x = linear_6_cast_fp16)[name = tensor<string, []>("op_223_cast_fp16")];
104
+ tensor<fp16, [1, 1, 1, 1]> const_44_to_fp16 = const()[name = tensor<string, []>("const_44_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
105
+ tensor<fp16, [1, 1500, 8, 64]> q_7_cast_fp16 = mul(x = var_223_cast_fp16, y = const_44_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
106
+ tensor<int32, [4]> var_229 = const()[name = tensor<string, []>("op_229"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
107
+ tensor<fp16, [1, 1500, 8, 64]> var_230_cast_fp16 = reshape(shape = var_229, x = linear_7_cast_fp16)[name = tensor<string, []>("op_230_cast_fp16")];
108
+ tensor<fp16, [1, 1, 1, 1]> const_45_to_fp16 = const()[name = tensor<string, []>("const_45_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
109
+ tensor<fp16, [1, 1500, 8, 64]> k_7_cast_fp16 = mul(x = var_230_cast_fp16, y = const_45_to_fp16)[name = tensor<string, []>("k_7_cast_fp16")];
110
+ tensor<int32, [4]> var_236 = const()[name = tensor<string, []>("op_236"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
111
+ tensor<fp16, [1, 1500, 8, 64]> var_237_cast_fp16 = reshape(shape = var_236, x = linear_8_cast_fp16)[name = tensor<string, []>("op_237_cast_fp16")];
112
+ tensor<int32, [4]> var_238 = const()[name = tensor<string, []>("op_238"), val = tensor<int32, [4]>([0, 2, 1, 3])];
113
+ tensor<bool, []> qk_3_transpose_x_0 = const()[name = tensor<string, []>("qk_3_transpose_x_0"), val = tensor<bool, []>(false)];
114
+ tensor<bool, []> qk_3_transpose_y_0 = const()[name = tensor<string, []>("qk_3_transpose_y_0"), val = tensor<bool, []>(false)];
115
+ tensor<int32, [4]> transpose_26_perm_0 = const()[name = tensor<string, []>("transpose_26_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
116
+ tensor<int32, [4]> transpose_27_perm_0 = const()[name = tensor<string, []>("transpose_27_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
117
+ tensor<fp16, [1, 8, 64, 1500]> transpose_53 = transpose(perm = transpose_27_perm_0, x = k_7_cast_fp16)[name = tensor<string, []>("transpose_53")];
118
+ tensor<fp16, [1, 8, 1500, 64]> transpose_54 = transpose(perm = transpose_26_perm_0, x = q_7_cast_fp16)[name = tensor<string, []>("transpose_54")];
119
+ tensor<fp16, [1, 8, 1500, 1500]> qk_3_cast_fp16 = matmul(transpose_x = qk_3_transpose_x_0, transpose_y = qk_3_transpose_y_0, x = transpose_54, y = transpose_53)[name = tensor<string, []>("qk_3_cast_fp16")];
120
+ tensor<fp16, [1, 8, 1500, 1500]> var_242_cast_fp16 = softmax(axis = var_178, x = qk_3_cast_fp16)[name = tensor<string, []>("op_242_cast_fp16")];
121
+ tensor<bool, []> var_244_transpose_x_0 = const()[name = tensor<string, []>("op_244_transpose_x_0"), val = tensor<bool, []>(false)];
122
+ tensor<bool, []> var_244_transpose_y_0 = const()[name = tensor<string, []>("op_244_transpose_y_0"), val = tensor<bool, []>(false)];
123
+ tensor<fp16, [1, 8, 1500, 64]> transpose_55 = transpose(perm = var_238, x = var_237_cast_fp16)[name = tensor<string, []>("transpose_55")];
124
+ tensor<fp16, [1, 8, 1500, 64]> var_244_cast_fp16 = matmul(transpose_x = var_244_transpose_x_0, transpose_y = var_244_transpose_y_0, x = var_242_cast_fp16, y = transpose_55)[name = tensor<string, []>("op_244_cast_fp16")];
125
+ tensor<int32, [4]> var_245 = const()[name = tensor<string, []>("op_245"), val = tensor<int32, [4]>([0, 2, 1, 3])];
126
+ tensor<int32, [3]> concat_1 = const()[name = tensor<string, []>("concat_1"), val = tensor<int32, [3]>([1, 1500, 512])];
127
+ tensor<fp16, [1, 1500, 8, 64]> transpose_52 = transpose(perm = var_245, x = var_244_cast_fp16)[name = tensor<string, []>("transpose_52")];
128
+ tensor<fp16, [1, 1500, 512]> x_23_cast_fp16 = reshape(shape = concat_1, x = transpose_52)[name = tensor<string, []>("x_23_cast_fp16")];
129
+ tensor<fp16, [512, 512]> var_250_to_fp16 = const()[name = tensor<string, []>("op_250_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11240256)))];
130
+ tensor<fp16, [512]> var_251_to_fp16 = const()[name = tensor<string, []>("op_251_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11764608)))];
131
+ tensor<fp16, [1, 1500, 512]> linear_9_cast_fp16 = linear(bias = var_251_to_fp16, weight = var_250_to_fp16, x = x_23_cast_fp16)[name = tensor<string, []>("linear_9_cast_fp16")];
132
+ tensor<fp16, [1, 1500, 512]> x_25_cast_fp16 = add(x = x_19_cast_fp16, y = linear_9_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
133
+ tensor<int32, [1]> var_258_axes_0 = const()[name = tensor<string, []>("op_258_axes_0"), val = tensor<int32, [1]>([-1])];
134
+ tensor<fp16, [512]> blocks_1_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11765696)))];
135
+ tensor<fp16, [512]> blocks_1_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11766784)))];
136
+ tensor<fp16, [1, 1500, 512]> var_258_cast_fp16 = layer_norm(axes = var_258_axes_0, beta = blocks_1_mlp_ln_bias_to_fp16, epsilon = var_184_to_fp16, gamma = blocks_1_mlp_ln_weight_to_fp16, x = x_25_cast_fp16)[name = tensor<string, []>("op_258_cast_fp16")];
137
+ tensor<fp16, [2048, 512]> var_267_to_fp16 = const()[name = tensor<string, []>("op_267_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11767872)))];
138
+ tensor<fp16, [2048]> var_268_to_fp16 = const()[name = tensor<string, []>("op_268_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13865088)))];
139
+ tensor<fp16, [1, 1500, 2048]> linear_10_cast_fp16 = linear(bias = var_268_to_fp16, weight = var_267_to_fp16, x = var_258_cast_fp16)[name = tensor<string, []>("linear_10_cast_fp16")];
140
+ tensor<string, []> x_29_mode_0 = const()[name = tensor<string, []>("x_29_mode_0"), val = tensor<string, []>("EXACT")];
141
+ tensor<fp16, [1, 1500, 2048]> x_29_cast_fp16 = gelu(mode = x_29_mode_0, x = linear_10_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
142
+ tensor<fp16, [512, 2048]> var_273_to_fp16 = const()[name = tensor<string, []>("op_273_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13869248)))];
143
+ tensor<fp16, [512]> var_274_to_fp16 = const()[name = tensor<string, []>("op_274_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15966464)))];
144
+ tensor<fp16, [1, 1500, 512]> linear_11_cast_fp16 = linear(bias = var_274_to_fp16, weight = var_273_to_fp16, x = x_29_cast_fp16)[name = tensor<string, []>("linear_11_cast_fp16")];
145
+ tensor<fp16, [1, 1500, 512]> x_31_cast_fp16 = add(x = x_25_cast_fp16, y = linear_11_cast_fp16)[name = tensor<string, []>("x_31_cast_fp16")];
146
+ tensor<int32, []> var_284 = const()[name = tensor<string, []>("op_284"), val = tensor<int32, []>(-1)];
147
+ tensor<int32, [1]> var_300_axes_0 = const()[name = tensor<string, []>("op_300_axes_0"), val = tensor<int32, [1]>([-1])];
148
+ tensor<fp16, [512]> blocks_2_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15967552)))];
149
+ tensor<fp16, [512]> blocks_2_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15968640)))];
150
+ tensor<fp16, []> var_290_to_fp16 = const()[name = tensor<string, []>("op_290_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
151
+ tensor<fp16, [1, 1500, 512]> var_300_cast_fp16 = layer_norm(axes = var_300_axes_0, beta = blocks_2_attn_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_attn_ln_weight_to_fp16, x = x_31_cast_fp16)[name = tensor<string, []>("op_300_cast_fp16")];
152
+ tensor<fp16, [512, 512]> var_311_to_fp16 = const()[name = tensor<string, []>("op_311_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15969728)))];
153
+ tensor<fp16, [512]> var_312_to_fp16 = const()[name = tensor<string, []>("op_312_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16494080)))];
154
+ tensor<fp16, [1, 1500, 512]> linear_12_cast_fp16 = linear(bias = var_312_to_fp16, weight = var_311_to_fp16, x = var_300_cast_fp16)[name = tensor<string, []>("linear_12_cast_fp16")];
155
+ tensor<fp16, [512, 512]> var_315_to_fp16 = const()[name = tensor<string, []>("op_315_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16495168)))];
156
+ tensor<fp16, [1, 1500, 512]> linear_13_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_315_to_fp16, x = var_300_cast_fp16)[name = tensor<string, []>("linear_13_cast_fp16")];
157
+ tensor<fp16, [512, 512]> var_319_to_fp16 = const()[name = tensor<string, []>("op_319_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17019520)))];
158
+ tensor<fp16, [512]> var_320_to_fp16 = const()[name = tensor<string, []>("op_320_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17543872)))];
159
+ tensor<fp16, [1, 1500, 512]> linear_14_cast_fp16 = linear(bias = var_320_to_fp16, weight = var_319_to_fp16, x = var_300_cast_fp16)[name = tensor<string, []>("linear_14_cast_fp16")];
160
+ tensor<int32, [4]> var_328 = const()[name = tensor<string, []>("op_328"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
161
+ tensor<fp16, [1, 1500, 8, 64]> var_329_cast_fp16 = reshape(shape = var_328, x = linear_12_cast_fp16)[name = tensor<string, []>("op_329_cast_fp16")];
162
+ tensor<fp16, [1, 1, 1, 1]> const_46_to_fp16 = const()[name = tensor<string, []>("const_46_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
163
+ tensor<fp16, [1, 1500, 8, 64]> q_11_cast_fp16 = mul(x = var_329_cast_fp16, y = const_46_to_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
164
+ tensor<int32, [4]> var_335 = const()[name = tensor<string, []>("op_335"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
165
+ tensor<fp16, [1, 1500, 8, 64]> var_336_cast_fp16 = reshape(shape = var_335, x = linear_13_cast_fp16)[name = tensor<string, []>("op_336_cast_fp16")];
166
+ tensor<fp16, [1, 1, 1, 1]> const_47_to_fp16 = const()[name = tensor<string, []>("const_47_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
167
+ tensor<fp16, [1, 1500, 8, 64]> k_11_cast_fp16 = mul(x = var_336_cast_fp16, y = const_47_to_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
168
+ tensor<int32, [4]> var_342 = const()[name = tensor<string, []>("op_342"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
169
+ tensor<fp16, [1, 1500, 8, 64]> var_343_cast_fp16 = reshape(shape = var_342, x = linear_14_cast_fp16)[name = tensor<string, []>("op_343_cast_fp16")];
170
+ tensor<int32, [4]> var_344 = const()[name = tensor<string, []>("op_344"), val = tensor<int32, [4]>([0, 2, 1, 3])];
171
+ tensor<bool, []> qk_5_transpose_x_0 = const()[name = tensor<string, []>("qk_5_transpose_x_0"), val = tensor<bool, []>(false)];
172
+ tensor<bool, []> qk_5_transpose_y_0 = const()[name = tensor<string, []>("qk_5_transpose_y_0"), val = tensor<bool, []>(false)];
173
+ tensor<int32, [4]> transpose_28_perm_0 = const()[name = tensor<string, []>("transpose_28_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
174
+ tensor<int32, [4]> transpose_29_perm_0 = const()[name = tensor<string, []>("transpose_29_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
175
+ tensor<fp16, [1, 8, 64, 1500]> transpose_49 = transpose(perm = transpose_29_perm_0, x = k_11_cast_fp16)[name = tensor<string, []>("transpose_49")];
176
+ tensor<fp16, [1, 8, 1500, 64]> transpose_50 = transpose(perm = transpose_28_perm_0, x = q_11_cast_fp16)[name = tensor<string, []>("transpose_50")];
177
+ tensor<fp16, [1, 8, 1500, 1500]> qk_5_cast_fp16 = matmul(transpose_x = qk_5_transpose_x_0, transpose_y = qk_5_transpose_y_0, x = transpose_50, y = transpose_49)[name = tensor<string, []>("qk_5_cast_fp16")];
178
+ tensor<fp16, [1, 8, 1500, 1500]> var_348_cast_fp16 = softmax(axis = var_284, x = qk_5_cast_fp16)[name = tensor<string, []>("op_348_cast_fp16")];
179
+ tensor<bool, []> var_350_transpose_x_0 = const()[name = tensor<string, []>("op_350_transpose_x_0"), val = tensor<bool, []>(false)];
180
+ tensor<bool, []> var_350_transpose_y_0 = const()[name = tensor<string, []>("op_350_transpose_y_0"), val = tensor<bool, []>(false)];
181
+ tensor<fp16, [1, 8, 1500, 64]> transpose_51 = transpose(perm = var_344, x = var_343_cast_fp16)[name = tensor<string, []>("transpose_51")];
182
+ tensor<fp16, [1, 8, 1500, 64]> var_350_cast_fp16 = matmul(transpose_x = var_350_transpose_x_0, transpose_y = var_350_transpose_y_0, x = var_348_cast_fp16, y = transpose_51)[name = tensor<string, []>("op_350_cast_fp16")];
183
+ tensor<int32, [4]> var_351 = const()[name = tensor<string, []>("op_351"), val = tensor<int32, [4]>([0, 2, 1, 3])];
184
+ tensor<int32, [3]> concat_2 = const()[name = tensor<string, []>("concat_2"), val = tensor<int32, [3]>([1, 1500, 512])];
185
+ tensor<fp16, [1, 1500, 8, 64]> transpose_48 = transpose(perm = var_351, x = var_350_cast_fp16)[name = tensor<string, []>("transpose_48")];
186
+ tensor<fp16, [1, 1500, 512]> x_35_cast_fp16 = reshape(shape = concat_2, x = transpose_48)[name = tensor<string, []>("x_35_cast_fp16")];
187
+ tensor<fp16, [512, 512]> var_356_to_fp16 = const()[name = tensor<string, []>("op_356_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17544960)))];
188
+ tensor<fp16, [512]> var_357_to_fp16 = const()[name = tensor<string, []>("op_357_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18069312)))];
189
+ tensor<fp16, [1, 1500, 512]> linear_15_cast_fp16 = linear(bias = var_357_to_fp16, weight = var_356_to_fp16, x = x_35_cast_fp16)[name = tensor<string, []>("linear_15_cast_fp16")];
190
+ tensor<fp16, [1, 1500, 512]> x_37_cast_fp16 = add(x = x_31_cast_fp16, y = linear_15_cast_fp16)[name = tensor<string, []>("x_37_cast_fp16")];
191
+ tensor<int32, [1]> var_364_axes_0 = const()[name = tensor<string, []>("op_364_axes_0"), val = tensor<int32, [1]>([-1])];
192
+ tensor<fp16, [512]> blocks_2_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18070400)))];
193
+ tensor<fp16, [512]> blocks_2_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18071488)))];
194
+ tensor<fp16, [1, 1500, 512]> var_364_cast_fp16 = layer_norm(axes = var_364_axes_0, beta = blocks_2_mlp_ln_bias_to_fp16, epsilon = var_290_to_fp16, gamma = blocks_2_mlp_ln_weight_to_fp16, x = x_37_cast_fp16)[name = tensor<string, []>("op_364_cast_fp16")];
195
+ tensor<fp16, [2048, 512]> var_373_to_fp16 = const()[name = tensor<string, []>("op_373_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(18072576)))];
196
+ tensor<fp16, [2048]> var_374_to_fp16 = const()[name = tensor<string, []>("op_374_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20169792)))];
197
+ tensor<fp16, [1, 1500, 2048]> linear_16_cast_fp16 = linear(bias = var_374_to_fp16, weight = var_373_to_fp16, x = var_364_cast_fp16)[name = tensor<string, []>("linear_16_cast_fp16")];
198
+ tensor<string, []> x_41_mode_0 = const()[name = tensor<string, []>("x_41_mode_0"), val = tensor<string, []>("EXACT")];
199
+ tensor<fp16, [1, 1500, 2048]> x_41_cast_fp16 = gelu(mode = x_41_mode_0, x = linear_16_cast_fp16)[name = tensor<string, []>("x_41_cast_fp16")];
200
+ tensor<fp16, [512, 2048]> var_379_to_fp16 = const()[name = tensor<string, []>("op_379_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20173952)))];
201
+ tensor<fp16, [512]> var_380_to_fp16 = const()[name = tensor<string, []>("op_380_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22271168)))];
202
+ tensor<fp16, [1, 1500, 512]> linear_17_cast_fp16 = linear(bias = var_380_to_fp16, weight = var_379_to_fp16, x = x_41_cast_fp16)[name = tensor<string, []>("linear_17_cast_fp16")];
203
+ tensor<fp16, [1, 1500, 512]> x_43_cast_fp16 = add(x = x_37_cast_fp16, y = linear_17_cast_fp16)[name = tensor<string, []>("x_43_cast_fp16")];
204
+ tensor<int32, []> var_390 = const()[name = tensor<string, []>("op_390"), val = tensor<int32, []>(-1)];
205
+ tensor<int32, [1]> var_406_axes_0 = const()[name = tensor<string, []>("op_406_axes_0"), val = tensor<int32, [1]>([-1])];
206
+ tensor<fp16, [512]> blocks_3_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22272256)))];
207
+ tensor<fp16, [512]> blocks_3_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22273344)))];
208
+ tensor<fp16, []> var_396_to_fp16 = const()[name = tensor<string, []>("op_396_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
209
+ tensor<fp16, [1, 1500, 512]> var_406_cast_fp16 = layer_norm(axes = var_406_axes_0, beta = blocks_3_attn_ln_bias_to_fp16, epsilon = var_396_to_fp16, gamma = blocks_3_attn_ln_weight_to_fp16, x = x_43_cast_fp16)[name = tensor<string, []>("op_406_cast_fp16")];
210
+ tensor<fp16, [512, 512]> var_417_to_fp16 = const()[name = tensor<string, []>("op_417_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22274432)))];
211
+ tensor<fp16, [512]> var_418_to_fp16 = const()[name = tensor<string, []>("op_418_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22798784)))];
212
+ tensor<fp16, [1, 1500, 512]> linear_18_cast_fp16 = linear(bias = var_418_to_fp16, weight = var_417_to_fp16, x = var_406_cast_fp16)[name = tensor<string, []>("linear_18_cast_fp16")];
213
+ tensor<fp16, [512, 512]> var_421_to_fp16 = const()[name = tensor<string, []>("op_421_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22799872)))];
214
+ tensor<fp16, [1, 1500, 512]> linear_19_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_421_to_fp16, x = var_406_cast_fp16)[name = tensor<string, []>("linear_19_cast_fp16")];
215
+ tensor<fp16, [512, 512]> var_425_to_fp16 = const()[name = tensor<string, []>("op_425_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23324224)))];
216
+ tensor<fp16, [512]> var_426_to_fp16 = const()[name = tensor<string, []>("op_426_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23848576)))];
217
+ tensor<fp16, [1, 1500, 512]> linear_20_cast_fp16 = linear(bias = var_426_to_fp16, weight = var_425_to_fp16, x = var_406_cast_fp16)[name = tensor<string, []>("linear_20_cast_fp16")];
218
+ tensor<int32, [4]> var_434 = const()[name = tensor<string, []>("op_434"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
219
+ tensor<fp16, [1, 1500, 8, 64]> var_435_cast_fp16 = reshape(shape = var_434, x = linear_18_cast_fp16)[name = tensor<string, []>("op_435_cast_fp16")];
220
+ tensor<fp16, [1, 1, 1, 1]> const_48_to_fp16 = const()[name = tensor<string, []>("const_48_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
221
+ tensor<fp16, [1, 1500, 8, 64]> q_15_cast_fp16 = mul(x = var_435_cast_fp16, y = const_48_to_fp16)[name = tensor<string, []>("q_15_cast_fp16")];
222
+ tensor<int32, [4]> var_441 = const()[name = tensor<string, []>("op_441"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
223
+ tensor<fp16, [1, 1500, 8, 64]> var_442_cast_fp16 = reshape(shape = var_441, x = linear_19_cast_fp16)[name = tensor<string, []>("op_442_cast_fp16")];
224
+ tensor<fp16, [1, 1, 1, 1]> const_49_to_fp16 = const()[name = tensor<string, []>("const_49_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
225
+ tensor<fp16, [1, 1500, 8, 64]> k_15_cast_fp16 = mul(x = var_442_cast_fp16, y = const_49_to_fp16)[name = tensor<string, []>("k_15_cast_fp16")];
226
+ tensor<int32, [4]> var_448 = const()[name = tensor<string, []>("op_448"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
227
+ tensor<fp16, [1, 1500, 8, 64]> var_449_cast_fp16 = reshape(shape = var_448, x = linear_20_cast_fp16)[name = tensor<string, []>("op_449_cast_fp16")];
228
+ tensor<int32, [4]> var_450 = const()[name = tensor<string, []>("op_450"), val = tensor<int32, [4]>([0, 2, 1, 3])];
229
+ tensor<bool, []> qk_7_transpose_x_0 = const()[name = tensor<string, []>("qk_7_transpose_x_0"), val = tensor<bool, []>(false)];
230
+ tensor<bool, []> qk_7_transpose_y_0 = const()[name = tensor<string, []>("qk_7_transpose_y_0"), val = tensor<bool, []>(false)];
231
+ tensor<int32, [4]> transpose_30_perm_0 = const()[name = tensor<string, []>("transpose_30_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
232
+ tensor<int32, [4]> transpose_31_perm_0 = const()[name = tensor<string, []>("transpose_31_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
233
+ tensor<fp16, [1, 8, 64, 1500]> transpose_45 = transpose(perm = transpose_31_perm_0, x = k_15_cast_fp16)[name = tensor<string, []>("transpose_45")];
234
+ tensor<fp16, [1, 8, 1500, 64]> transpose_46 = transpose(perm = transpose_30_perm_0, x = q_15_cast_fp16)[name = tensor<string, []>("transpose_46")];
235
+ tensor<fp16, [1, 8, 1500, 1500]> qk_7_cast_fp16 = matmul(transpose_x = qk_7_transpose_x_0, transpose_y = qk_7_transpose_y_0, x = transpose_46, y = transpose_45)[name = tensor<string, []>("qk_7_cast_fp16")];
236
+ tensor<fp16, [1, 8, 1500, 1500]> var_454_cast_fp16 = softmax(axis = var_390, x = qk_7_cast_fp16)[name = tensor<string, []>("op_454_cast_fp16")];
237
+ tensor<bool, []> var_456_transpose_x_0 = const()[name = tensor<string, []>("op_456_transpose_x_0"), val = tensor<bool, []>(false)];
238
+ tensor<bool, []> var_456_transpose_y_0 = const()[name = tensor<string, []>("op_456_transpose_y_0"), val = tensor<bool, []>(false)];
239
+ tensor<fp16, [1, 8, 1500, 64]> transpose_47 = transpose(perm = var_450, x = var_449_cast_fp16)[name = tensor<string, []>("transpose_47")];
240
+ tensor<fp16, [1, 8, 1500, 64]> var_456_cast_fp16 = matmul(transpose_x = var_456_transpose_x_0, transpose_y = var_456_transpose_y_0, x = var_454_cast_fp16, y = transpose_47)[name = tensor<string, []>("op_456_cast_fp16")];
241
+ tensor<int32, [4]> var_457 = const()[name = tensor<string, []>("op_457"), val = tensor<int32, [4]>([0, 2, 1, 3])];
242
+ tensor<int32, [3]> concat_3 = const()[name = tensor<string, []>("concat_3"), val = tensor<int32, [3]>([1, 1500, 512])];
243
+ tensor<fp16, [1, 1500, 8, 64]> transpose_44 = transpose(perm = var_457, x = var_456_cast_fp16)[name = tensor<string, []>("transpose_44")];
244
+ tensor<fp16, [1, 1500, 512]> x_47_cast_fp16 = reshape(shape = concat_3, x = transpose_44)[name = tensor<string, []>("x_47_cast_fp16")];
245
+ tensor<fp16, [512, 512]> var_462_to_fp16 = const()[name = tensor<string, []>("op_462_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23849664)))];
246
+ tensor<fp16, [512]> var_463_to_fp16 = const()[name = tensor<string, []>("op_463_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24374016)))];
247
+ tensor<fp16, [1, 1500, 512]> linear_21_cast_fp16 = linear(bias = var_463_to_fp16, weight = var_462_to_fp16, x = x_47_cast_fp16)[name = tensor<string, []>("linear_21_cast_fp16")];
248
+ tensor<fp16, [1, 1500, 512]> x_49_cast_fp16 = add(x = x_43_cast_fp16, y = linear_21_cast_fp16)[name = tensor<string, []>("x_49_cast_fp16")];
249
+ tensor<int32, [1]> var_470_axes_0 = const()[name = tensor<string, []>("op_470_axes_0"), val = tensor<int32, [1]>([-1])];
250
+ tensor<fp16, [512]> blocks_3_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24375104)))];
251
+ tensor<fp16, [512]> blocks_3_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24376192)))];
252
+ tensor<fp16, [1, 1500, 512]> var_470_cast_fp16 = layer_norm(axes = var_470_axes_0, beta = blocks_3_mlp_ln_bias_to_fp16, epsilon = var_396_to_fp16, gamma = blocks_3_mlp_ln_weight_to_fp16, x = x_49_cast_fp16)[name = tensor<string, []>("op_470_cast_fp16")];
253
+ tensor<fp16, [2048, 512]> var_479_to_fp16 = const()[name = tensor<string, []>("op_479_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24377280)))];
254
+ tensor<fp16, [2048]> var_480_to_fp16 = const()[name = tensor<string, []>("op_480_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(26474496)))];
255
+ tensor<fp16, [1, 1500, 2048]> linear_22_cast_fp16 = linear(bias = var_480_to_fp16, weight = var_479_to_fp16, x = var_470_cast_fp16)[name = tensor<string, []>("linear_22_cast_fp16")];
256
+ tensor<string, []> x_53_mode_0 = const()[name = tensor<string, []>("x_53_mode_0"), val = tensor<string, []>("EXACT")];
257
+ tensor<fp16, [1, 1500, 2048]> x_53_cast_fp16 = gelu(mode = x_53_mode_0, x = linear_22_cast_fp16)[name = tensor<string, []>("x_53_cast_fp16")];
258
+ tensor<fp16, [512, 2048]> var_485_to_fp16 = const()[name = tensor<string, []>("op_485_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(26478656)))];
259
+ tensor<fp16, [512]> var_486_to_fp16 = const()[name = tensor<string, []>("op_486_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28575872)))];
260
+ tensor<fp16, [1, 1500, 512]> linear_23_cast_fp16 = linear(bias = var_486_to_fp16, weight = var_485_to_fp16, x = x_53_cast_fp16)[name = tensor<string, []>("linear_23_cast_fp16")];
261
+ tensor<fp16, [1, 1500, 512]> x_55_cast_fp16 = add(x = x_49_cast_fp16, y = linear_23_cast_fp16)[name = tensor<string, []>("x_55_cast_fp16")];
262
+ tensor<int32, []> var_496 = const()[name = tensor<string, []>("op_496"), val = tensor<int32, []>(-1)];
263
+ tensor<int32, [1]> var_512_axes_0 = const()[name = tensor<string, []>("op_512_axes_0"), val = tensor<int32, [1]>([-1])];
264
+ tensor<fp16, [512]> blocks_4_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_4_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28576960)))];
265
+ tensor<fp16, [512]> blocks_4_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_4_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28578048)))];
266
+ tensor<fp16, []> var_502_to_fp16 = const()[name = tensor<string, []>("op_502_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
267
+ tensor<fp16, [1, 1500, 512]> var_512_cast_fp16 = layer_norm(axes = var_512_axes_0, beta = blocks_4_attn_ln_bias_to_fp16, epsilon = var_502_to_fp16, gamma = blocks_4_attn_ln_weight_to_fp16, x = x_55_cast_fp16)[name = tensor<string, []>("op_512_cast_fp16")];
268
+ tensor<fp16, [512, 512]> var_523_to_fp16 = const()[name = tensor<string, []>("op_523_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(28579136)))];
269
+ tensor<fp16, [512]> var_524_to_fp16 = const()[name = tensor<string, []>("op_524_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29103488)))];
270
+ tensor<fp16, [1, 1500, 512]> linear_24_cast_fp16 = linear(bias = var_524_to_fp16, weight = var_523_to_fp16, x = var_512_cast_fp16)[name = tensor<string, []>("linear_24_cast_fp16")];
271
+ tensor<fp16, [512, 512]> var_527_to_fp16 = const()[name = tensor<string, []>("op_527_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29104576)))];
272
+ tensor<fp16, [1, 1500, 512]> linear_25_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_527_to_fp16, x = var_512_cast_fp16)[name = tensor<string, []>("linear_25_cast_fp16")];
273
+ tensor<fp16, [512, 512]> var_531_to_fp16 = const()[name = tensor<string, []>("op_531_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29628928)))];
274
+ tensor<fp16, [512]> var_532_to_fp16 = const()[name = tensor<string, []>("op_532_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30153280)))];
275
+ tensor<fp16, [1, 1500, 512]> linear_26_cast_fp16 = linear(bias = var_532_to_fp16, weight = var_531_to_fp16, x = var_512_cast_fp16)[name = tensor<string, []>("linear_26_cast_fp16")];
276
+ tensor<int32, [4]> var_540 = const()[name = tensor<string, []>("op_540"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
277
+ tensor<fp16, [1, 1500, 8, 64]> var_541_cast_fp16 = reshape(shape = var_540, x = linear_24_cast_fp16)[name = tensor<string, []>("op_541_cast_fp16")];
278
+ tensor<fp16, [1, 1, 1, 1]> const_50_to_fp16 = const()[name = tensor<string, []>("const_50_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
279
+ tensor<fp16, [1, 1500, 8, 64]> q_19_cast_fp16 = mul(x = var_541_cast_fp16, y = const_50_to_fp16)[name = tensor<string, []>("q_19_cast_fp16")];
280
+ tensor<int32, [4]> var_547 = const()[name = tensor<string, []>("op_547"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
281
+ tensor<fp16, [1, 1500, 8, 64]> var_548_cast_fp16 = reshape(shape = var_547, x = linear_25_cast_fp16)[name = tensor<string, []>("op_548_cast_fp16")];
282
+ tensor<fp16, [1, 1, 1, 1]> const_51_to_fp16 = const()[name = tensor<string, []>("const_51_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
283
+ tensor<fp16, [1, 1500, 8, 64]> k_19_cast_fp16 = mul(x = var_548_cast_fp16, y = const_51_to_fp16)[name = tensor<string, []>("k_19_cast_fp16")];
284
+ tensor<int32, [4]> var_554 = const()[name = tensor<string, []>("op_554"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
285
+ tensor<fp16, [1, 1500, 8, 64]> var_555_cast_fp16 = reshape(shape = var_554, x = linear_26_cast_fp16)[name = tensor<string, []>("op_555_cast_fp16")];
286
+ tensor<int32, [4]> var_556 = const()[name = tensor<string, []>("op_556"), val = tensor<int32, [4]>([0, 2, 1, 3])];
287
+ tensor<bool, []> qk_9_transpose_x_0 = const()[name = tensor<string, []>("qk_9_transpose_x_0"), val = tensor<bool, []>(false)];
288
+ tensor<bool, []> qk_9_transpose_y_0 = const()[name = tensor<string, []>("qk_9_transpose_y_0"), val = tensor<bool, []>(false)];
289
+ tensor<int32, [4]> transpose_32_perm_0 = const()[name = tensor<string, []>("transpose_32_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
290
+ tensor<int32, [4]> transpose_33_perm_0 = const()[name = tensor<string, []>("transpose_33_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
291
+ tensor<fp16, [1, 8, 64, 1500]> transpose_41 = transpose(perm = transpose_33_perm_0, x = k_19_cast_fp16)[name = tensor<string, []>("transpose_41")];
292
+ tensor<fp16, [1, 8, 1500, 64]> transpose_42 = transpose(perm = transpose_32_perm_0, x = q_19_cast_fp16)[name = tensor<string, []>("transpose_42")];
293
+ tensor<fp16, [1, 8, 1500, 1500]> qk_9_cast_fp16 = matmul(transpose_x = qk_9_transpose_x_0, transpose_y = qk_9_transpose_y_0, x = transpose_42, y = transpose_41)[name = tensor<string, []>("qk_9_cast_fp16")];
294
+ tensor<fp16, [1, 8, 1500, 1500]> var_560_cast_fp16 = softmax(axis = var_496, x = qk_9_cast_fp16)[name = tensor<string, []>("op_560_cast_fp16")];
295
+ tensor<bool, []> var_562_transpose_x_0 = const()[name = tensor<string, []>("op_562_transpose_x_0"), val = tensor<bool, []>(false)];
296
+ tensor<bool, []> var_562_transpose_y_0 = const()[name = tensor<string, []>("op_562_transpose_y_0"), val = tensor<bool, []>(false)];
297
+ tensor<fp16, [1, 8, 1500, 64]> transpose_43 = transpose(perm = var_556, x = var_555_cast_fp16)[name = tensor<string, []>("transpose_43")];
298
+ tensor<fp16, [1, 8, 1500, 64]> var_562_cast_fp16 = matmul(transpose_x = var_562_transpose_x_0, transpose_y = var_562_transpose_y_0, x = var_560_cast_fp16, y = transpose_43)[name = tensor<string, []>("op_562_cast_fp16")];
299
+ tensor<int32, [4]> var_563 = const()[name = tensor<string, []>("op_563"), val = tensor<int32, [4]>([0, 2, 1, 3])];
300
+ tensor<int32, [3]> concat_4 = const()[name = tensor<string, []>("concat_4"), val = tensor<int32, [3]>([1, 1500, 512])];
301
+ tensor<fp16, [1, 1500, 8, 64]> transpose_40 = transpose(perm = var_563, x = var_562_cast_fp16)[name = tensor<string, []>("transpose_40")];
302
+ tensor<fp16, [1, 1500, 512]> x_59_cast_fp16 = reshape(shape = concat_4, x = transpose_40)[name = tensor<string, []>("x_59_cast_fp16")];
303
+ tensor<fp16, [512, 512]> var_568_to_fp16 = const()[name = tensor<string, []>("op_568_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30154368)))];
304
+ tensor<fp16, [512]> var_569_to_fp16 = const()[name = tensor<string, []>("op_569_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30678720)))];
305
+ tensor<fp16, [1, 1500, 512]> linear_27_cast_fp16 = linear(bias = var_569_to_fp16, weight = var_568_to_fp16, x = x_59_cast_fp16)[name = tensor<string, []>("linear_27_cast_fp16")];
306
+ tensor<fp16, [1, 1500, 512]> x_61_cast_fp16 = add(x = x_55_cast_fp16, y = linear_27_cast_fp16)[name = tensor<string, []>("x_61_cast_fp16")];
307
+ tensor<int32, [1]> var_576_axes_0 = const()[name = tensor<string, []>("op_576_axes_0"), val = tensor<int32, [1]>([-1])];
308
+ tensor<fp16, [512]> blocks_4_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_4_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30679808)))];
309
+ tensor<fp16, [512]> blocks_4_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_4_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30680896)))];
310
+ tensor<fp16, [1, 1500, 512]> var_576_cast_fp16 = layer_norm(axes = var_576_axes_0, beta = blocks_4_mlp_ln_bias_to_fp16, epsilon = var_502_to_fp16, gamma = blocks_4_mlp_ln_weight_to_fp16, x = x_61_cast_fp16)[name = tensor<string, []>("op_576_cast_fp16")];
311
+ tensor<fp16, [2048, 512]> var_585_to_fp16 = const()[name = tensor<string, []>("op_585_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30681984)))];
312
+ tensor<fp16, [2048]> var_586_to_fp16 = const()[name = tensor<string, []>("op_586_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(32779200)))];
313
+ tensor<fp16, [1, 1500, 2048]> linear_28_cast_fp16 = linear(bias = var_586_to_fp16, weight = var_585_to_fp16, x = var_576_cast_fp16)[name = tensor<string, []>("linear_28_cast_fp16")];
314
+ tensor<string, []> x_65_mode_0 = const()[name = tensor<string, []>("x_65_mode_0"), val = tensor<string, []>("EXACT")];
315
+ tensor<fp16, [1, 1500, 2048]> x_65_cast_fp16 = gelu(mode = x_65_mode_0, x = linear_28_cast_fp16)[name = tensor<string, []>("x_65_cast_fp16")];
316
+ tensor<fp16, [512, 2048]> var_591_to_fp16 = const()[name = tensor<string, []>("op_591_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(32783360)))];
317
+ tensor<fp16, [512]> var_592_to_fp16 = const()[name = tensor<string, []>("op_592_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34880576)))];
318
+ tensor<fp16, [1, 1500, 512]> linear_29_cast_fp16 = linear(bias = var_592_to_fp16, weight = var_591_to_fp16, x = x_65_cast_fp16)[name = tensor<string, []>("linear_29_cast_fp16")];
319
+ tensor<fp16, [1, 1500, 512]> x_67_cast_fp16 = add(x = x_61_cast_fp16, y = linear_29_cast_fp16)[name = tensor<string, []>("x_67_cast_fp16")];
320
+ tensor<int32, []> var_602 = const()[name = tensor<string, []>("op_602"), val = tensor<int32, []>(-1)];
321
+ tensor<int32, [1]> var_618_axes_0 = const()[name = tensor<string, []>("op_618_axes_0"), val = tensor<int32, [1]>([-1])];
322
+ tensor<fp16, [512]> blocks_5_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_5_attn_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34881664)))];
323
+ tensor<fp16, [512]> blocks_5_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_5_attn_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34882752)))];
324
+ tensor<fp16, []> var_608_to_fp16 = const()[name = tensor<string, []>("op_608_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
325
+ tensor<fp16, [1, 1500, 512]> var_618_cast_fp16 = layer_norm(axes = var_618_axes_0, beta = blocks_5_attn_ln_bias_to_fp16, epsilon = var_608_to_fp16, gamma = blocks_5_attn_ln_weight_to_fp16, x = x_67_cast_fp16)[name = tensor<string, []>("op_618_cast_fp16")];
326
+ tensor<fp16, [512, 512]> var_629_to_fp16 = const()[name = tensor<string, []>("op_629_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(34883840)))];
327
+ tensor<fp16, [512]> var_630_to_fp16 = const()[name = tensor<string, []>("op_630_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35408192)))];
328
+ tensor<fp16, [1, 1500, 512]> linear_30_cast_fp16 = linear(bias = var_630_to_fp16, weight = var_629_to_fp16, x = var_618_cast_fp16)[name = tensor<string, []>("linear_30_cast_fp16")];
329
+ tensor<fp16, [512, 512]> var_633_to_fp16 = const()[name = tensor<string, []>("op_633_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35409280)))];
330
+ tensor<fp16, [1, 1500, 512]> linear_31_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_633_to_fp16, x = var_618_cast_fp16)[name = tensor<string, []>("linear_31_cast_fp16")];
331
+ tensor<fp16, [512, 512]> var_637_to_fp16 = const()[name = tensor<string, []>("op_637_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(35933632)))];
332
+ tensor<fp16, [512]> var_638_to_fp16 = const()[name = tensor<string, []>("op_638_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36457984)))];
333
+ tensor<fp16, [1, 1500, 512]> linear_32_cast_fp16 = linear(bias = var_638_to_fp16, weight = var_637_to_fp16, x = var_618_cast_fp16)[name = tensor<string, []>("linear_32_cast_fp16")];
334
+ tensor<int32, [4]> var_646 = const()[name = tensor<string, []>("op_646"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
335
+ tensor<fp16, [1, 1500, 8, 64]> var_647_cast_fp16 = reshape(shape = var_646, x = linear_30_cast_fp16)[name = tensor<string, []>("op_647_cast_fp16")];
336
+ tensor<fp16, [1, 1, 1, 1]> const_52_to_fp16 = const()[name = tensor<string, []>("const_52_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
337
+ tensor<fp16, [1, 1500, 8, 64]> q_cast_fp16 = mul(x = var_647_cast_fp16, y = const_52_to_fp16)[name = tensor<string, []>("q_cast_fp16")];
338
+ tensor<int32, [4]> var_653 = const()[name = tensor<string, []>("op_653"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
339
+ tensor<fp16, [1, 1500, 8, 64]> var_654_cast_fp16 = reshape(shape = var_653, x = linear_31_cast_fp16)[name = tensor<string, []>("op_654_cast_fp16")];
340
+ tensor<fp16, [1, 1, 1, 1]> const_53_to_fp16 = const()[name = tensor<string, []>("const_53_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
341
+ tensor<fp16, [1, 1500, 8, 64]> k_cast_fp16 = mul(x = var_654_cast_fp16, y = const_53_to_fp16)[name = tensor<string, []>("k_cast_fp16")];
342
+ tensor<int32, [4]> var_660 = const()[name = tensor<string, []>("op_660"), val = tensor<int32, [4]>([1, 1500, 8, -1])];
343
+ tensor<fp16, [1, 1500, 8, 64]> var_661_cast_fp16 = reshape(shape = var_660, x = linear_32_cast_fp16)[name = tensor<string, []>("op_661_cast_fp16")];
344
+ tensor<int32, [4]> var_662 = const()[name = tensor<string, []>("op_662"), val = tensor<int32, [4]>([0, 2, 1, 3])];
345
+ tensor<bool, []> qk_transpose_x_0 = const()[name = tensor<string, []>("qk_transpose_x_0"), val = tensor<bool, []>(false)];
346
+ tensor<bool, []> qk_transpose_y_0 = const()[name = tensor<string, []>("qk_transpose_y_0"), val = tensor<bool, []>(false)];
347
+ tensor<int32, [4]> transpose_34_perm_0 = const()[name = tensor<string, []>("transpose_34_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
348
+ tensor<int32, [4]> transpose_35_perm_0 = const()[name = tensor<string, []>("transpose_35_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
349
+ tensor<fp16, [1, 8, 64, 1500]> transpose_37 = transpose(perm = transpose_35_perm_0, x = k_cast_fp16)[name = tensor<string, []>("transpose_37")];
350
+ tensor<fp16, [1, 8, 1500, 64]> transpose_38 = transpose(perm = transpose_34_perm_0, x = q_cast_fp16)[name = tensor<string, []>("transpose_38")];
351
+ tensor<fp16, [1, 8, 1500, 1500]> qk_cast_fp16 = matmul(transpose_x = qk_transpose_x_0, transpose_y = qk_transpose_y_0, x = transpose_38, y = transpose_37)[name = tensor<string, []>("qk_cast_fp16")];
352
+ tensor<fp16, [1, 8, 1500, 1500]> var_666_cast_fp16 = softmax(axis = var_602, x = qk_cast_fp16)[name = tensor<string, []>("op_666_cast_fp16")];
353
+ tensor<bool, []> var_668_transpose_x_0 = const()[name = tensor<string, []>("op_668_transpose_x_0"), val = tensor<bool, []>(false)];
354
+ tensor<bool, []> var_668_transpose_y_0 = const()[name = tensor<string, []>("op_668_transpose_y_0"), val = tensor<bool, []>(false)];
355
+ tensor<fp16, [1, 8, 1500, 64]> transpose_39 = transpose(perm = var_662, x = var_661_cast_fp16)[name = tensor<string, []>("transpose_39")];
356
+ tensor<fp16, [1, 8, 1500, 64]> var_668_cast_fp16 = matmul(transpose_x = var_668_transpose_x_0, transpose_y = var_668_transpose_y_0, x = var_666_cast_fp16, y = transpose_39)[name = tensor<string, []>("op_668_cast_fp16")];
357
+ tensor<int32, [4]> var_669 = const()[name = tensor<string, []>("op_669"), val = tensor<int32, [4]>([0, 2, 1, 3])];
358
+ tensor<int32, [3]> concat_5 = const()[name = tensor<string, []>("concat_5"), val = tensor<int32, [3]>([1, 1500, 512])];
359
+ tensor<fp16, [1, 1500, 8, 64]> transpose_36 = transpose(perm = var_669, x = var_668_cast_fp16)[name = tensor<string, []>("transpose_36")];
360
+ tensor<fp16, [1, 1500, 512]> x_71_cast_fp16 = reshape(shape = concat_5, x = transpose_36)[name = tensor<string, []>("x_71_cast_fp16")];
361
+ tensor<fp16, [512, 512]> var_674_to_fp16 = const()[name = tensor<string, []>("op_674_to_fp16"), val = tensor<fp16, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36459072)))];
362
+ tensor<fp16, [512]> var_675_to_fp16 = const()[name = tensor<string, []>("op_675_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36983424)))];
363
+ tensor<fp16, [1, 1500, 512]> linear_33_cast_fp16 = linear(bias = var_675_to_fp16, weight = var_674_to_fp16, x = x_71_cast_fp16)[name = tensor<string, []>("linear_33_cast_fp16")];
364
+ tensor<fp16, [1, 1500, 512]> x_73_cast_fp16 = add(x = x_67_cast_fp16, y = linear_33_cast_fp16)[name = tensor<string, []>("x_73_cast_fp16")];
365
+ tensor<int32, [1]> var_682_axes_0 = const()[name = tensor<string, []>("op_682_axes_0"), val = tensor<int32, [1]>([-1])];
366
+ tensor<fp16, [512]> blocks_5_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_5_mlp_ln_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36984512)))];
367
+ tensor<fp16, [512]> blocks_5_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_5_mlp_ln_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36985600)))];
368
+ tensor<fp16, [1, 1500, 512]> var_682_cast_fp16 = layer_norm(axes = var_682_axes_0, beta = blocks_5_mlp_ln_bias_to_fp16, epsilon = var_608_to_fp16, gamma = blocks_5_mlp_ln_weight_to_fp16, x = x_73_cast_fp16)[name = tensor<string, []>("op_682_cast_fp16")];
369
+ tensor<fp16, [2048, 512]> var_691_to_fp16 = const()[name = tensor<string, []>("op_691_to_fp16"), val = tensor<fp16, [2048, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36986688)))];
370
+ tensor<fp16, [2048]> var_692_to_fp16 = const()[name = tensor<string, []>("op_692_to_fp16"), val = tensor<fp16, [2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(39083904)))];
371
+ tensor<fp16, [1, 1500, 2048]> linear_34_cast_fp16 = linear(bias = var_692_to_fp16, weight = var_691_to_fp16, x = var_682_cast_fp16)[name = tensor<string, []>("linear_34_cast_fp16")];
372
+ tensor<string, []> x_77_mode_0 = const()[name = tensor<string, []>("x_77_mode_0"), val = tensor<string, []>("EXACT")];
373
+ tensor<fp16, [1, 1500, 2048]> x_77_cast_fp16 = gelu(mode = x_77_mode_0, x = linear_34_cast_fp16)[name = tensor<string, []>("x_77_cast_fp16")];
374
+ tensor<fp16, [512, 2048]> var_697_to_fp16 = const()[name = tensor<string, []>("op_697_to_fp16"), val = tensor<fp16, [512, 2048]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(39088064)))];
375
+ tensor<fp16, [512]> var_698_to_fp16 = const()[name = tensor<string, []>("op_698_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41185280)))];
376
+ tensor<fp16, [1, 1500, 512]> linear_35_cast_fp16 = linear(bias = var_698_to_fp16, weight = var_697_to_fp16, x = x_77_cast_fp16)[name = tensor<string, []>("linear_35_cast_fp16")];
377
+ tensor<fp16, [1, 1500, 512]> x_cast_fp16 = add(x = x_73_cast_fp16, y = linear_35_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
378
+ tensor<int32, [1]> var_711_axes_0 = const()[name = tensor<string, []>("op_711_axes_0"), val = tensor<int32, [1]>([-1])];
379
+ tensor<fp16, [512]> ln_post_weight_to_fp16 = const()[name = tensor<string, []>("ln_post_weight_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41186368)))];
380
+ tensor<fp16, [512]> ln_post_bias_to_fp16 = const()[name = tensor<string, []>("ln_post_bias_to_fp16"), val = tensor<fp16, [512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(41187456)))];
381
+ tensor<fp16, []> var_702_to_fp16 = const()[name = tensor<string, []>("op_702_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
382
+ tensor<fp16, [1, 1500, 512]> output = layer_norm(axes = var_711_axes_0, beta = ln_post_bias_to_fp16, epsilon = var_702_to_fp16, gamma = ln_post_weight_to_fp16, x = x_cast_fp16)[name = tensor<string, []>("op_711_cast_fp16")];
383
+ } -> (output);
384
+ }
ggml-base-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b072fd4aebbd60ad4e35df05d3c2ea47267c00f25ac82f4dbcf49fb38e19faec
+ size 41188544
ggml-base.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84ceff52bae6817d2b3c3f5c96e6cfad66352047dd390b86b8aaf0e6f30dff9c
+ size 105151868
ggml-large-v3-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19ae49b39ff80abb2db554928e7ab329e37a541c7e7175329ca8ae7281cd9295
+ size 243
ggml-large-v3-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dbdcd1920ddc3acd4ea90acfe938c309d56666389952650f01091e5ca40408b
+ size 319
ggml-large-v3-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,66 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 1500 × 1280)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1500, 1280]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 8,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios17.layerNorm" : 65,
23
+ "Ios17.reshape" : 128,
24
+ "Ios17.conv" : 2,
25
+ "Ios17.linear" : 192,
26
+ "Ios17.add" : 65,
27
+ "Ios17.matmul" : 64,
28
+ "Ios16.gelu" : 34,
29
+ "Ios16.softmax" : 32,
30
+ "Ios17.mul" : 64,
31
+ "Ios17.transpose" : 129
32
+ },
33
+ "computePrecision" : "Mixed (Float16, Int32)",
34
+ "isUpdatable" : "0",
35
+ "availability" : {
36
+ "macOS" : "14.0",
37
+ "tvOS" : "17.0",
38
+ "visionOS" : "1.0",
39
+ "watchOS" : "10.0",
40
+ "iOS" : "17.0",
41
+ "macCatalyst" : "17.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
48
+ "com.github.apple.coremltools.source" : "torch==2.2.2",
49
+ "com.github.apple.coremltools.version" : "7.2"
50
+ },
51
+ "inputSchema" : [
52
+ {
53
+ "hasShapeFlexibility" : "0",
54
+ "isOptional" : "0",
55
+ "dataType" : "Float16",
56
+ "formattedType" : "MultiArray (Float16 1 × 128 × 3000)",
57
+ "shortDescription" : "",
58
+ "shape" : "[1, 128, 3000]",
59
+ "name" : "logmel_data",
60
+ "type" : "MultiArray"
61
+ }
62
+ ],
63
+ "generatedClassName" : "ggml_large_v3_encoder",
64
+ "method" : "predict"
65
+ }
66
+ ]
ggml-large-v3-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
ggml-large-v3-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c95df3d5d14c4b12420e5a28984cc0bcaad221e7d8915993869853687a4a3dd0
+ size 1057618496
ggml-large-v3-q8_0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2fce6ba54dc05c0c9ff297184f0d45cc6dc8099fc57a3e3f76cf4cc4f70aa94
+ size 967567427
ggml-medium-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:975604071b5e16602d99864f0c9bd082c0114ced9fea1cce6367ee7a33e99591
+ size 243
ggml-medium-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e781c7fcb346060d55fa9f2066f27fdc0e58b6018c50400488144d06e6dd7109
+ size 318
ggml-medium-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,66 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 1500 × 1024)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1500, 1024]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 8,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios17.layerNorm" : 49,
23
+ "Ios17.reshape" : 96,
24
+ "Ios17.conv" : 2,
25
+ "Ios17.linear" : 144,
26
+ "Ios17.add" : 49,
27
+ "Ios17.matmul" : 48,
28
+ "Ios16.gelu" : 26,
29
+ "Ios16.softmax" : 24,
30
+ "Ios17.mul" : 48,
31
+ "Ios17.transpose" : 97
32
+ },
33
+ "computePrecision" : "Mixed (Float16, Int32)",
34
+ "isUpdatable" : "0",
35
+ "availability" : {
36
+ "macOS" : "14.0",
37
+ "tvOS" : "17.0",
38
+ "visionOS" : "1.0",
39
+ "watchOS" : "10.0",
40
+ "iOS" : "17.0",
41
+ "macCatalyst" : "17.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
48
+ "com.github.apple.coremltools.source" : "torch==2.2.2",
49
+ "com.github.apple.coremltools.version" : "7.2"
50
+ },
51
+ "inputSchema" : [
52
+ {
53
+ "hasShapeFlexibility" : "0",
54
+ "isOptional" : "0",
55
+ "dataType" : "Float16",
56
+ "formattedType" : "MultiArray (Float16 1 × 80 × 3000)",
57
+ "shortDescription" : "",
58
+ "shape" : "[1, 80, 3000]",
59
+ "name" : "logmel_data",
60
+ "type" : "MultiArray"
61
+ }
62
+ ],
63
+ "generatedClassName" : "ggml_medium_encoder",
64
+ "method" : "predict"
65
+ }
66
+ ]
ggml-medium-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
ggml-medium-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbbdb8a10cee8abc0d0c0d86af33ea4b1d8453380df103fd49272a28143a892b
+ size 614458432
ggml-medium.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:076f80d119389b32fb3ff7ee78330418de19970090183807dfa3da56a4f69fda
+ size 915642516
ggml-small-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e33d81d8cf4a206337049c6f0379ed5370da2b0fc3d822a6065b2c021664a46
+ size 243
ggml-small-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6643c80fd2477e972234f11c3d21cc415963809f6bc560980f83bda4c62f31b
+ size 318
ggml-small-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,66 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 1500 × 768)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1500, 768]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 8,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios17.layerNorm" : 25,
23
+ "Ios17.reshape" : 48,
24
+ "Ios17.conv" : 2,
25
+ "Ios17.linear" : 72,
26
+ "Ios17.add" : 25,
27
+ "Ios17.matmul" : 24,
28
+ "Ios16.gelu" : 14,
29
+ "Ios16.softmax" : 12,
30
+ "Ios17.mul" : 24,
31
+ "Ios17.transpose" : 49
32
+ },
33
+ "computePrecision" : "Mixed (Float16, Int32)",
34
+ "isUpdatable" : "0",
35
+ "availability" : {
36
+ "macOS" : "14.0",
37
+ "tvOS" : "17.0",
38
+ "visionOS" : "1.0",
39
+ "watchOS" : "10.0",
40
+ "iOS" : "17.0",
41
+ "macCatalyst" : "17.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
48
+ "com.github.apple.coremltools.source" : "torch==2.2.2",
49
+ "com.github.apple.coremltools.version" : "7.2"
50
+ },
51
+ "inputSchema" : [
52
+ {
53
+ "hasShapeFlexibility" : "0",
54
+ "isOptional" : "0",
55
+ "dataType" : "Float16",
56
+ "formattedType" : "MultiArray (Float16 1 × 80 × 3000)",
57
+ "shortDescription" : "",
58
+ "shape" : "[1, 80, 3000]",
59
+ "name" : "logmel_data",
60
+ "type" : "MultiArray"
61
+ }
62
+ ],
63
+ "generatedClassName" : "ggml_small_encoder",
64
+ "method" : "predict"
65
+ }
66
+ ]
ggml-small-encoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
ggml-small-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2362f377002638f1f3ec3a413d80c9d82410b537d776a1a75d6571c8e40ace6
+ size 176321856
ggml-small.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42ea1b1ba6ffa8cde9da77664db1ac5e24378f9d3d0363ca0104deebedac7732
+ size 308753476
ggml-tiny-encoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e6edb6be522938b84fd688c4aef4180f652783aaa157e274bd1ba221c2f9322
+ size 243
ggml-tiny-encoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a2e684990429a39584b0adf645ea565057cb6bdcfd404f2dba45a1031e4faaa
+ size 318
ggml-tiny-encoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,66 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float16",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 1500 × 384)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1500, 384]",
13
+ "name" : "output",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 8,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios17.layerNorm" : 9,
23
+ "Ios17.reshape" : 16,
24
+ "Ios17.conv" : 2,
25
+ "Ios17.linear" : 24,
26
+ "Ios17.add" : 9,
27
+ "Ios17.matmul" : 8,
28
+ "Ios16.gelu" : 6,
29
+ "Ios16.softmax" : 4,
30
+ "Ios17.mul" : 8,
31
+ "Ios17.transpose" : 17
32
+ },
33
+ "computePrecision" : "Mixed (Float16, Int32)",
34
+ "isUpdatable" : "0",
35
+ "availability" : {
36
+ "macOS" : "14.0",
37
+ "tvOS" : "17.0",
38
+ "visionOS" : "1.0",
39
+ "watchOS" : "10.0",
40
+ "iOS" : "17.0",
41
+ "macCatalyst" : "17.0"
42
+ },
43
+ "modelType" : {
44
+ "name" : "MLModelType_mlProgram"
45
+ },
46
+ "userDefinedMetadata" : {
47
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
48
+ "com.github.apple.coremltools.source" : "torch==2.2.2",
49
+ "com.github.apple.coremltools.version" : "7.2"
50
+ },
51
+ "inputSchema" : [
52
+ {
53
+ "hasShapeFlexibility" : "0",
54
+ "isOptional" : "0",
55
+ "dataType" : "Float16",
56
+ "formattedType" : "MultiArray (Float16 1 × 80 × 3000)",
57
+ "shortDescription" : "",
58
+ "shape" : "[1, 80, 3000]",
59
+ "name" : "logmel_data",
60
+ "type" : "MultiArray"
61
+ }
62
+ ],
63
+ "generatedClassName" : "ggml_tiny_encoder",
64
+ "method" : "predict"
65
+ }
66
+ ]
ggml-tiny-encoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,268 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.2.2"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "7.2"}})]
3
+ {
4
+ func main<ios17>(tensor<fp16, [1, 80, 3000]> logmel_data) {
5
+ tensor<int32, []> var_16 = const()[name = tensor<string, []>("op_16"), val = tensor<int32, []>(1)];
6
+ tensor<int32, [1]> var_24 = const()[name = tensor<string, []>("op_24"), val = tensor<int32, [1]>([1])];
7
+ tensor<int32, [1]> var_26 = const()[name = tensor<string, []>("op_26"), val = tensor<int32, [1]>([1])];
8
+ tensor<string, []> var_28_pad_type_0 = const()[name = tensor<string, []>("op_28_pad_type_0"), val = tensor<string, []>("custom")];
9
+ tensor<int32, [2]> var_28_pad_0 = const()[name = tensor<string, []>("op_28_pad_0"), val = tensor<int32, [2]>([1, 1])];
10
+ tensor<fp16, [384, 80, 3]> weight_3_to_fp16 = const()[name = tensor<string, []>("weight_3_to_fp16"), val = tensor<fp16, [384, 80, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
11
+ tensor<fp16, [384]> bias_3_to_fp16 = const()[name = tensor<string, []>("bias_3_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(184448)))];
12
+ tensor<fp16, [1, 384, 3000]> var_28_cast_fp16 = conv(bias = bias_3_to_fp16, dilations = var_26, groups = var_16, pad = var_28_pad_0, pad_type = var_28_pad_type_0, strides = var_24, weight = weight_3_to_fp16, x = logmel_data)[name = tensor<string, []>("op_28_cast_fp16")];
13
+ tensor<string, []> input_1_mode_0 = const()[name = tensor<string, []>("input_1_mode_0"), val = tensor<string, []>("EXACT")];
14
+ tensor<fp16, [1, 384, 3000]> input_1_cast_fp16 = gelu(mode = input_1_mode_0, x = var_28_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
15
+ tensor<int32, []> var_33 = const()[name = tensor<string, []>("op_33"), val = tensor<int32, []>(1)];
16
+ tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([2])];
17
+ tensor<int32, [1]> var_44 = const()[name = tensor<string, []>("op_44"), val = tensor<int32, [1]>([1])];
18
+ tensor<string, []> var_46_pad_type_0 = const()[name = tensor<string, []>("op_46_pad_type_0"), val = tensor<string, []>("custom")];
19
+ tensor<int32, [2]> var_46_pad_0 = const()[name = tensor<string, []>("op_46_pad_0"), val = tensor<int32, [2]>([1, 1])];
20
+ tensor<fp16, [384, 384, 3]> weight_7_to_fp16 = const()[name = tensor<string, []>("weight_7_to_fp16"), val = tensor<fp16, [384, 384, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(185280)))];
21
+ tensor<fp16, [384]> bias_7_to_fp16 = const()[name = tensor<string, []>("bias_7_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1070080)))];
22
+ tensor<fp16, [1, 384, 1500]> var_46_cast_fp16 = conv(bias = bias_7_to_fp16, dilations = var_44, groups = var_33, pad = var_46_pad_0, pad_type = var_46_pad_type_0, strides = var_42, weight = weight_7_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("op_46_cast_fp16")];
23
+ tensor<string, []> x_3_mode_0 = const()[name = tensor<string, []>("x_3_mode_0"), val = tensor<string, []>("EXACT")];
24
+ tensor<fp16, [1, 384, 1500]> x_3_cast_fp16 = gelu(mode = x_3_mode_0, x = var_46_cast_fp16)[name = tensor<string, []>("x_3_cast_fp16")];
25
+ tensor<int32, [3]> var_52 = const()[name = tensor<string, []>("op_52"), val = tensor<int32, [3]>([0, 2, 1])];
26
+ tensor<fp16, [1500, 384]> positional_embedding_to_fp16 = const()[name = tensor<string, []>("positional_embedding_to_fp16"), val = tensor<fp16, [1500, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1070912)))];
27
+ tensor<fp16, [1, 1500, 384]> transpose_40 = transpose(perm = var_52, x = x_3_cast_fp16)[name = tensor<string, []>("transpose_40")];
28
+ tensor<fp16, [1, 1500, 384]> var_55_cast_fp16 = add(x = transpose_40, y = positional_embedding_to_fp16)[name = tensor<string, []>("op_55_cast_fp16")];
29
+ tensor<int32, []> var_67 = const()[name = tensor<string, []>("op_67"), val = tensor<int32, []>(-1)];
30
+ tensor<int32, [1]> var_83_axes_0 = const()[name = tensor<string, []>("op_83_axes_0"), val = tensor<int32, [1]>([-1])];
31
+ tensor<fp16, [384]> blocks_0_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2222976)))];
32
+ tensor<fp16, [384]> blocks_0_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_attn_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2223808)))];
33
+ tensor<fp16, []> var_73_to_fp16 = const()[name = tensor<string, []>("op_73_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
34
+ tensor<fp16, [1, 1500, 384]> var_83_cast_fp16 = layer_norm(axes = var_83_axes_0, beta = blocks_0_attn_ln_bias_to_fp16, epsilon = var_73_to_fp16, gamma = blocks_0_attn_ln_weight_to_fp16, x = var_55_cast_fp16)[name = tensor<string, []>("op_83_cast_fp16")];
35
+ tensor<fp16, [384, 384]> var_94_to_fp16 = const()[name = tensor<string, []>("op_94_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2224640)))];
36
+ tensor<fp16, [384]> var_95_to_fp16 = const()[name = tensor<string, []>("op_95_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2519616)))];
37
+ tensor<fp16, [1, 1500, 384]> linear_0_cast_fp16 = linear(bias = var_95_to_fp16, weight = var_94_to_fp16, x = var_83_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
38
+ tensor<fp16, [384, 384]> var_98_to_fp16 = const()[name = tensor<string, []>("op_98_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2520448)))];
39
+ tensor<fp16, [384]> linear_1_bias_0_to_fp16 = const()[name = tensor<string, []>("linear_1_bias_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2815424)))];
40
+ tensor<fp16, [1, 1500, 384]> linear_1_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_98_to_fp16, x = var_83_cast_fp16)[name = tensor<string, []>("linear_1_cast_fp16")];
41
+ tensor<fp16, [384, 384]> var_102_to_fp16 = const()[name = tensor<string, []>("op_102_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2816256)))];
42
+ tensor<fp16, [384]> var_103_to_fp16 = const()[name = tensor<string, []>("op_103_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3111232)))];
43
+ tensor<fp16, [1, 1500, 384]> linear_2_cast_fp16 = linear(bias = var_103_to_fp16, weight = var_102_to_fp16, x = var_83_cast_fp16)[name = tensor<string, []>("linear_2_cast_fp16")];
44
+ tensor<int32, [4]> var_111 = const()[name = tensor<string, []>("op_111"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
45
+ tensor<fp16, [1, 1500, 6, 64]> var_112_cast_fp16 = reshape(shape = var_111, x = linear_0_cast_fp16)[name = tensor<string, []>("op_112_cast_fp16")];
46
+ tensor<fp16, [1, 1, 1, 1]> const_28_to_fp16 = const()[name = tensor<string, []>("const_28_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
47
+ tensor<fp16, [1, 1500, 6, 64]> q_3_cast_fp16 = mul(x = var_112_cast_fp16, y = const_28_to_fp16)[name = tensor<string, []>("q_3_cast_fp16")];
48
+ tensor<int32, [4]> var_118 = const()[name = tensor<string, []>("op_118"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
49
+ tensor<fp16, [1, 1500, 6, 64]> var_119_cast_fp16 = reshape(shape = var_118, x = linear_1_cast_fp16)[name = tensor<string, []>("op_119_cast_fp16")];
50
+ tensor<fp16, [1, 1, 1, 1]> const_29_to_fp16 = const()[name = tensor<string, []>("const_29_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
51
+ tensor<fp16, [1, 1500, 6, 64]> k_3_cast_fp16 = mul(x = var_119_cast_fp16, y = const_29_to_fp16)[name = tensor<string, []>("k_3_cast_fp16")];
52
+ tensor<int32, [4]> var_125 = const()[name = tensor<string, []>("op_125"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
53
+ tensor<fp16, [1, 1500, 6, 64]> var_126_cast_fp16 = reshape(shape = var_125, x = linear_2_cast_fp16)[name = tensor<string, []>("op_126_cast_fp16")];
54
+ tensor<int32, [4]> var_127 = const()[name = tensor<string, []>("op_127"), val = tensor<int32, [4]>([0, 2, 1, 3])];
55
+ tensor<bool, []> qk_1_transpose_x_0 = const()[name = tensor<string, []>("qk_1_transpose_x_0"), val = tensor<bool, []>(false)];
56
+ tensor<bool, []> qk_1_transpose_y_0 = const()[name = tensor<string, []>("qk_1_transpose_y_0"), val = tensor<bool, []>(false)];
57
+ tensor<int32, [4]> transpose_16_perm_0 = const()[name = tensor<string, []>("transpose_16_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
58
+ tensor<int32, [4]> transpose_17_perm_0 = const()[name = tensor<string, []>("transpose_17_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
59
+ tensor<fp16, [1, 6, 64, 1500]> transpose_37 = transpose(perm = transpose_17_perm_0, x = k_3_cast_fp16)[name = tensor<string, []>("transpose_37")];
60
+ tensor<fp16, [1, 6, 1500, 64]> transpose_38 = transpose(perm = transpose_16_perm_0, x = q_3_cast_fp16)[name = tensor<string, []>("transpose_38")];
61
+ tensor<fp16, [1, 6, 1500, 1500]> qk_1_cast_fp16 = matmul(transpose_x = qk_1_transpose_x_0, transpose_y = qk_1_transpose_y_0, x = transpose_38, y = transpose_37)[name = tensor<string, []>("qk_1_cast_fp16")];
62
+ tensor<fp16, [1, 6, 1500, 1500]> var_131_cast_fp16 = softmax(axis = var_67, x = qk_1_cast_fp16)[name = tensor<string, []>("op_131_cast_fp16")];
63
+ tensor<bool, []> var_133_transpose_x_0 = const()[name = tensor<string, []>("op_133_transpose_x_0"), val = tensor<bool, []>(false)];
64
+ tensor<bool, []> var_133_transpose_y_0 = const()[name = tensor<string, []>("op_133_transpose_y_0"), val = tensor<bool, []>(false)];
65
+ tensor<fp16, [1, 6, 1500, 64]> transpose_39 = transpose(perm = var_127, x = var_126_cast_fp16)[name = tensor<string, []>("transpose_39")];
66
+ tensor<fp16, [1, 6, 1500, 64]> var_133_cast_fp16 = matmul(transpose_x = var_133_transpose_x_0, transpose_y = var_133_transpose_y_0, x = var_131_cast_fp16, y = transpose_39)[name = tensor<string, []>("op_133_cast_fp16")];
67
+ tensor<int32, [4]> var_134 = const()[name = tensor<string, []>("op_134"), val = tensor<int32, [4]>([0, 2, 1, 3])];
68
+ tensor<int32, [3]> concat_0 = const()[name = tensor<string, []>("concat_0"), val = tensor<int32, [3]>([1, 1500, 384])];
69
+ tensor<fp16, [1, 1500, 6, 64]> transpose_36 = transpose(perm = var_134, x = var_133_cast_fp16)[name = tensor<string, []>("transpose_36")];
70
+ tensor<fp16, [1, 1500, 384]> x_11_cast_fp16 = reshape(shape = concat_0, x = transpose_36)[name = tensor<string, []>("x_11_cast_fp16")];
71
+ tensor<fp16, [384, 384]> var_139_to_fp16 = const()[name = tensor<string, []>("op_139_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3112064)))];
72
+ tensor<fp16, [384]> var_140_to_fp16 = const()[name = tensor<string, []>("op_140_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3407040)))];
73
+ tensor<fp16, [1, 1500, 384]> linear_3_cast_fp16 = linear(bias = var_140_to_fp16, weight = var_139_to_fp16, x = x_11_cast_fp16)[name = tensor<string, []>("linear_3_cast_fp16")];
74
+ tensor<fp16, [1, 1500, 384]> x_13_cast_fp16 = add(x = var_55_cast_fp16, y = linear_3_cast_fp16)[name = tensor<string, []>("x_13_cast_fp16")];
75
+ tensor<int32, [1]> var_147_axes_0 = const()[name = tensor<string, []>("op_147_axes_0"), val = tensor<int32, [1]>([-1])];
76
+ tensor<fp16, [384]> blocks_0_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3407872)))];
77
+ tensor<fp16, [384]> blocks_0_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_0_mlp_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3408704)))];
78
+ tensor<fp16, [1, 1500, 384]> var_147_cast_fp16 = layer_norm(axes = var_147_axes_0, beta = blocks_0_mlp_ln_bias_to_fp16, epsilon = var_73_to_fp16, gamma = blocks_0_mlp_ln_weight_to_fp16, x = x_13_cast_fp16)[name = tensor<string, []>("op_147_cast_fp16")];
79
+ tensor<fp16, [1536, 384]> var_156_to_fp16 = const()[name = tensor<string, []>("op_156_to_fp16"), val = tensor<fp16, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3409536)))];
80
+ tensor<fp16, [1536]> var_157_to_fp16 = const()[name = tensor<string, []>("op_157_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4589248)))];
81
+ tensor<fp16, [1, 1500, 1536]> linear_4_cast_fp16 = linear(bias = var_157_to_fp16, weight = var_156_to_fp16, x = var_147_cast_fp16)[name = tensor<string, []>("linear_4_cast_fp16")];
82
+ tensor<string, []> x_17_mode_0 = const()[name = tensor<string, []>("x_17_mode_0"), val = tensor<string, []>("EXACT")];
83
+ tensor<fp16, [1, 1500, 1536]> x_17_cast_fp16 = gelu(mode = x_17_mode_0, x = linear_4_cast_fp16)[name = tensor<string, []>("x_17_cast_fp16")];
84
+ tensor<fp16, [384, 1536]> var_162_to_fp16 = const()[name = tensor<string, []>("op_162_to_fp16"), val = tensor<fp16, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4592384)))];
85
+ tensor<fp16, [384]> var_163_to_fp16 = const()[name = tensor<string, []>("op_163_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5772096)))];
86
+ tensor<fp16, [1, 1500, 384]> linear_5_cast_fp16 = linear(bias = var_163_to_fp16, weight = var_162_to_fp16, x = x_17_cast_fp16)[name = tensor<string, []>("linear_5_cast_fp16")];
87
+ tensor<fp16, [1, 1500, 384]> x_19_cast_fp16 = add(x = x_13_cast_fp16, y = linear_5_cast_fp16)[name = tensor<string, []>("x_19_cast_fp16")];
88
+ tensor<int32, []> var_172 = const()[name = tensor<string, []>("op_172"), val = tensor<int32, []>(-1)];
89
+ tensor<int32, [1]> var_188_axes_0 = const()[name = tensor<string, []>("op_188_axes_0"), val = tensor<int32, [1]>([-1])];
90
+ tensor<fp16, [384]> blocks_1_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5772928)))];
91
+ tensor<fp16, [384]> blocks_1_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_attn_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5773760)))];
92
+ tensor<fp16, []> var_178_to_fp16 = const()[name = tensor<string, []>("op_178_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
93
+ tensor<fp16, [1, 1500, 384]> var_188_cast_fp16 = layer_norm(axes = var_188_axes_0, beta = blocks_1_attn_ln_bias_to_fp16, epsilon = var_178_to_fp16, gamma = blocks_1_attn_ln_weight_to_fp16, x = x_19_cast_fp16)[name = tensor<string, []>("op_188_cast_fp16")];
94
+ tensor<fp16, [384, 384]> var_199_to_fp16 = const()[name = tensor<string, []>("op_199_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5774592)))];
95
+ tensor<fp16, [384]> var_200_to_fp16 = const()[name = tensor<string, []>("op_200_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6069568)))];
96
+ tensor<fp16, [1, 1500, 384]> linear_6_cast_fp16 = linear(bias = var_200_to_fp16, weight = var_199_to_fp16, x = var_188_cast_fp16)[name = tensor<string, []>("linear_6_cast_fp16")];
97
+ tensor<fp16, [384, 384]> var_203_to_fp16 = const()[name = tensor<string, []>("op_203_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6070400)))];
98
+ tensor<fp16, [1, 1500, 384]> linear_7_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_203_to_fp16, x = var_188_cast_fp16)[name = tensor<string, []>("linear_7_cast_fp16")];
99
+ tensor<fp16, [384, 384]> var_207_to_fp16 = const()[name = tensor<string, []>("op_207_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6365376)))];
100
+ tensor<fp16, [384]> var_208_to_fp16 = const()[name = tensor<string, []>("op_208_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6660352)))];
101
+ tensor<fp16, [1, 1500, 384]> linear_8_cast_fp16 = linear(bias = var_208_to_fp16, weight = var_207_to_fp16, x = var_188_cast_fp16)[name = tensor<string, []>("linear_8_cast_fp16")];
102
+ tensor<int32, [4]> var_216 = const()[name = tensor<string, []>("op_216"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
103
+ tensor<fp16, [1, 1500, 6, 64]> var_217_cast_fp16 = reshape(shape = var_216, x = linear_6_cast_fp16)[name = tensor<string, []>("op_217_cast_fp16")];
104
+ tensor<fp16, [1, 1, 1, 1]> const_30_to_fp16 = const()[name = tensor<string, []>("const_30_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
105
+ tensor<fp16, [1, 1500, 6, 64]> q_7_cast_fp16 = mul(x = var_217_cast_fp16, y = const_30_to_fp16)[name = tensor<string, []>("q_7_cast_fp16")];
106
+ tensor<int32, [4]> var_223 = const()[name = tensor<string, []>("op_223"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
107
+ tensor<fp16, [1, 1500, 6, 64]> var_224_cast_fp16 = reshape(shape = var_223, x = linear_7_cast_fp16)[name = tensor<string, []>("op_224_cast_fp16")];
108
+ tensor<fp16, [1, 1, 1, 1]> const_31_to_fp16 = const()[name = tensor<string, []>("const_31_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
109
+ tensor<fp16, [1, 1500, 6, 64]> k_7_cast_fp16 = mul(x = var_224_cast_fp16, y = const_31_to_fp16)[name = tensor<string, []>("k_7_cast_fp16")];
110
+ tensor<int32, [4]> var_230 = const()[name = tensor<string, []>("op_230"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
111
+ tensor<fp16, [1, 1500, 6, 64]> var_231_cast_fp16 = reshape(shape = var_230, x = linear_8_cast_fp16)[name = tensor<string, []>("op_231_cast_fp16")];
112
+ tensor<int32, [4]> var_232 = const()[name = tensor<string, []>("op_232"), val = tensor<int32, [4]>([0, 2, 1, 3])];
113
+ tensor<bool, []> qk_3_transpose_x_0 = const()[name = tensor<string, []>("qk_3_transpose_x_0"), val = tensor<bool, []>(false)];
114
+ tensor<bool, []> qk_3_transpose_y_0 = const()[name = tensor<string, []>("qk_3_transpose_y_0"), val = tensor<bool, []>(false)];
115
+ tensor<int32, [4]> transpose_18_perm_0 = const()[name = tensor<string, []>("transpose_18_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
116
+ tensor<int32, [4]> transpose_19_perm_0 = const()[name = tensor<string, []>("transpose_19_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
117
+ tensor<fp16, [1, 6, 64, 1500]> transpose_33 = transpose(perm = transpose_19_perm_0, x = k_7_cast_fp16)[name = tensor<string, []>("transpose_33")];
118
+ tensor<fp16, [1, 6, 1500, 64]> transpose_34 = transpose(perm = transpose_18_perm_0, x = q_7_cast_fp16)[name = tensor<string, []>("transpose_34")];
119
+ tensor<fp16, [1, 6, 1500, 1500]> qk_3_cast_fp16 = matmul(transpose_x = qk_3_transpose_x_0, transpose_y = qk_3_transpose_y_0, x = transpose_34, y = transpose_33)[name = tensor<string, []>("qk_3_cast_fp16")];
120
+ tensor<fp16, [1, 6, 1500, 1500]> var_236_cast_fp16 = softmax(axis = var_172, x = qk_3_cast_fp16)[name = tensor<string, []>("op_236_cast_fp16")];
121
+ tensor<bool, []> var_238_transpose_x_0 = const()[name = tensor<string, []>("op_238_transpose_x_0"), val = tensor<bool, []>(false)];
122
+ tensor<bool, []> var_238_transpose_y_0 = const()[name = tensor<string, []>("op_238_transpose_y_0"), val = tensor<bool, []>(false)];
123
+ tensor<fp16, [1, 6, 1500, 64]> transpose_35 = transpose(perm = var_232, x = var_231_cast_fp16)[name = tensor<string, []>("transpose_35")];
124
+ tensor<fp16, [1, 6, 1500, 64]> var_238_cast_fp16 = matmul(transpose_x = var_238_transpose_x_0, transpose_y = var_238_transpose_y_0, x = var_236_cast_fp16, y = transpose_35)[name = tensor<string, []>("op_238_cast_fp16")];
125
+ tensor<int32, [4]> var_239 = const()[name = tensor<string, []>("op_239"), val = tensor<int32, [4]>([0, 2, 1, 3])];
126
+ tensor<int32, [3]> concat_1 = const()[name = tensor<string, []>("concat_1"), val = tensor<int32, [3]>([1, 1500, 384])];
127
+ tensor<fp16, [1, 1500, 6, 64]> transpose_32 = transpose(perm = var_239, x = var_238_cast_fp16)[name = tensor<string, []>("transpose_32")];
128
+ tensor<fp16, [1, 1500, 384]> x_23_cast_fp16 = reshape(shape = concat_1, x = transpose_32)[name = tensor<string, []>("x_23_cast_fp16")];
129
+ tensor<fp16, [384, 384]> var_244_to_fp16 = const()[name = tensor<string, []>("op_244_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6661184)))];
130
+ tensor<fp16, [384]> var_245_to_fp16 = const()[name = tensor<string, []>("op_245_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6956160)))];
131
+ tensor<fp16, [1, 1500, 384]> linear_9_cast_fp16 = linear(bias = var_245_to_fp16, weight = var_244_to_fp16, x = x_23_cast_fp16)[name = tensor<string, []>("linear_9_cast_fp16")];
132
+ tensor<fp16, [1, 1500, 384]> x_25_cast_fp16 = add(x = x_19_cast_fp16, y = linear_9_cast_fp16)[name = tensor<string, []>("x_25_cast_fp16")];
133
+ tensor<int32, [1]> var_252_axes_0 = const()[name = tensor<string, []>("op_252_axes_0"), val = tensor<int32, [1]>([-1])];
134
+ tensor<fp16, [384]> blocks_1_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6956992)))];
135
+ tensor<fp16, [384]> blocks_1_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_1_mlp_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6957824)))];
136
+ tensor<fp16, [1, 1500, 384]> var_252_cast_fp16 = layer_norm(axes = var_252_axes_0, beta = blocks_1_mlp_ln_bias_to_fp16, epsilon = var_178_to_fp16, gamma = blocks_1_mlp_ln_weight_to_fp16, x = x_25_cast_fp16)[name = tensor<string, []>("op_252_cast_fp16")];
137
+ tensor<fp16, [1536, 384]> var_261_to_fp16 = const()[name = tensor<string, []>("op_261_to_fp16"), val = tensor<fp16, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6958656)))];
138
+ tensor<fp16, [1536]> var_262_to_fp16 = const()[name = tensor<string, []>("op_262_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8138368)))];
139
+ tensor<fp16, [1, 1500, 1536]> linear_10_cast_fp16 = linear(bias = var_262_to_fp16, weight = var_261_to_fp16, x = var_252_cast_fp16)[name = tensor<string, []>("linear_10_cast_fp16")];
140
+ tensor<string, []> x_29_mode_0 = const()[name = tensor<string, []>("x_29_mode_0"), val = tensor<string, []>("EXACT")];
141
+ tensor<fp16, [1, 1500, 1536]> x_29_cast_fp16 = gelu(mode = x_29_mode_0, x = linear_10_cast_fp16)[name = tensor<string, []>("x_29_cast_fp16")];
142
+ tensor<fp16, [384, 1536]> var_267_to_fp16 = const()[name = tensor<string, []>("op_267_to_fp16"), val = tensor<fp16, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8141504)))];
143
+ tensor<fp16, [384]> var_268_to_fp16 = const()[name = tensor<string, []>("op_268_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9321216)))];
144
+ tensor<fp16, [1, 1500, 384]> linear_11_cast_fp16 = linear(bias = var_268_to_fp16, weight = var_267_to_fp16, x = x_29_cast_fp16)[name = tensor<string, []>("linear_11_cast_fp16")];
145
+ tensor<fp16, [1, 1500, 384]> x_31_cast_fp16 = add(x = x_25_cast_fp16, y = linear_11_cast_fp16)[name = tensor<string, []>("x_31_cast_fp16")];
146
+ tensor<int32, []> var_277 = const()[name = tensor<string, []>("op_277"), val = tensor<int32, []>(-1)];
147
+ tensor<int32, [1]> var_293_axes_0 = const()[name = tensor<string, []>("op_293_axes_0"), val = tensor<int32, [1]>([-1])];
148
+ tensor<fp16, [384]> blocks_2_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9322048)))];
149
+ tensor<fp16, [384]> blocks_2_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_attn_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9322880)))];
150
+ tensor<fp16, []> var_283_to_fp16 = const()[name = tensor<string, []>("op_283_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
151
+ tensor<fp16, [1, 1500, 384]> var_293_cast_fp16 = layer_norm(axes = var_293_axes_0, beta = blocks_2_attn_ln_bias_to_fp16, epsilon = var_283_to_fp16, gamma = blocks_2_attn_ln_weight_to_fp16, x = x_31_cast_fp16)[name = tensor<string, []>("op_293_cast_fp16")];
152
+ tensor<fp16, [384, 384]> var_304_to_fp16 = const()[name = tensor<string, []>("op_304_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9323712)))];
153
+ tensor<fp16, [384]> var_305_to_fp16 = const()[name = tensor<string, []>("op_305_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9618688)))];
154
+ tensor<fp16, [1, 1500, 384]> linear_12_cast_fp16 = linear(bias = var_305_to_fp16, weight = var_304_to_fp16, x = var_293_cast_fp16)[name = tensor<string, []>("linear_12_cast_fp16")];
155
+ tensor<fp16, [384, 384]> var_308_to_fp16 = const()[name = tensor<string, []>("op_308_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9619520)))];
156
+ tensor<fp16, [1, 1500, 384]> linear_13_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_308_to_fp16, x = var_293_cast_fp16)[name = tensor<string, []>("linear_13_cast_fp16")];
157
+ tensor<fp16, [384, 384]> var_312_to_fp16 = const()[name = tensor<string, []>("op_312_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9914496)))];
158
+ tensor<fp16, [384]> var_313_to_fp16 = const()[name = tensor<string, []>("op_313_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10209472)))];
159
+ tensor<fp16, [1, 1500, 384]> linear_14_cast_fp16 = linear(bias = var_313_to_fp16, weight = var_312_to_fp16, x = var_293_cast_fp16)[name = tensor<string, []>("linear_14_cast_fp16")];
160
+ tensor<int32, [4]> var_321 = const()[name = tensor<string, []>("op_321"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
161
+ tensor<fp16, [1, 1500, 6, 64]> var_322_cast_fp16 = reshape(shape = var_321, x = linear_12_cast_fp16)[name = tensor<string, []>("op_322_cast_fp16")];
162
+ tensor<fp16, [1, 1, 1, 1]> const_32_to_fp16 = const()[name = tensor<string, []>("const_32_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
163
+ tensor<fp16, [1, 1500, 6, 64]> q_11_cast_fp16 = mul(x = var_322_cast_fp16, y = const_32_to_fp16)[name = tensor<string, []>("q_11_cast_fp16")];
164
+ tensor<int32, [4]> var_328 = const()[name = tensor<string, []>("op_328"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
165
+ tensor<fp16, [1, 1500, 6, 64]> var_329_cast_fp16 = reshape(shape = var_328, x = linear_13_cast_fp16)[name = tensor<string, []>("op_329_cast_fp16")];
+ tensor<fp16, [1, 1, 1, 1]> const_33_to_fp16 = const()[name = tensor<string, []>("const_33_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
+ tensor<fp16, [1, 1500, 6, 64]> k_11_cast_fp16 = mul(x = var_329_cast_fp16, y = const_33_to_fp16)[name = tensor<string, []>("k_11_cast_fp16")];
+ tensor<int32, [4]> var_335 = const()[name = tensor<string, []>("op_335"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
+ tensor<fp16, [1, 1500, 6, 64]> var_336_cast_fp16 = reshape(shape = var_335, x = linear_14_cast_fp16)[name = tensor<string, []>("op_336_cast_fp16")];
+ tensor<int32, [4]> var_337 = const()[name = tensor<string, []>("op_337"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+ tensor<bool, []> qk_5_transpose_x_0 = const()[name = tensor<string, []>("qk_5_transpose_x_0"), val = tensor<bool, []>(false)];
+ tensor<bool, []> qk_5_transpose_y_0 = const()[name = tensor<string, []>("qk_5_transpose_y_0"), val = tensor<bool, []>(false)];
+ tensor<int32, [4]> transpose_20_perm_0 = const()[name = tensor<string, []>("transpose_20_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
+ tensor<int32, [4]> transpose_21_perm_0 = const()[name = tensor<string, []>("transpose_21_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
+ tensor<fp16, [1, 6, 64, 1500]> transpose_29 = transpose(perm = transpose_21_perm_0, x = k_11_cast_fp16)[name = tensor<string, []>("transpose_29")];
+ tensor<fp16, [1, 6, 1500, 64]> transpose_30 = transpose(perm = transpose_20_perm_0, x = q_11_cast_fp16)[name = tensor<string, []>("transpose_30")];
+ tensor<fp16, [1, 6, 1500, 1500]> qk_5_cast_fp16 = matmul(transpose_x = qk_5_transpose_x_0, transpose_y = qk_5_transpose_y_0, x = transpose_30, y = transpose_29)[name = tensor<string, []>("qk_5_cast_fp16")];
+ tensor<fp16, [1, 6, 1500, 1500]> var_341_cast_fp16 = softmax(axis = var_277, x = qk_5_cast_fp16)[name = tensor<string, []>("op_341_cast_fp16")];
+ tensor<bool, []> var_343_transpose_x_0 = const()[name = tensor<string, []>("op_343_transpose_x_0"), val = tensor<bool, []>(false)];
+ tensor<bool, []> var_343_transpose_y_0 = const()[name = tensor<string, []>("op_343_transpose_y_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [1, 6, 1500, 64]> transpose_31 = transpose(perm = var_337, x = var_336_cast_fp16)[name = tensor<string, []>("transpose_31")];
+ tensor<fp16, [1, 6, 1500, 64]> var_343_cast_fp16 = matmul(transpose_x = var_343_transpose_x_0, transpose_y = var_343_transpose_y_0, x = var_341_cast_fp16, y = transpose_31)[name = tensor<string, []>("op_343_cast_fp16")];
+ tensor<int32, [4]> var_344 = const()[name = tensor<string, []>("op_344"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+ tensor<int32, [3]> concat_2 = const()[name = tensor<string, []>("concat_2"), val = tensor<int32, [3]>([1, 1500, 384])];
+ tensor<fp16, [1, 1500, 6, 64]> transpose_28 = transpose(perm = var_344, x = var_343_cast_fp16)[name = tensor<string, []>("transpose_28")];
+ tensor<fp16, [1, 1500, 384]> x_35_cast_fp16 = reshape(shape = concat_2, x = transpose_28)[name = tensor<string, []>("x_35_cast_fp16")];
+ tensor<fp16, [384, 384]> var_349_to_fp16 = const()[name = tensor<string, []>("op_349_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10210304)))];
+ tensor<fp16, [384]> var_350_to_fp16 = const()[name = tensor<string, []>("op_350_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10505280)))];
+ tensor<fp16, [1, 1500, 384]> linear_15_cast_fp16 = linear(bias = var_350_to_fp16, weight = var_349_to_fp16, x = x_35_cast_fp16)[name = tensor<string, []>("linear_15_cast_fp16")];
+ tensor<fp16, [1, 1500, 384]> x_37_cast_fp16 = add(x = x_31_cast_fp16, y = linear_15_cast_fp16)[name = tensor<string, []>("x_37_cast_fp16")];
+ tensor<int32, [1]> var_357_axes_0 = const()[name = tensor<string, []>("op_357_axes_0"), val = tensor<int32, [1]>([-1])];
+ tensor<fp16, [384]> blocks_2_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10506112)))];
+ tensor<fp16, [384]> blocks_2_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_2_mlp_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10506944)))];
+ tensor<fp16, [1, 1500, 384]> var_357_cast_fp16 = layer_norm(axes = var_357_axes_0, beta = blocks_2_mlp_ln_bias_to_fp16, epsilon = var_283_to_fp16, gamma = blocks_2_mlp_ln_weight_to_fp16, x = x_37_cast_fp16)[name = tensor<string, []>("op_357_cast_fp16")];
+ tensor<fp16, [1536, 384]> var_366_to_fp16 = const()[name = tensor<string, []>("op_366_to_fp16"), val = tensor<fp16, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10507776)))];
+ tensor<fp16, [1536]> var_367_to_fp16 = const()[name = tensor<string, []>("op_367_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11687488)))];
+ tensor<fp16, [1, 1500, 1536]> linear_16_cast_fp16 = linear(bias = var_367_to_fp16, weight = var_366_to_fp16, x = var_357_cast_fp16)[name = tensor<string, []>("linear_16_cast_fp16")];
+ tensor<string, []> x_41_mode_0 = const()[name = tensor<string, []>("x_41_mode_0"), val = tensor<string, []>("EXACT")];
+ tensor<fp16, [1, 1500, 1536]> x_41_cast_fp16 = gelu(mode = x_41_mode_0, x = linear_16_cast_fp16)[name = tensor<string, []>("x_41_cast_fp16")];
+ tensor<fp16, [384, 1536]> var_372_to_fp16 = const()[name = tensor<string, []>("op_372_to_fp16"), val = tensor<fp16, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11690624)))];
+ tensor<fp16, [384]> var_373_to_fp16 = const()[name = tensor<string, []>("op_373_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12870336)))];
+ tensor<fp16, [1, 1500, 384]> linear_17_cast_fp16 = linear(bias = var_373_to_fp16, weight = var_372_to_fp16, x = x_41_cast_fp16)[name = tensor<string, []>("linear_17_cast_fp16")];
+ tensor<fp16, [1, 1500, 384]> x_43_cast_fp16 = add(x = x_37_cast_fp16, y = linear_17_cast_fp16)[name = tensor<string, []>("x_43_cast_fp16")];
+ tensor<int32, []> var_382 = const()[name = tensor<string, []>("op_382"), val = tensor<int32, []>(-1)];
+ tensor<int32, [1]> var_398_axes_0 = const()[name = tensor<string, []>("op_398_axes_0"), val = tensor<int32, [1]>([-1])];
+ tensor<fp16, [384]> blocks_3_attn_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12871168)))];
+ tensor<fp16, [384]> blocks_3_attn_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_attn_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12872000)))];
+ tensor<fp16, []> var_388_to_fp16 = const()[name = tensor<string, []>("op_388_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
+ tensor<fp16, [1, 1500, 384]> var_398_cast_fp16 = layer_norm(axes = var_398_axes_0, beta = blocks_3_attn_ln_bias_to_fp16, epsilon = var_388_to_fp16, gamma = blocks_3_attn_ln_weight_to_fp16, x = x_43_cast_fp16)[name = tensor<string, []>("op_398_cast_fp16")];
+ tensor<fp16, [384, 384]> var_409_to_fp16 = const()[name = tensor<string, []>("op_409_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12872832)))];
+ tensor<fp16, [384]> var_410_to_fp16 = const()[name = tensor<string, []>("op_410_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13167808)))];
+ tensor<fp16, [1, 1500, 384]> linear_18_cast_fp16 = linear(bias = var_410_to_fp16, weight = var_409_to_fp16, x = var_398_cast_fp16)[name = tensor<string, []>("linear_18_cast_fp16")];
+ tensor<fp16, [384, 384]> var_413_to_fp16 = const()[name = tensor<string, []>("op_413_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13168640)))];
+ tensor<fp16, [1, 1500, 384]> linear_19_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = var_413_to_fp16, x = var_398_cast_fp16)[name = tensor<string, []>("linear_19_cast_fp16")];
+ tensor<fp16, [384, 384]> var_417_to_fp16 = const()[name = tensor<string, []>("op_417_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13463616)))];
+ tensor<fp16, [384]> var_418_to_fp16 = const()[name = tensor<string, []>("op_418_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13758592)))];
+ tensor<fp16, [1, 1500, 384]> linear_20_cast_fp16 = linear(bias = var_418_to_fp16, weight = var_417_to_fp16, x = var_398_cast_fp16)[name = tensor<string, []>("linear_20_cast_fp16")];
+ tensor<int32, [4]> var_426 = const()[name = tensor<string, []>("op_426"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
+ tensor<fp16, [1, 1500, 6, 64]> var_427_cast_fp16 = reshape(shape = var_426, x = linear_18_cast_fp16)[name = tensor<string, []>("op_427_cast_fp16")];
+ tensor<fp16, [1, 1, 1, 1]> const_34_to_fp16 = const()[name = tensor<string, []>("const_34_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
+ tensor<fp16, [1, 1500, 6, 64]> q_cast_fp16 = mul(x = var_427_cast_fp16, y = const_34_to_fp16)[name = tensor<string, []>("q_cast_fp16")];
+ tensor<int32, [4]> var_433 = const()[name = tensor<string, []>("op_433"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
+ tensor<fp16, [1, 1500, 6, 64]> var_434_cast_fp16 = reshape(shape = var_433, x = linear_19_cast_fp16)[name = tensor<string, []>("op_434_cast_fp16")];
+ tensor<fp16, [1, 1, 1, 1]> const_35_to_fp16 = const()[name = tensor<string, []>("const_35_to_fp16"), val = tensor<fp16, [1, 1, 1, 1]>([[[[0x1.6ap-2]]]])];
+ tensor<fp16, [1, 1500, 6, 64]> k_cast_fp16 = mul(x = var_434_cast_fp16, y = const_35_to_fp16)[name = tensor<string, []>("k_cast_fp16")];
+ tensor<int32, [4]> var_440 = const()[name = tensor<string, []>("op_440"), val = tensor<int32, [4]>([1, 1500, 6, -1])];
+ tensor<fp16, [1, 1500, 6, 64]> var_441_cast_fp16 = reshape(shape = var_440, x = linear_20_cast_fp16)[name = tensor<string, []>("op_441_cast_fp16")];
+ tensor<int32, [4]> var_442 = const()[name = tensor<string, []>("op_442"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+ tensor<bool, []> qk_transpose_x_0 = const()[name = tensor<string, []>("qk_transpose_x_0"), val = tensor<bool, []>(false)];
+ tensor<bool, []> qk_transpose_y_0 = const()[name = tensor<string, []>("qk_transpose_y_0"), val = tensor<bool, []>(false)];
+ tensor<int32, [4]> transpose_22_perm_0 = const()[name = tensor<string, []>("transpose_22_perm_0"), val = tensor<int32, [4]>([0, 2, -3, -1])];
+ tensor<int32, [4]> transpose_23_perm_0 = const()[name = tensor<string, []>("transpose_23_perm_0"), val = tensor<int32, [4]>([0, 2, -1, -3])];
+ tensor<fp16, [1, 6, 64, 1500]> transpose_25 = transpose(perm = transpose_23_perm_0, x = k_cast_fp16)[name = tensor<string, []>("transpose_25")];
+ tensor<fp16, [1, 6, 1500, 64]> transpose_26 = transpose(perm = transpose_22_perm_0, x = q_cast_fp16)[name = tensor<string, []>("transpose_26")];
+ tensor<fp16, [1, 6, 1500, 1500]> qk_cast_fp16 = matmul(transpose_x = qk_transpose_x_0, transpose_y = qk_transpose_y_0, x = transpose_26, y = transpose_25)[name = tensor<string, []>("qk_cast_fp16")];
+ tensor<fp16, [1, 6, 1500, 1500]> var_446_cast_fp16 = softmax(axis = var_382, x = qk_cast_fp16)[name = tensor<string, []>("op_446_cast_fp16")];
+ tensor<bool, []> var_448_transpose_x_0 = const()[name = tensor<string, []>("op_448_transpose_x_0"), val = tensor<bool, []>(false)];
+ tensor<bool, []> var_448_transpose_y_0 = const()[name = tensor<string, []>("op_448_transpose_y_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [1, 6, 1500, 64]> transpose_27 = transpose(perm = var_442, x = var_441_cast_fp16)[name = tensor<string, []>("transpose_27")];
+ tensor<fp16, [1, 6, 1500, 64]> var_448_cast_fp16 = matmul(transpose_x = var_448_transpose_x_0, transpose_y = var_448_transpose_y_0, x = var_446_cast_fp16, y = transpose_27)[name = tensor<string, []>("op_448_cast_fp16")];
+ tensor<int32, [4]> var_449 = const()[name = tensor<string, []>("op_449"), val = tensor<int32, [4]>([0, 2, 1, 3])];
+ tensor<int32, [3]> concat_3 = const()[name = tensor<string, []>("concat_3"), val = tensor<int32, [3]>([1, 1500, 384])];
+ tensor<fp16, [1, 1500, 6, 64]> transpose_24 = transpose(perm = var_449, x = var_448_cast_fp16)[name = tensor<string, []>("transpose_24")];
+ tensor<fp16, [1, 1500, 384]> x_47_cast_fp16 = reshape(shape = concat_3, x = transpose_24)[name = tensor<string, []>("x_47_cast_fp16")];
+ tensor<fp16, [384, 384]> var_454_to_fp16 = const()[name = tensor<string, []>("op_454_to_fp16"), val = tensor<fp16, [384, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13759424)))];
+ tensor<fp16, [384]> var_455_to_fp16 = const()[name = tensor<string, []>("op_455_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14054400)))];
+ tensor<fp16, [1, 1500, 384]> linear_21_cast_fp16 = linear(bias = var_455_to_fp16, weight = var_454_to_fp16, x = x_47_cast_fp16)[name = tensor<string, []>("linear_21_cast_fp16")];
+ tensor<fp16, [1, 1500, 384]> x_49_cast_fp16 = add(x = x_43_cast_fp16, y = linear_21_cast_fp16)[name = tensor<string, []>("x_49_cast_fp16")];
+ tensor<int32, [1]> var_462_axes_0 = const()[name = tensor<string, []>("op_462_axes_0"), val = tensor<int32, [1]>([-1])];
+ tensor<fp16, [384]> blocks_3_mlp_ln_weight_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14055232)))];
+ tensor<fp16, [384]> blocks_3_mlp_ln_bias_to_fp16 = const()[name = tensor<string, []>("blocks_3_mlp_ln_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14056064)))];
+ tensor<fp16, [1, 1500, 384]> var_462_cast_fp16 = layer_norm(axes = var_462_axes_0, beta = blocks_3_mlp_ln_bias_to_fp16, epsilon = var_388_to_fp16, gamma = blocks_3_mlp_ln_weight_to_fp16, x = x_49_cast_fp16)[name = tensor<string, []>("op_462_cast_fp16")];
+ tensor<fp16, [1536, 384]> var_471_to_fp16 = const()[name = tensor<string, []>("op_471_to_fp16"), val = tensor<fp16, [1536, 384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14056896)))];
+ tensor<fp16, [1536]> var_472_to_fp16 = const()[name = tensor<string, []>("op_472_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15236608)))];
+ tensor<fp16, [1, 1500, 1536]> linear_22_cast_fp16 = linear(bias = var_472_to_fp16, weight = var_471_to_fp16, x = var_462_cast_fp16)[name = tensor<string, []>("linear_22_cast_fp16")];
+ tensor<string, []> x_53_mode_0 = const()[name = tensor<string, []>("x_53_mode_0"), val = tensor<string, []>("EXACT")];
+ tensor<fp16, [1, 1500, 1536]> x_53_cast_fp16 = gelu(mode = x_53_mode_0, x = linear_22_cast_fp16)[name = tensor<string, []>("x_53_cast_fp16")];
+ tensor<fp16, [384, 1536]> var_477_to_fp16 = const()[name = tensor<string, []>("op_477_to_fp16"), val = tensor<fp16, [384, 1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15239744)))];
+ tensor<fp16, [384]> var_478_to_fp16 = const()[name = tensor<string, []>("op_478_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16419456)))];
+ tensor<fp16, [1, 1500, 384]> linear_23_cast_fp16 = linear(bias = var_478_to_fp16, weight = var_477_to_fp16, x = x_53_cast_fp16)[name = tensor<string, []>("linear_23_cast_fp16")];
+ tensor<fp16, [1, 1500, 384]> x_cast_fp16 = add(x = x_49_cast_fp16, y = linear_23_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
+ tensor<int32, [1]> var_491_axes_0 = const()[name = tensor<string, []>("op_491_axes_0"), val = tensor<int32, [1]>([-1])];
+ tensor<fp16, [384]> ln_post_weight_to_fp16 = const()[name = tensor<string, []>("ln_post_weight_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16420288)))];
+ tensor<fp16, [384]> ln_post_bias_to_fp16 = const()[name = tensor<string, []>("ln_post_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16421120)))];
+ tensor<fp16, []> var_482_to_fp16 = const()[name = tensor<string, []>("op_482_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
+ tensor<fp16, [1, 1500, 384]> output = layer_norm(axes = var_491_axes_0, beta = ln_post_bias_to_fp16, epsilon = var_482_to_fp16, gamma = ln_post_weight_to_fp16, x = x_cast_fp16)[name = tensor<string, []>("op_491_cast_fp16")];
+ } -> (output);
+ }
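The two blocks above (blocks 2 and 3 of the tiny encoder) repeat the same pattern as the earlier blocks: a pre-attention layer norm, q/k/v projections with q and k each scaled by 0x1.6ap-2 (about 0.3535, i.e. 64^-0.25, so the q·k product carries the usual 1/sqrt(64) factor), softmax attention over the 1500 audio frames, an output projection with a residual add, then a layer-normed 384 -> 1536 -> 384 MLP with GELU and a second residual. The following is a minimal NumPy sketch of that computation pattern, assuming random placeholder weights rather than the contents of weights/weight.bin; names such as encoder_block and the params dict layout are illustrative and not part of the model package.

import numpy as np

def layer_norm(x, gamma, beta, eps=1e-5):
    # epsilon matches the fp16 constant 0x1.5p-17 (~1.0e-5) used above
    mu = x.mean(-1, keepdims=True)
    var = x.var(-1, keepdims=True)
    return (x - mu) / np.sqrt(var + eps) * gamma + beta

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def encoder_block(x, p, n_head=6):
    n, t, w = x.shape            # (1, 1500, 384) for the tiny encoder
    d = w // n_head              # 64 dims per head
    h = layer_norm(x, p["attn_ln_w"], p["attn_ln_b"])
    # q and k are each scaled by d**-0.25; the key projection carries no effective bias
    q = ((h @ p["wq"].T + p["bq"]) * d**-0.25).reshape(n, t, n_head, d)
    k = ((h @ p["wk"].T) * d**-0.25).reshape(n, t, n_head, d)
    v = (h @ p["wv"].T + p["bv"]).reshape(n, t, n_head, d)
    q, k, v = [a.transpose(0, 2, 1, 3) for a in (q, k, v)]    # (1, 6, 1500, 64)
    att = softmax(q @ k.transpose(0, 1, 3, 2)) @ v            # (1, 6, 1500, 64)
    att = att.transpose(0, 2, 1, 3).reshape(n, t, w)
    x = x + att @ p["wo"].T + p["bo"]                         # first residual
    h = layer_norm(x, p["mlp_ln_w"], p["mlp_ln_b"])
    h = h @ p["w1"].T + p["b1"]                               # 384 -> 1536
    # tanh approximation of the EXACT gelu mode selected in the MIL
    h = 0.5 * h * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (h + 0.044715 * h**3)))
    return x + h @ p["w2"].T + p["b2"]                        # second residual

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w = 384
    p = {
        "attn_ln_w": np.ones(w), "attn_ln_b": np.zeros(w),
        "mlp_ln_w": np.ones(w), "mlp_ln_b": np.zeros(w),
        "wq": 0.02 * rng.standard_normal((w, w)), "bq": np.zeros(w),
        "wk": 0.02 * rng.standard_normal((w, w)),
        "wv": 0.02 * rng.standard_normal((w, w)), "bv": np.zeros(w),
        "wo": 0.02 * rng.standard_normal((w, w)), "bo": np.zeros(w),
        "w1": 0.02 * rng.standard_normal((4 * w, w)), "b1": np.zeros(4 * w),
        "w2": 0.02 * rng.standard_normal((w, 4 * w)), "b2": np.zeros(w),
    }
    x = rng.standard_normal((1, 1500, w))
    print(encoder_block(x, p).shape)   # -> (1, 1500, 384)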
ggml-tiny-encoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb82c7078e49e07a517e598cce7e3b6dded7397efc495be1992ef822570284a4
+ size 16421952
ggml-tiny.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7e4b084cefeebeed66fb9d096a29b836125edbb8456fea5a9c77b4efc085323
+ size 60079860
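The .bin entries in this commit are Git LFS pointer files: the repository stores only the version line, the sha256 oid, and the byte size, while the actual blob is fetched from LFS storage. As a sanity check after downloading, the sketch below (an illustrative helper; the local path "ggml-tiny.bin" is an assumption about where the file was saved) recomputes the hash and size and compares them against the pointer above.

import hashlib
import os
import sys

def verify_lfs_object(path, expected_oid, expected_size):
    # compare file size first, then the streaming sha256 digest
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

ok = verify_lfs_object(
    "ggml-tiny.bin",
    "c7e4b084cefeebeed66fb9d096a29b836125edbb8456fea5a9c77b4efc085323",
    60079860,
)
sys.exit(0 if ok else 1)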
index/base ADDED
@@ -0,0 +1,6 @@
+ ggml-base-encoder.mlmodelc/weights/weight.bin
+ ggml-base-encoder.mlmodelc/metadata.json
+ ggml-base-encoder.mlmodelc/model.mil
+ ggml-base-encoder.mlmodelc/coremldata.bin
+ ggml-base-encoder.mlmodelc/analytics/coremldata.bin
+ ggml-base.bin
index/large-v3 ADDED
@@ -0,0 +1,6 @@
+ ggml-large-v3-encoder.mlmodelc/weights/weight.bin
+ ggml-large-v3-encoder.mlmodelc/metadata.json
+ ggml-large-v3-encoder.mlmodelc/model.mil
+ ggml-large-v3-encoder.mlmodelc/coremldata.bin
+ ggml-large-v3-encoder.mlmodelc/analytics/coremldata.bin
+ ggml-large-v3-q8_0.bin
index/medium ADDED
@@ -0,0 +1,6 @@
+ ggml-medium-encoder.mlmodelc/weights/weight.bin
+ ggml-medium-encoder.mlmodelc/metadata.json
+ ggml-medium-encoder.mlmodelc/model.mil
+ ggml-medium-encoder.mlmodelc/coremldata.bin
+ ggml-medium-encoder.mlmodelc/analytics/coremldata.bin
+ ggml-medium.bin
index/small ADDED
@@ -0,0 +1,6 @@
+ ggml-small-encoder.mlmodelc/weights/weight.bin
+ ggml-small-encoder.mlmodelc/metadata.json
+ ggml-small-encoder.mlmodelc/model.mil
+ ggml-small-encoder.mlmodelc/coremldata.bin
+ ggml-small-encoder.mlmodelc/analytics/coremldata.bin
+ ggml-small.bin
index/tiny ADDED
@@ -0,0 +1,6 @@
+ ggml-tiny-encoder.mlmodelc/weights/weight.bin
+ ggml-tiny-encoder.mlmodelc/metadata.json
+ ggml-tiny-encoder.mlmodelc/model.mil
+ ggml-tiny-encoder.mlmodelc/coremldata.bin
+ ggml-tiny-encoder.mlmodelc/analytics/coremldata.bin
+ ggml-tiny.bin
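The index/* files added at the end of the commit appear to act as simple manifests: one relative path per line, listing the Core ML encoder bundle plus the ggml weights needed for a given model size. A hedged sketch of how such a manifest could be turned into download URLs follows; REPO_ID is a placeholder, since the repository id is not shown in this commit view, and the resolve-URL pattern is the standard Hugging Face one.

from urllib.parse import quote

REPO_ID = "<user>/<repo>"  # assumption: substitute the repo this commit belongs to

def files_from_index(index_path):
    # one relative file path per non-empty line
    with open(index_path, encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]

def download_urls(index_path, repo_id=REPO_ID, revision="main"):
    base = f"https://huggingface.co/{repo_id}/resolve/{revision}/"
    return [base + quote(path) for path in files_from_index(index_path)]

if __name__ == "__main__":
    for url in download_urls("index/tiny"):
        print(url)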