aotrih committed
Commit a570942 · 1 Parent(s): f219137

SpeakerKit Pro v1

Files changed (23)
  1. .gitignore +1 -0
  2. LICENSE_NOTICE.txt +7 -0
  3. README.md +24 -0
  4. speaker_embedder/pyannote-v3/LICENSE_NOTICE.txt +7 -0
  5. speaker_embedder/pyannote-v3/README.txt +6 -0
  6. speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/analytics/coremldata.bin +3 -0
  7. speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/coremldata.bin +3 -0
  8. speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/metadata.json +86 -0
  9. speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/model.mil +473 -0
  10. speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/weights/weight.bin +3 -0
  11. speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/analytics/coremldata.bin +3 -0
  12. speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/coremldata.bin +3 -0
  13. speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/metadata.json +77 -0
  14. speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/model.mil +90 -0
  15. speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/weights/weight.bin +3 -0
  16. speaker_segmenter/pyannote-v3/LICENSE_NOTICE.txt +7 -0
  17. speaker_segmenter/pyannote-v3/README.txt +6 -0
  18. speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/LICENSE_NOTICE.txt +7 -0
  19. speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/analytics/coremldata.bin +3 -0
  20. speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/coremldata.bin +3 -0
  21. speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/metadata.json +132 -0
  22. speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/model.mil +0 -0
  23. speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/weights/weight.bin +3 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
README.md CHANGED
@@ -2,4 +2,28 @@
  license: other
  license_name: argmax-fmod-license
  license_link: https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt
+ pretty_name: SpeakerKit
+ viewer: false
+ library_name: speakerkit
+ tags:
+ - speakerkit
+ - pyannote
+ - diarization
+ - speaker-diarization
+ - whisper
+ - whisperkit
+ - coreml
+ - asr
+ - quantized
+ - automatic-speech-recognition
+ extra_gated_heading: SpeakerKit Pro is now in early access!
+ extra_gated_description: >-
+   SpeakerKit Pro is Argmax's state-of-the-art on-device framework for speaker recognition tasks such as speaker diarization. Please submit your
+   information below to join the waitlist for early access or directly send an
+   email to [[email protected]](mailto:[email protected]).
+ extra_gated_fields:
+   Company: text
+   Work email: text
+   I acknowledge the license notice: checkbox
+ extra_gated_button_content: Submit
  ---
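
Note: since the card above gates downloads behind an access request, pulling these files requires an approved request and an authenticated Hugging Face account. A minimal, hypothetical sketch (not part of this commit) using `huggingface_hub`:

```python
# Hypothetical usage sketch: fetch the SpeakerKit Pro assets after access has
# been granted. Assumes `huggingface_hub` is installed and you are logged in
# (e.g. via `huggingface-cli login`) or pass a token explicitly.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="argmaxinc/speakerkit-pro",
    # Restrict the download to the embedder assets added in this commit;
    # drop allow_patterns to mirror the whole repository.
    allow_patterns=["speaker_embedder/pyannote-v3/*"],
)
print(local_dir)  # local snapshot containing the .mlmodelc bundles
```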
speaker_embedder/pyannote-v3/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
speaker_embedder/pyannote-v3/README.txt ADDED
@@ -0,0 +1,6 @@
+ # License
+
+ Original model weights: https://github.com/wenet-e2e/wespeaker/blob/master/docs/pretrained.md#model-license
+ Argmax-optimized model asset (Assets with `.mlmodelc` extension): https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt
+
+ Please contact [email protected] for licensing SpeakerKit Pro assets
speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcf0cbfb93bef444abf2bc2646b56c0e8975e85ecf36e756ce865398c3b3f2e6
+ size 243
speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:804911154844b88b7bb76e70448c6d8206dbf30664eded18ae6b60be5c81b4e4
+ size 370
speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,86 @@
+ [
+   {
+     "metadataOutputVersion" : "3.0",
+     "storagePrecision" : "Float16",
+     "outputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 3 × 256)",
+         "shortDescription" : "",
+         "shape" : "[1, 3, 256]",
+         "name" : "speaker_embeddings",
+         "type" : "MultiArray"
+       }
+     ],
+     "modelParameters" : [
+
+     ],
+     "specificationVersion" : 7,
+     "mlProgramOperationTypeHistogram" : {
+       "Concat" : 3,
+       "Ios16.mul" : 12,
+       "SliceByIndex" : 3,
+       "Transpose" : 1,
+       "Ios16.sub" : 6,
+       "Ios16.sqrt" : 3,
+       "Stack" : 1,
+       "UpsampleNearestNeighbor" : 1,
+       "Ios16.conv" : 36,
+       "Ios16.add" : 22,
+       "Squeeze" : 1,
+       "Ios16.relu" : 33,
+       "Ios16.realDiv" : 9,
+       "Ios16.reduceSum" : 12,
+       "ExpandDims" : 8,
+       "Ios16.linear" : 1,
+       "Ios16.reshape" : 1
+     },
+     "computePrecision" : "Mixed (Float16, Float32, Int32)",
+     "isUpdatable" : "0",
+     "stateSchema" : [
+
+     ],
+     "availability" : {
+       "macOS" : "13.0",
+       "tvOS" : "16.0",
+       "visionOS" : "1.0",
+       "watchOS" : "9.0",
+       "iOS" : "16.0",
+       "macCatalyst" : "16.0"
+     },
+     "modelType" : {
+       "name" : "MLModelType_mlProgram"
+     },
+     "userDefinedMetadata" : {
+       "com.github.apple.coremltools.source_dialect" : "TorchScript",
+       "com.github.apple.coremltools.source" : "torch==2.5.1",
+       "com.github.apple.coremltools.version" : "8.1"
+     },
+     "inputSchema" : [
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 998 × 80)",
+         "shortDescription" : "",
+         "shape" : "[1, 998, 80]",
+         "name" : "preprocessor_output_1",
+         "type" : "MultiArray"
+       },
+       {
+         "hasShapeFlexibility" : "0",
+         "isOptional" : "0",
+         "dataType" : "Float16",
+         "formattedType" : "MultiArray (Float16 1 × 3 × 589)",
+         "shortDescription" : "",
+         "shape" : "[1, 3, 589]",
+         "name" : "speaker_masks",
+         "type" : "MultiArray"
+       }
+     ],
+     "generatedClassName" : "SpeakerEmbedding",
+     "method" : "predict"
+   }
+ ]
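
The schema above fixes the embedder I/O: fbank features of shape (1, 998, 80) and speaker masks of shape (1, 3, 589) in, speaker embeddings of shape (1, 3, 256) out. As a rough, hypothetical illustration (not SpeakerKit's own API), the compiled bundle can be exercised directly with coremltools on macOS:

```python
# Hypothetical sketch: run the compiled embedder with coremltools (>= 7.0,
# macOS only). The path and the random inputs are placeholders for illustration.
import numpy as np
import coremltools as ct

model = ct.models.CompiledMLModel(
    "speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc",
    compute_units=ct.ComputeUnit.ALL,
)

# Inputs per metadata.json: fbank features (1, 998, 80) and speaker masks (1, 3, 589).
features = np.random.rand(1, 998, 80).astype(np.float32)
masks = np.ones((1, 3, 589), dtype=np.float32)

outputs = model.predict({"preprocessor_output_1": features, "speaker_masks": masks})
print(outputs["speaker_embeddings"].shape)  # expected: (1, 3, 256)
```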
speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/model.mil ADDED
@@ -0,0 +1,473 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.1"}})]
3
+ {
4
+ func main<ios16>(tensor<fp16, [1, 998, 80]> preprocessor_output_1, tensor<fp16, [1, 3, 589]> speaker_masks) {
5
+ tensor<int32, []> var_12 = const()[name = tensor<string, []>("op_12"), val = tensor<int32, []>(1)];
6
+ tensor<int32, [3]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [3]>([0, 2, 1])];
7
+ tensor<int32, [1]> input_1_axes_0 = const()[name = tensor<string, []>("input_1_axes_0"), val = tensor<int32, [1]>([1])];
8
+ tensor<fp16, [1, 80, 998]> fbank_cast_fp16 = transpose(perm = var_22, x = preprocessor_output_1)[name = tensor<string, []>("transpose_0")];
9
+ tensor<fp16, [1, 1, 80, 998]> input_1_cast_fp16 = expand_dims(axes = input_1_axes_0, x = fbank_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
10
+ tensor<string, []> input_3_pad_type_0 = const()[name = tensor<string, []>("input_3_pad_type_0"), val = tensor<string, []>("custom")];
11
+ tensor<int32, [4]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
12
+ tensor<int32, [2]> input_3_strides_0 = const()[name = tensor<string, []>("input_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
13
+ tensor<int32, [2]> input_3_dilations_0 = const()[name = tensor<string, []>("input_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
14
+ tensor<int32, []> input_3_groups_0 = const()[name = tensor<string, []>("input_3_groups_0"), val = tensor<int32, []>(1)];
15
+ tensor<fp16, [32, 1, 3, 3]> const_5_to_fp16 = const()[name = tensor<string, []>("const_5_to_fp16"), val = tensor<fp16, [32, 1, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
16
+ tensor<fp16, [32]> const_6_to_fp16 = const()[name = tensor<string, []>("const_6_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(704)))];
17
+ tensor<fp16, [1, 32, 80, 998]> input_5_cast_fp16 = conv(bias = const_6_to_fp16, dilations = input_3_dilations_0, groups = input_3_groups_0, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = input_3_strides_0, weight = const_5_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
18
+ tensor<fp16, [1, 32, 80, 998]> input_7_cast_fp16 = relu(x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
19
+ tensor<string, []> input_9_pad_type_0 = const()[name = tensor<string, []>("input_9_pad_type_0"), val = tensor<string, []>("custom")];
20
+ tensor<int32, [4]> input_9_pad_0 = const()[name = tensor<string, []>("input_9_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
21
+ tensor<int32, [2]> input_9_strides_0 = const()[name = tensor<string, []>("input_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
22
+ tensor<int32, [2]> input_9_dilations_0 = const()[name = tensor<string, []>("input_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
23
+ tensor<int32, []> input_9_groups_0 = const()[name = tensor<string, []>("input_9_groups_0"), val = tensor<int32, []>(1)];
24
+ tensor<fp16, [32, 32, 3, 3]> const_7_to_fp16 = const()[name = tensor<string, []>("const_7_to_fp16"), val = tensor<fp16, [32, 32, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(832)))];
25
+ tensor<fp16, [32]> const_8_to_fp16 = const()[name = tensor<string, []>("const_8_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(19328)))];
26
+ tensor<fp16, [1, 32, 80, 998]> input_11_cast_fp16 = conv(bias = const_8_to_fp16, dilations = input_9_dilations_0, groups = input_9_groups_0, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = input_9_strides_0, weight = const_7_to_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
27
+ tensor<fp16, [1, 32, 80, 998]> input_13_cast_fp16 = relu(x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
28
+ tensor<string, []> input_15_pad_type_0 = const()[name = tensor<string, []>("input_15_pad_type_0"), val = tensor<string, []>("custom")];
29
+ tensor<int32, [4]> input_15_pad_0 = const()[name = tensor<string, []>("input_15_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
30
+ tensor<int32, [2]> input_15_strides_0 = const()[name = tensor<string, []>("input_15_strides_0"), val = tensor<int32, [2]>([1, 1])];
31
+ tensor<int32, [2]> input_15_dilations_0 = const()[name = tensor<string, []>("input_15_dilations_0"), val = tensor<int32, [2]>([1, 1])];
32
+ tensor<int32, []> input_15_groups_0 = const()[name = tensor<string, []>("input_15_groups_0"), val = tensor<int32, []>(1)];
33
+ tensor<fp16, [32, 32, 3, 3]> const_9_to_fp16 = const()[name = tensor<string, []>("const_9_to_fp16"), val = tensor<fp16, [32, 32, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(19456)))];
34
+ tensor<fp16, [32]> const_10_to_fp16 = const()[name = tensor<string, []>("const_10_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(37952)))];
35
+ tensor<fp16, [1, 32, 80, 998]> out_1_cast_fp16 = conv(bias = const_10_to_fp16, dilations = input_15_dilations_0, groups = input_15_groups_0, pad = input_15_pad_0, pad_type = input_15_pad_type_0, strides = input_15_strides_0, weight = const_9_to_fp16, x = input_13_cast_fp16)[name = tensor<string, []>("out_1_cast_fp16")];
36
+ tensor<fp16, [1, 32, 80, 998]> input_17_cast_fp16 = add(x = out_1_cast_fp16, y = input_7_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
37
+ tensor<fp16, [1, 32, 80, 998]> input_19_cast_fp16 = relu(x = input_17_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
38
+ tensor<string, []> input_21_pad_type_0 = const()[name = tensor<string, []>("input_21_pad_type_0"), val = tensor<string, []>("custom")];
39
+ tensor<int32, [4]> input_21_pad_0 = const()[name = tensor<string, []>("input_21_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
40
+ tensor<int32, [2]> input_21_strides_0 = const()[name = tensor<string, []>("input_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
41
+ tensor<int32, [2]> input_21_dilations_0 = const()[name = tensor<string, []>("input_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
42
+ tensor<int32, []> input_21_groups_0 = const()[name = tensor<string, []>("input_21_groups_0"), val = tensor<int32, []>(1)];
43
+ tensor<fp16, [32, 32, 3, 3]> const_11_to_fp16 = const()[name = tensor<string, []>("const_11_to_fp16"), val = tensor<fp16, [32, 32, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(38080)))];
44
+ tensor<fp16, [32]> const_12_to_fp16 = const()[name = tensor<string, []>("const_12_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56576)))];
45
+ tensor<fp16, [1, 32, 80, 998]> input_23_cast_fp16 = conv(bias = const_12_to_fp16, dilations = input_21_dilations_0, groups = input_21_groups_0, pad = input_21_pad_0, pad_type = input_21_pad_type_0, strides = input_21_strides_0, weight = const_11_to_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
46
+ tensor<fp16, [1, 32, 80, 998]> input_25_cast_fp16 = relu(x = input_23_cast_fp16)[name = tensor<string, []>("input_25_cast_fp16")];
47
+ tensor<string, []> input_27_pad_type_0 = const()[name = tensor<string, []>("input_27_pad_type_0"), val = tensor<string, []>("custom")];
48
+ tensor<int32, [4]> input_27_pad_0 = const()[name = tensor<string, []>("input_27_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
49
+ tensor<int32, [2]> input_27_strides_0 = const()[name = tensor<string, []>("input_27_strides_0"), val = tensor<int32, [2]>([1, 1])];
50
+ tensor<int32, [2]> input_27_dilations_0 = const()[name = tensor<string, []>("input_27_dilations_0"), val = tensor<int32, [2]>([1, 1])];
51
+ tensor<int32, []> input_27_groups_0 = const()[name = tensor<string, []>("input_27_groups_0"), val = tensor<int32, []>(1)];
52
+ tensor<fp16, [32, 32, 3, 3]> const_13_to_fp16 = const()[name = tensor<string, []>("const_13_to_fp16"), val = tensor<fp16, [32, 32, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(56704)))];
53
+ tensor<fp16, [32]> const_14_to_fp16 = const()[name = tensor<string, []>("const_14_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75200)))];
54
+ tensor<fp16, [1, 32, 80, 998]> out_3_cast_fp16 = conv(bias = const_14_to_fp16, dilations = input_27_dilations_0, groups = input_27_groups_0, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = input_27_strides_0, weight = const_13_to_fp16, x = input_25_cast_fp16)[name = tensor<string, []>("out_3_cast_fp16")];
55
+ tensor<fp16, [1, 32, 80, 998]> input_29_cast_fp16 = add(x = out_3_cast_fp16, y = input_19_cast_fp16)[name = tensor<string, []>("input_29_cast_fp16")];
56
+ tensor<fp16, [1, 32, 80, 998]> input_31_cast_fp16 = relu(x = input_29_cast_fp16)[name = tensor<string, []>("input_31_cast_fp16")];
57
+ tensor<string, []> input_33_pad_type_0 = const()[name = tensor<string, []>("input_33_pad_type_0"), val = tensor<string, []>("custom")];
58
+ tensor<int32, [4]> input_33_pad_0 = const()[name = tensor<string, []>("input_33_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
59
+ tensor<int32, [2]> input_33_strides_0 = const()[name = tensor<string, []>("input_33_strides_0"), val = tensor<int32, [2]>([1, 1])];
60
+ tensor<int32, [2]> input_33_dilations_0 = const()[name = tensor<string, []>("input_33_dilations_0"), val = tensor<int32, [2]>([1, 1])];
61
+ tensor<int32, []> input_33_groups_0 = const()[name = tensor<string, []>("input_33_groups_0"), val = tensor<int32, []>(1)];
62
+ tensor<fp16, [32, 32, 3, 3]> const_15_to_fp16 = const()[name = tensor<string, []>("const_15_to_fp16"), val = tensor<fp16, [32, 32, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(75328)))];
63
+ tensor<fp16, [32]> const_16_to_fp16 = const()[name = tensor<string, []>("const_16_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93824)))];
64
+ tensor<fp16, [1, 32, 80, 998]> input_35_cast_fp16 = conv(bias = const_16_to_fp16, dilations = input_33_dilations_0, groups = input_33_groups_0, pad = input_33_pad_0, pad_type = input_33_pad_type_0, strides = input_33_strides_0, weight = const_15_to_fp16, x = input_31_cast_fp16)[name = tensor<string, []>("input_35_cast_fp16")];
65
+ tensor<fp16, [1, 32, 80, 998]> input_37_cast_fp16 = relu(x = input_35_cast_fp16)[name = tensor<string, []>("input_37_cast_fp16")];
66
+ tensor<string, []> input_39_pad_type_0 = const()[name = tensor<string, []>("input_39_pad_type_0"), val = tensor<string, []>("custom")];
67
+ tensor<int32, [4]> input_39_pad_0 = const()[name = tensor<string, []>("input_39_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
68
+ tensor<int32, [2]> input_39_strides_0 = const()[name = tensor<string, []>("input_39_strides_0"), val = tensor<int32, [2]>([1, 1])];
69
+ tensor<int32, [2]> input_39_dilations_0 = const()[name = tensor<string, []>("input_39_dilations_0"), val = tensor<int32, [2]>([1, 1])];
70
+ tensor<int32, []> input_39_groups_0 = const()[name = tensor<string, []>("input_39_groups_0"), val = tensor<int32, []>(1)];
71
+ tensor<fp16, [32, 32, 3, 3]> const_17_to_fp16 = const()[name = tensor<string, []>("const_17_to_fp16"), val = tensor<fp16, [32, 32, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(93952)))];
72
+ tensor<fp16, [32]> const_18_to_fp16 = const()[name = tensor<string, []>("const_18_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(112448)))];
73
+ tensor<fp16, [1, 32, 80, 998]> out_5_cast_fp16 = conv(bias = const_18_to_fp16, dilations = input_39_dilations_0, groups = input_39_groups_0, pad = input_39_pad_0, pad_type = input_39_pad_type_0, strides = input_39_strides_0, weight = const_17_to_fp16, x = input_37_cast_fp16)[name = tensor<string, []>("out_5_cast_fp16")];
74
+ tensor<fp16, [1, 32, 80, 998]> input_41_cast_fp16 = add(x = out_5_cast_fp16, y = input_31_cast_fp16)[name = tensor<string, []>("input_41_cast_fp16")];
75
+ tensor<fp16, [1, 32, 80, 998]> input_43_cast_fp16 = relu(x = input_41_cast_fp16)[name = tensor<string, []>("input_43_cast_fp16")];
76
+ tensor<string, []> input_45_pad_type_0 = const()[name = tensor<string, []>("input_45_pad_type_0"), val = tensor<string, []>("custom")];
77
+ tensor<int32, [4]> input_45_pad_0 = const()[name = tensor<string, []>("input_45_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
78
+ tensor<int32, [2]> input_45_strides_0 = const()[name = tensor<string, []>("input_45_strides_0"), val = tensor<int32, [2]>([2, 2])];
79
+ tensor<int32, [2]> input_45_dilations_0 = const()[name = tensor<string, []>("input_45_dilations_0"), val = tensor<int32, [2]>([1, 1])];
80
+ tensor<int32, []> input_45_groups_0 = const()[name = tensor<string, []>("input_45_groups_0"), val = tensor<int32, []>(1)];
81
+ tensor<fp16, [64, 32, 3, 3]> const_19_to_fp16 = const()[name = tensor<string, []>("const_19_to_fp16"), val = tensor<fp16, [64, 32, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(112576)))];
82
+ tensor<fp16, [64]> const_20_to_fp16 = const()[name = tensor<string, []>("const_20_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(149504)))];
83
+ tensor<fp16, [1, 64, 40, 499]> input_47_cast_fp16 = conv(bias = const_20_to_fp16, dilations = input_45_dilations_0, groups = input_45_groups_0, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = input_45_strides_0, weight = const_19_to_fp16, x = input_43_cast_fp16)[name = tensor<string, []>("input_47_cast_fp16")];
84
+ tensor<fp16, [1, 64, 40, 499]> input_49_cast_fp16 = relu(x = input_47_cast_fp16)[name = tensor<string, []>("input_49_cast_fp16")];
85
+ tensor<string, []> input_51_pad_type_0 = const()[name = tensor<string, []>("input_51_pad_type_0"), val = tensor<string, []>("custom")];
86
+ tensor<int32, [4]> input_51_pad_0 = const()[name = tensor<string, []>("input_51_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
87
+ tensor<int32, [2]> input_51_strides_0 = const()[name = tensor<string, []>("input_51_strides_0"), val = tensor<int32, [2]>([1, 1])];
88
+ tensor<int32, [2]> input_51_dilations_0 = const()[name = tensor<string, []>("input_51_dilations_0"), val = tensor<int32, [2]>([1, 1])];
89
+ tensor<int32, []> input_51_groups_0 = const()[name = tensor<string, []>("input_51_groups_0"), val = tensor<int32, []>(1)];
90
+ tensor<fp16, [64, 64, 3, 3]> const_21_to_fp16 = const()[name = tensor<string, []>("const_21_to_fp16"), val = tensor<fp16, [64, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(149696)))];
91
+ tensor<fp16, [64]> const_22_to_fp16 = const()[name = tensor<string, []>("const_22_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(223488)))];
92
+ tensor<fp16, [1, 64, 40, 499]> out_7_cast_fp16 = conv(bias = const_22_to_fp16, dilations = input_51_dilations_0, groups = input_51_groups_0, pad = input_51_pad_0, pad_type = input_51_pad_type_0, strides = input_51_strides_0, weight = const_21_to_fp16, x = input_49_cast_fp16)[name = tensor<string, []>("out_7_cast_fp16")];
93
+ tensor<string, []> input_53_pad_type_0 = const()[name = tensor<string, []>("input_53_pad_type_0"), val = tensor<string, []>("valid")];
94
+ tensor<int32, [2]> input_53_strides_0 = const()[name = tensor<string, []>("input_53_strides_0"), val = tensor<int32, [2]>([2, 2])];
95
+ tensor<int32, [4]> input_53_pad_0 = const()[name = tensor<string, []>("input_53_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
96
+ tensor<int32, [2]> input_53_dilations_0 = const()[name = tensor<string, []>("input_53_dilations_0"), val = tensor<int32, [2]>([1, 1])];
97
+ tensor<int32, []> input_53_groups_0 = const()[name = tensor<string, []>("input_53_groups_0"), val = tensor<int32, []>(1)];
98
+ tensor<fp16, [64, 32, 1, 1]> const_23_to_fp16 = const()[name = tensor<string, []>("const_23_to_fp16"), val = tensor<fp16, [64, 32, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(223680)))];
99
+ tensor<fp16, [64]> const_24_to_fp16 = const()[name = tensor<string, []>("const_24_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(227840)))];
100
+ tensor<fp16, [1, 64, 40, 499]> var_171_cast_fp16 = conv(bias = const_24_to_fp16, dilations = input_53_dilations_0, groups = input_53_groups_0, pad = input_53_pad_0, pad_type = input_53_pad_type_0, strides = input_53_strides_0, weight = const_23_to_fp16, x = input_43_cast_fp16)[name = tensor<string, []>("op_171_cast_fp16")];
101
+ tensor<fp16, [1, 64, 40, 499]> input_55_cast_fp16 = add(x = out_7_cast_fp16, y = var_171_cast_fp16)[name = tensor<string, []>("input_55_cast_fp16")];
102
+ tensor<fp16, [1, 64, 40, 499]> input_57_cast_fp16 = relu(x = input_55_cast_fp16)[name = tensor<string, []>("input_57_cast_fp16")];
103
+ tensor<string, []> input_59_pad_type_0 = const()[name = tensor<string, []>("input_59_pad_type_0"), val = tensor<string, []>("custom")];
104
+ tensor<int32, [4]> input_59_pad_0 = const()[name = tensor<string, []>("input_59_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
105
+ tensor<int32, [2]> input_59_strides_0 = const()[name = tensor<string, []>("input_59_strides_0"), val = tensor<int32, [2]>([1, 1])];
106
+ tensor<int32, [2]> input_59_dilations_0 = const()[name = tensor<string, []>("input_59_dilations_0"), val = tensor<int32, [2]>([1, 1])];
107
+ tensor<int32, []> input_59_groups_0 = const()[name = tensor<string, []>("input_59_groups_0"), val = tensor<int32, []>(1)];
108
+ tensor<fp16, [64, 64, 3, 3]> const_25_to_fp16 = const()[name = tensor<string, []>("const_25_to_fp16"), val = tensor<fp16, [64, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(228032)))];
109
+ tensor<fp16, [64]> const_26_to_fp16 = const()[name = tensor<string, []>("const_26_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(301824)))];
110
+ tensor<fp16, [1, 64, 40, 499]> input_61_cast_fp16 = conv(bias = const_26_to_fp16, dilations = input_59_dilations_0, groups = input_59_groups_0, pad = input_59_pad_0, pad_type = input_59_pad_type_0, strides = input_59_strides_0, weight = const_25_to_fp16, x = input_57_cast_fp16)[name = tensor<string, []>("input_61_cast_fp16")];
111
+ tensor<fp16, [1, 64, 40, 499]> input_63_cast_fp16 = relu(x = input_61_cast_fp16)[name = tensor<string, []>("input_63_cast_fp16")];
112
+ tensor<string, []> input_65_pad_type_0 = const()[name = tensor<string, []>("input_65_pad_type_0"), val = tensor<string, []>("custom")];
113
+ tensor<int32, [4]> input_65_pad_0 = const()[name = tensor<string, []>("input_65_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
114
+ tensor<int32, [2]> input_65_strides_0 = const()[name = tensor<string, []>("input_65_strides_0"), val = tensor<int32, [2]>([1, 1])];
115
+ tensor<int32, [2]> input_65_dilations_0 = const()[name = tensor<string, []>("input_65_dilations_0"), val = tensor<int32, [2]>([1, 1])];
116
+ tensor<int32, []> input_65_groups_0 = const()[name = tensor<string, []>("input_65_groups_0"), val = tensor<int32, []>(1)];
117
+ tensor<fp16, [64, 64, 3, 3]> const_27_to_fp16 = const()[name = tensor<string, []>("const_27_to_fp16"), val = tensor<fp16, [64, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(302016)))];
118
+ tensor<fp16, [64]> const_28_to_fp16 = const()[name = tensor<string, []>("const_28_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(375808)))];
119
+ tensor<fp16, [1, 64, 40, 499]> out_9_cast_fp16 = conv(bias = const_28_to_fp16, dilations = input_65_dilations_0, groups = input_65_groups_0, pad = input_65_pad_0, pad_type = input_65_pad_type_0, strides = input_65_strides_0, weight = const_27_to_fp16, x = input_63_cast_fp16)[name = tensor<string, []>("out_9_cast_fp16")];
120
+ tensor<fp16, [1, 64, 40, 499]> input_67_cast_fp16 = add(x = out_9_cast_fp16, y = input_57_cast_fp16)[name = tensor<string, []>("input_67_cast_fp16")];
121
+ tensor<fp16, [1, 64, 40, 499]> input_69_cast_fp16 = relu(x = input_67_cast_fp16)[name = tensor<string, []>("input_69_cast_fp16")];
122
+ tensor<string, []> input_71_pad_type_0 = const()[name = tensor<string, []>("input_71_pad_type_0"), val = tensor<string, []>("custom")];
123
+ tensor<int32, [4]> input_71_pad_0 = const()[name = tensor<string, []>("input_71_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
124
+ tensor<int32, [2]> input_71_strides_0 = const()[name = tensor<string, []>("input_71_strides_0"), val = tensor<int32, [2]>([1, 1])];
125
+ tensor<int32, [2]> input_71_dilations_0 = const()[name = tensor<string, []>("input_71_dilations_0"), val = tensor<int32, [2]>([1, 1])];
126
+ tensor<int32, []> input_71_groups_0 = const()[name = tensor<string, []>("input_71_groups_0"), val = tensor<int32, []>(1)];
127
+ tensor<fp16, [64, 64, 3, 3]> const_29_to_fp16 = const()[name = tensor<string, []>("const_29_to_fp16"), val = tensor<fp16, [64, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(376000)))];
128
+ tensor<fp16, [64]> const_30_to_fp16 = const()[name = tensor<string, []>("const_30_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(449792)))];
129
+ tensor<fp16, [1, 64, 40, 499]> input_73_cast_fp16 = conv(bias = const_30_to_fp16, dilations = input_71_dilations_0, groups = input_71_groups_0, pad = input_71_pad_0, pad_type = input_71_pad_type_0, strides = input_71_strides_0, weight = const_29_to_fp16, x = input_69_cast_fp16)[name = tensor<string, []>("input_73_cast_fp16")];
130
+ tensor<fp16, [1, 64, 40, 499]> input_75_cast_fp16 = relu(x = input_73_cast_fp16)[name = tensor<string, []>("input_75_cast_fp16")];
131
+ tensor<string, []> input_77_pad_type_0 = const()[name = tensor<string, []>("input_77_pad_type_0"), val = tensor<string, []>("custom")];
132
+ tensor<int32, [4]> input_77_pad_0 = const()[name = tensor<string, []>("input_77_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
133
+ tensor<int32, [2]> input_77_strides_0 = const()[name = tensor<string, []>("input_77_strides_0"), val = tensor<int32, [2]>([1, 1])];
134
+ tensor<int32, [2]> input_77_dilations_0 = const()[name = tensor<string, []>("input_77_dilations_0"), val = tensor<int32, [2]>([1, 1])];
135
+ tensor<int32, []> input_77_groups_0 = const()[name = tensor<string, []>("input_77_groups_0"), val = tensor<int32, []>(1)];
136
+ tensor<fp16, [64, 64, 3, 3]> const_31_to_fp16 = const()[name = tensor<string, []>("const_31_to_fp16"), val = tensor<fp16, [64, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(449984)))];
137
+ tensor<fp16, [64]> const_32_to_fp16 = const()[name = tensor<string, []>("const_32_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(523776)))];
138
+ tensor<fp16, [1, 64, 40, 499]> out_11_cast_fp16 = conv(bias = const_32_to_fp16, dilations = input_77_dilations_0, groups = input_77_groups_0, pad = input_77_pad_0, pad_type = input_77_pad_type_0, strides = input_77_strides_0, weight = const_31_to_fp16, x = input_75_cast_fp16)[name = tensor<string, []>("out_11_cast_fp16")];
139
+ tensor<fp16, [1, 64, 40, 499]> input_79_cast_fp16 = add(x = out_11_cast_fp16, y = input_69_cast_fp16)[name = tensor<string, []>("input_79_cast_fp16")];
140
+ tensor<fp16, [1, 64, 40, 499]> input_81_cast_fp16 = relu(x = input_79_cast_fp16)[name = tensor<string, []>("input_81_cast_fp16")];
141
+ tensor<string, []> input_83_pad_type_0 = const()[name = tensor<string, []>("input_83_pad_type_0"), val = tensor<string, []>("custom")];
142
+ tensor<int32, [4]> input_83_pad_0 = const()[name = tensor<string, []>("input_83_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
143
+ tensor<int32, [2]> input_83_strides_0 = const()[name = tensor<string, []>("input_83_strides_0"), val = tensor<int32, [2]>([1, 1])];
144
+ tensor<int32, [2]> input_83_dilations_0 = const()[name = tensor<string, []>("input_83_dilations_0"), val = tensor<int32, [2]>([1, 1])];
145
+ tensor<int32, []> input_83_groups_0 = const()[name = tensor<string, []>("input_83_groups_0"), val = tensor<int32, []>(1)];
146
+ tensor<fp16, [64, 64, 3, 3]> const_33_to_fp16 = const()[name = tensor<string, []>("const_33_to_fp16"), val = tensor<fp16, [64, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(523968)))];
147
+ tensor<fp16, [64]> const_34_to_fp16 = const()[name = tensor<string, []>("const_34_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(597760)))];
148
+ tensor<fp16, [1, 64, 40, 499]> input_85_cast_fp16 = conv(bias = const_34_to_fp16, dilations = input_83_dilations_0, groups = input_83_groups_0, pad = input_83_pad_0, pad_type = input_83_pad_type_0, strides = input_83_strides_0, weight = const_33_to_fp16, x = input_81_cast_fp16)[name = tensor<string, []>("input_85_cast_fp16")];
149
+ tensor<fp16, [1, 64, 40, 499]> input_87_cast_fp16 = relu(x = input_85_cast_fp16)[name = tensor<string, []>("input_87_cast_fp16")];
150
+ tensor<string, []> input_89_pad_type_0 = const()[name = tensor<string, []>("input_89_pad_type_0"), val = tensor<string, []>("custom")];
151
+ tensor<int32, [4]> input_89_pad_0 = const()[name = tensor<string, []>("input_89_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
152
+ tensor<int32, [2]> input_89_strides_0 = const()[name = tensor<string, []>("input_89_strides_0"), val = tensor<int32, [2]>([1, 1])];
153
+ tensor<int32, [2]> input_89_dilations_0 = const()[name = tensor<string, []>("input_89_dilations_0"), val = tensor<int32, [2]>([1, 1])];
154
+ tensor<int32, []> input_89_groups_0 = const()[name = tensor<string, []>("input_89_groups_0"), val = tensor<int32, []>(1)];
155
+ tensor<fp16, [64, 64, 3, 3]> const_35_to_fp16 = const()[name = tensor<string, []>("const_35_to_fp16"), val = tensor<fp16, [64, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(597952)))];
156
+ tensor<fp16, [64]> const_36_to_fp16 = const()[name = tensor<string, []>("const_36_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(671744)))];
157
+ tensor<fp16, [1, 64, 40, 499]> out_13_cast_fp16 = conv(bias = const_36_to_fp16, dilations = input_89_dilations_0, groups = input_89_groups_0, pad = input_89_pad_0, pad_type = input_89_pad_type_0, strides = input_89_strides_0, weight = const_35_to_fp16, x = input_87_cast_fp16)[name = tensor<string, []>("out_13_cast_fp16")];
158
+ tensor<fp16, [1, 64, 40, 499]> input_91_cast_fp16 = add(x = out_13_cast_fp16, y = input_81_cast_fp16)[name = tensor<string, []>("input_91_cast_fp16")];
159
+ tensor<fp16, [1, 64, 40, 499]> input_93_cast_fp16 = relu(x = input_91_cast_fp16)[name = tensor<string, []>("input_93_cast_fp16")];
160
+ tensor<string, []> input_95_pad_type_0 = const()[name = tensor<string, []>("input_95_pad_type_0"), val = tensor<string, []>("custom")];
161
+ tensor<int32, [4]> input_95_pad_0 = const()[name = tensor<string, []>("input_95_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
162
+ tensor<int32, [2]> input_95_strides_0 = const()[name = tensor<string, []>("input_95_strides_0"), val = tensor<int32, [2]>([2, 2])];
163
+ tensor<int32, [2]> input_95_dilations_0 = const()[name = tensor<string, []>("input_95_dilations_0"), val = tensor<int32, [2]>([1, 1])];
164
+ tensor<int32, []> input_95_groups_0 = const()[name = tensor<string, []>("input_95_groups_0"), val = tensor<int32, []>(1)];
165
+ tensor<fp16, [128, 64, 3, 3]> const_37_to_fp16 = const()[name = tensor<string, []>("const_37_to_fp16"), val = tensor<fp16, [128, 64, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(671936)))];
166
+ tensor<fp16, [128]> const_38_to_fp16 = const()[name = tensor<string, []>("const_38_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(819456)))];
167
+ tensor<fp16, [1, 128, 20, 250]> input_97_cast_fp16 = conv(bias = const_38_to_fp16, dilations = input_95_dilations_0, groups = input_95_groups_0, pad = input_95_pad_0, pad_type = input_95_pad_type_0, strides = input_95_strides_0, weight = const_37_to_fp16, x = input_93_cast_fp16)[name = tensor<string, []>("input_97_cast_fp16")];
168
+ tensor<fp16, [1, 128, 20, 250]> input_99_cast_fp16 = relu(x = input_97_cast_fp16)[name = tensor<string, []>("input_99_cast_fp16")];
169
+ tensor<string, []> input_101_pad_type_0 = const()[name = tensor<string, []>("input_101_pad_type_0"), val = tensor<string, []>("custom")];
170
+ tensor<int32, [4]> input_101_pad_0 = const()[name = tensor<string, []>("input_101_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
171
+ tensor<int32, [2]> input_101_strides_0 = const()[name = tensor<string, []>("input_101_strides_0"), val = tensor<int32, [2]>([1, 1])];
172
+ tensor<int32, [2]> input_101_dilations_0 = const()[name = tensor<string, []>("input_101_dilations_0"), val = tensor<int32, [2]>([1, 1])];
173
+ tensor<int32, []> input_101_groups_0 = const()[name = tensor<string, []>("input_101_groups_0"), val = tensor<int32, []>(1)];
174
+ tensor<fp16, [128, 128, 3, 3]> const_39_to_fp16 = const()[name = tensor<string, []>("const_39_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(819776)))];
175
+ tensor<fp16, [128]> const_40_to_fp16 = const()[name = tensor<string, []>("const_40_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1114752)))];
176
+ tensor<fp16, [1, 128, 20, 250]> out_15_cast_fp16 = conv(bias = const_40_to_fp16, dilations = input_101_dilations_0, groups = input_101_groups_0, pad = input_101_pad_0, pad_type = input_101_pad_type_0, strides = input_101_strides_0, weight = const_39_to_fp16, x = input_99_cast_fp16)[name = tensor<string, []>("out_15_cast_fp16")];
177
+ tensor<string, []> input_103_pad_type_0 = const()[name = tensor<string, []>("input_103_pad_type_0"), val = tensor<string, []>("valid")];
178
+ tensor<int32, [2]> input_103_strides_0 = const()[name = tensor<string, []>("input_103_strides_0"), val = tensor<int32, [2]>([2, 2])];
179
+ tensor<int32, [4]> input_103_pad_0 = const()[name = tensor<string, []>("input_103_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
180
+ tensor<int32, [2]> input_103_dilations_0 = const()[name = tensor<string, []>("input_103_dilations_0"), val = tensor<int32, [2]>([1, 1])];
181
+ tensor<int32, []> input_103_groups_0 = const()[name = tensor<string, []>("input_103_groups_0"), val = tensor<int32, []>(1)];
182
+ tensor<fp16, [128, 64, 1, 1]> const_41_to_fp16 = const()[name = tensor<string, []>("const_41_to_fp16"), val = tensor<fp16, [128, 64, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1115072)))];
183
+ tensor<fp16, [128]> const_42_to_fp16 = const()[name = tensor<string, []>("const_42_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1131520)))];
184
+ tensor<fp16, [1, 128, 20, 250]> var_307_cast_fp16 = conv(bias = const_42_to_fp16, dilations = input_103_dilations_0, groups = input_103_groups_0, pad = input_103_pad_0, pad_type = input_103_pad_type_0, strides = input_103_strides_0, weight = const_41_to_fp16, x = input_93_cast_fp16)[name = tensor<string, []>("op_307_cast_fp16")];
185
+ tensor<fp16, [1, 128, 20, 250]> input_105_cast_fp16 = add(x = out_15_cast_fp16, y = var_307_cast_fp16)[name = tensor<string, []>("input_105_cast_fp16")];
186
+ tensor<fp16, [1, 128, 20, 250]> input_107_cast_fp16 = relu(x = input_105_cast_fp16)[name = tensor<string, []>("input_107_cast_fp16")];
187
+ tensor<string, []> input_109_pad_type_0 = const()[name = tensor<string, []>("input_109_pad_type_0"), val = tensor<string, []>("custom")];
188
+ tensor<int32, [4]> input_109_pad_0 = const()[name = tensor<string, []>("input_109_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
189
+ tensor<int32, [2]> input_109_strides_0 = const()[name = tensor<string, []>("input_109_strides_0"), val = tensor<int32, [2]>([1, 1])];
190
+ tensor<int32, [2]> input_109_dilations_0 = const()[name = tensor<string, []>("input_109_dilations_0"), val = tensor<int32, [2]>([1, 1])];
191
+ tensor<int32, []> input_109_groups_0 = const()[name = tensor<string, []>("input_109_groups_0"), val = tensor<int32, []>(1)];
192
+ tensor<fp16, [128, 128, 3, 3]> const_43_to_fp16 = const()[name = tensor<string, []>("const_43_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1131840)))];
193
+ tensor<fp16, [128]> const_44_to_fp16 = const()[name = tensor<string, []>("const_44_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1426816)))];
194
+ tensor<fp16, [1, 128, 20, 250]> input_111_cast_fp16 = conv(bias = const_44_to_fp16, dilations = input_109_dilations_0, groups = input_109_groups_0, pad = input_109_pad_0, pad_type = input_109_pad_type_0, strides = input_109_strides_0, weight = const_43_to_fp16, x = input_107_cast_fp16)[name = tensor<string, []>("input_111_cast_fp16")];
195
+ tensor<fp16, [1, 128, 20, 250]> input_113_cast_fp16 = relu(x = input_111_cast_fp16)[name = tensor<string, []>("input_113_cast_fp16")];
196
+ tensor<string, []> input_115_pad_type_0 = const()[name = tensor<string, []>("input_115_pad_type_0"), val = tensor<string, []>("custom")];
197
+ tensor<int32, [4]> input_115_pad_0 = const()[name = tensor<string, []>("input_115_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
198
+ tensor<int32, [2]> input_115_strides_0 = const()[name = tensor<string, []>("input_115_strides_0"), val = tensor<int32, [2]>([1, 1])];
199
+ tensor<int32, [2]> input_115_dilations_0 = const()[name = tensor<string, []>("input_115_dilations_0"), val = tensor<int32, [2]>([1, 1])];
200
+ tensor<int32, []> input_115_groups_0 = const()[name = tensor<string, []>("input_115_groups_0"), val = tensor<int32, []>(1)];
201
+ tensor<fp16, [128, 128, 3, 3]> const_45_to_fp16 = const()[name = tensor<string, []>("const_45_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1427136)))];
202
+ tensor<fp16, [128]> const_46_to_fp16 = const()[name = tensor<string, []>("const_46_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1722112)))];
203
+ tensor<fp16, [1, 128, 20, 250]> out_17_cast_fp16 = conv(bias = const_46_to_fp16, dilations = input_115_dilations_0, groups = input_115_groups_0, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = input_115_strides_0, weight = const_45_to_fp16, x = input_113_cast_fp16)[name = tensor<string, []>("out_17_cast_fp16")];
204
+ tensor<fp16, [1, 128, 20, 250]> input_117_cast_fp16 = add(x = out_17_cast_fp16, y = input_107_cast_fp16)[name = tensor<string, []>("input_117_cast_fp16")];
205
+ tensor<fp16, [1, 128, 20, 250]> input_119_cast_fp16 = relu(x = input_117_cast_fp16)[name = tensor<string, []>("input_119_cast_fp16")];
206
+ tensor<string, []> input_121_pad_type_0 = const()[name = tensor<string, []>("input_121_pad_type_0"), val = tensor<string, []>("custom")];
207
+ tensor<int32, [4]> input_121_pad_0 = const()[name = tensor<string, []>("input_121_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
208
+ tensor<int32, [2]> input_121_strides_0 = const()[name = tensor<string, []>("input_121_strides_0"), val = tensor<int32, [2]>([1, 1])];
209
+ tensor<int32, [2]> input_121_dilations_0 = const()[name = tensor<string, []>("input_121_dilations_0"), val = tensor<int32, [2]>([1, 1])];
210
+ tensor<int32, []> input_121_groups_0 = const()[name = tensor<string, []>("input_121_groups_0"), val = tensor<int32, []>(1)];
211
+ tensor<fp16, [128, 128, 3, 3]> const_47_to_fp16 = const()[name = tensor<string, []>("const_47_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1722432)))];
212
+ tensor<fp16, [128]> const_48_to_fp16 = const()[name = tensor<string, []>("const_48_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2017408)))];
213
+ tensor<fp16, [1, 128, 20, 250]> input_123_cast_fp16 = conv(bias = const_48_to_fp16, dilations = input_121_dilations_0, groups = input_121_groups_0, pad = input_121_pad_0, pad_type = input_121_pad_type_0, strides = input_121_strides_0, weight = const_47_to_fp16, x = input_119_cast_fp16)[name = tensor<string, []>("input_123_cast_fp16")];
214
+ tensor<fp16, [1, 128, 20, 250]> input_125_cast_fp16 = relu(x = input_123_cast_fp16)[name = tensor<string, []>("input_125_cast_fp16")];
215
+ tensor<string, []> input_127_pad_type_0 = const()[name = tensor<string, []>("input_127_pad_type_0"), val = tensor<string, []>("custom")];
216
+ tensor<int32, [4]> input_127_pad_0 = const()[name = tensor<string, []>("input_127_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
217
+ tensor<int32, [2]> input_127_strides_0 = const()[name = tensor<string, []>("input_127_strides_0"), val = tensor<int32, [2]>([1, 1])];
218
+ tensor<int32, [2]> input_127_dilations_0 = const()[name = tensor<string, []>("input_127_dilations_0"), val = tensor<int32, [2]>([1, 1])];
219
+ tensor<int32, []> input_127_groups_0 = const()[name = tensor<string, []>("input_127_groups_0"), val = tensor<int32, []>(1)];
220
+ tensor<fp16, [128, 128, 3, 3]> const_49_to_fp16 = const()[name = tensor<string, []>("const_49_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2017728)))];
221
+ tensor<fp16, [128]> const_50_to_fp16 = const()[name = tensor<string, []>("const_50_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2312704)))];
222
+ tensor<fp16, [1, 128, 20, 250]> out_19_cast_fp16 = conv(bias = const_50_to_fp16, dilations = input_127_dilations_0, groups = input_127_groups_0, pad = input_127_pad_0, pad_type = input_127_pad_type_0, strides = input_127_strides_0, weight = const_49_to_fp16, x = input_125_cast_fp16)[name = tensor<string, []>("out_19_cast_fp16")];
223
+ tensor<fp16, [1, 128, 20, 250]> input_129_cast_fp16 = add(x = out_19_cast_fp16, y = input_119_cast_fp16)[name = tensor<string, []>("input_129_cast_fp16")];
224
+ tensor<fp16, [1, 128, 20, 250]> input_131_cast_fp16 = relu(x = input_129_cast_fp16)[name = tensor<string, []>("input_131_cast_fp16")];
225
+ tensor<string, []> input_133_pad_type_0 = const()[name = tensor<string, []>("input_133_pad_type_0"), val = tensor<string, []>("custom")];
226
+ tensor<int32, [4]> input_133_pad_0 = const()[name = tensor<string, []>("input_133_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
227
+ tensor<int32, [2]> input_133_strides_0 = const()[name = tensor<string, []>("input_133_strides_0"), val = tensor<int32, [2]>([1, 1])];
228
+ tensor<int32, [2]> input_133_dilations_0 = const()[name = tensor<string, []>("input_133_dilations_0"), val = tensor<int32, [2]>([1, 1])];
229
+ tensor<int32, []> input_133_groups_0 = const()[name = tensor<string, []>("input_133_groups_0"), val = tensor<int32, []>(1)];
230
+ tensor<fp16, [128, 128, 3, 3]> const_51_to_fp16 = const()[name = tensor<string, []>("const_51_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2313024)))];
231
+ tensor<fp16, [128]> const_52_to_fp16 = const()[name = tensor<string, []>("const_52_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2608000)))];
232
+ tensor<fp16, [1, 128, 20, 250]> input_135_cast_fp16 = conv(bias = const_52_to_fp16, dilations = input_133_dilations_0, groups = input_133_groups_0, pad = input_133_pad_0, pad_type = input_133_pad_type_0, strides = input_133_strides_0, weight = const_51_to_fp16, x = input_131_cast_fp16)[name = tensor<string, []>("input_135_cast_fp16")];
233
+ tensor<fp16, [1, 128, 20, 250]> input_137_cast_fp16 = relu(x = input_135_cast_fp16)[name = tensor<string, []>("input_137_cast_fp16")];
234
+ tensor<string, []> input_139_pad_type_0 = const()[name = tensor<string, []>("input_139_pad_type_0"), val = tensor<string, []>("custom")];
235
+ tensor<int32, [4]> input_139_pad_0 = const()[name = tensor<string, []>("input_139_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
236
+ tensor<int32, [2]> input_139_strides_0 = const()[name = tensor<string, []>("input_139_strides_0"), val = tensor<int32, [2]>([1, 1])];
237
+ tensor<int32, [2]> input_139_dilations_0 = const()[name = tensor<string, []>("input_139_dilations_0"), val = tensor<int32, [2]>([1, 1])];
238
+ tensor<int32, []> input_139_groups_0 = const()[name = tensor<string, []>("input_139_groups_0"), val = tensor<int32, []>(1)];
239
+ tensor<fp16, [128, 128, 3, 3]> const_53_to_fp16 = const()[name = tensor<string, []>("const_53_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2608320)))];
240
+ tensor<fp16, [128]> const_54_to_fp16 = const()[name = tensor<string, []>("const_54_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2903296)))];
241
+ tensor<fp16, [1, 128, 20, 250]> out_21_cast_fp16 = conv(bias = const_54_to_fp16, dilations = input_139_dilations_0, groups = input_139_groups_0, pad = input_139_pad_0, pad_type = input_139_pad_type_0, strides = input_139_strides_0, weight = const_53_to_fp16, x = input_137_cast_fp16)[name = tensor<string, []>("out_21_cast_fp16")];
242
+ tensor<fp16, [1, 128, 20, 250]> input_141_cast_fp16 = add(x = out_21_cast_fp16, y = input_131_cast_fp16)[name = tensor<string, []>("input_141_cast_fp16")];
243
+ tensor<fp16, [1, 128, 20, 250]> input_143_cast_fp16 = relu(x = input_141_cast_fp16)[name = tensor<string, []>("input_143_cast_fp16")];
244
+ tensor<string, []> input_145_pad_type_0 = const()[name = tensor<string, []>("input_145_pad_type_0"), val = tensor<string, []>("custom")];
245
+ tensor<int32, [4]> input_145_pad_0 = const()[name = tensor<string, []>("input_145_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
246
+ tensor<int32, [2]> input_145_strides_0 = const()[name = tensor<string, []>("input_145_strides_0"), val = tensor<int32, [2]>([1, 1])];
247
+ tensor<int32, [2]> input_145_dilations_0 = const()[name = tensor<string, []>("input_145_dilations_0"), val = tensor<int32, [2]>([1, 1])];
248
+ tensor<int32, []> input_145_groups_0 = const()[name = tensor<string, []>("input_145_groups_0"), val = tensor<int32, []>(1)];
249
+ tensor<fp16, [128, 128, 3, 3]> const_55_to_fp16 = const()[name = tensor<string, []>("const_55_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2903616)))];
250
+ tensor<fp16, [128]> const_56_to_fp16 = const()[name = tensor<string, []>("const_56_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3198592)))];
251
+ tensor<fp16, [1, 128, 20, 250]> input_147_cast_fp16 = conv(bias = const_56_to_fp16, dilations = input_145_dilations_0, groups = input_145_groups_0, pad = input_145_pad_0, pad_type = input_145_pad_type_0, strides = input_145_strides_0, weight = const_55_to_fp16, x = input_143_cast_fp16)[name = tensor<string, []>("input_147_cast_fp16")];
252
+ tensor<fp16, [1, 128, 20, 250]> input_149_cast_fp16 = relu(x = input_147_cast_fp16)[name = tensor<string, []>("input_149_cast_fp16")];
253
+ tensor<string, []> input_151_pad_type_0 = const()[name = tensor<string, []>("input_151_pad_type_0"), val = tensor<string, []>("custom")];
254
+ tensor<int32, [4]> input_151_pad_0 = const()[name = tensor<string, []>("input_151_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
255
+ tensor<int32, [2]> input_151_strides_0 = const()[name = tensor<string, []>("input_151_strides_0"), val = tensor<int32, [2]>([1, 1])];
256
+ tensor<int32, [2]> input_151_dilations_0 = const()[name = tensor<string, []>("input_151_dilations_0"), val = tensor<int32, [2]>([1, 1])];
257
+ tensor<int32, []> input_151_groups_0 = const()[name = tensor<string, []>("input_151_groups_0"), val = tensor<int32, []>(1)];
258
+ tensor<fp16, [128, 128, 3, 3]> const_57_to_fp16 = const()[name = tensor<string, []>("const_57_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3198912)))];
259
+ tensor<fp16, [128]> const_58_to_fp16 = const()[name = tensor<string, []>("const_58_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3493888)))];
260
+ tensor<fp16, [1, 128, 20, 250]> out_23_cast_fp16 = conv(bias = const_58_to_fp16, dilations = input_151_dilations_0, groups = input_151_groups_0, pad = input_151_pad_0, pad_type = input_151_pad_type_0, strides = input_151_strides_0, weight = const_57_to_fp16, x = input_149_cast_fp16)[name = tensor<string, []>("out_23_cast_fp16")];
261
+ tensor<fp16, [1, 128, 20, 250]> input_153_cast_fp16 = add(x = out_23_cast_fp16, y = input_143_cast_fp16)[name = tensor<string, []>("input_153_cast_fp16")];
262
+ tensor<fp16, [1, 128, 20, 250]> input_155_cast_fp16 = relu(x = input_153_cast_fp16)[name = tensor<string, []>("input_155_cast_fp16")];
263
+ tensor<string, []> input_157_pad_type_0 = const()[name = tensor<string, []>("input_157_pad_type_0"), val = tensor<string, []>("custom")];
264
+ tensor<int32, [4]> input_157_pad_0 = const()[name = tensor<string, []>("input_157_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
265
+ tensor<int32, [2]> input_157_strides_0 = const()[name = tensor<string, []>("input_157_strides_0"), val = tensor<int32, [2]>([1, 1])];
266
+ tensor<int32, [2]> input_157_dilations_0 = const()[name = tensor<string, []>("input_157_dilations_0"), val = tensor<int32, [2]>([1, 1])];
267
+ tensor<int32, []> input_157_groups_0 = const()[name = tensor<string, []>("input_157_groups_0"), val = tensor<int32, []>(1)];
268
+ tensor<fp16, [128, 128, 3, 3]> const_59_to_fp16 = const()[name = tensor<string, []>("const_59_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3494208)))];
269
+ tensor<fp16, [128]> const_60_to_fp16 = const()[name = tensor<string, []>("const_60_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3789184)))];
270
+ tensor<fp16, [1, 128, 20, 250]> input_159_cast_fp16 = conv(bias = const_60_to_fp16, dilations = input_157_dilations_0, groups = input_157_groups_0, pad = input_157_pad_0, pad_type = input_157_pad_type_0, strides = input_157_strides_0, weight = const_59_to_fp16, x = input_155_cast_fp16)[name = tensor<string, []>("input_159_cast_fp16")];
271
+ tensor<fp16, [1, 128, 20, 250]> input_161_cast_fp16 = relu(x = input_159_cast_fp16)[name = tensor<string, []>("input_161_cast_fp16")];
272
+ tensor<string, []> input_163_pad_type_0 = const()[name = tensor<string, []>("input_163_pad_type_0"), val = tensor<string, []>("custom")];
273
+ tensor<int32, [4]> input_163_pad_0 = const()[name = tensor<string, []>("input_163_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
274
+ tensor<int32, [2]> input_163_strides_0 = const()[name = tensor<string, []>("input_163_strides_0"), val = tensor<int32, [2]>([1, 1])];
275
+ tensor<int32, [2]> input_163_dilations_0 = const()[name = tensor<string, []>("input_163_dilations_0"), val = tensor<int32, [2]>([1, 1])];
276
+ tensor<int32, []> input_163_groups_0 = const()[name = tensor<string, []>("input_163_groups_0"), val = tensor<int32, []>(1)];
277
+ tensor<fp16, [128, 128, 3, 3]> const_61_to_fp16 = const()[name = tensor<string, []>("const_61_to_fp16"), val = tensor<fp16, [128, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3789504)))];
278
+ tensor<fp16, [128]> const_62_to_fp16 = const()[name = tensor<string, []>("const_62_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4084480)))];
279
+ tensor<fp16, [1, 128, 20, 250]> out_25_cast_fp16 = conv(bias = const_62_to_fp16, dilations = input_163_dilations_0, groups = input_163_groups_0, pad = input_163_pad_0, pad_type = input_163_pad_type_0, strides = input_163_strides_0, weight = const_61_to_fp16, x = input_161_cast_fp16)[name = tensor<string, []>("out_25_cast_fp16")];
280
+ tensor<fp16, [1, 128, 20, 250]> input_165_cast_fp16 = add(x = out_25_cast_fp16, y = input_155_cast_fp16)[name = tensor<string, []>("input_165_cast_fp16")];
281
+ tensor<fp16, [1, 128, 20, 250]> input_167_cast_fp16 = relu(x = input_165_cast_fp16)[name = tensor<string, []>("input_167_cast_fp16")];
282
+ tensor<string, []> input_169_pad_type_0 = const()[name = tensor<string, []>("input_169_pad_type_0"), val = tensor<string, []>("custom")];
283
+ tensor<int32, [4]> input_169_pad_0 = const()[name = tensor<string, []>("input_169_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
284
+ tensor<int32, [2]> input_169_strides_0 = const()[name = tensor<string, []>("input_169_strides_0"), val = tensor<int32, [2]>([2, 2])];
285
+ tensor<int32, [2]> input_169_dilations_0 = const()[name = tensor<string, []>("input_169_dilations_0"), val = tensor<int32, [2]>([1, 1])];
286
+ tensor<int32, []> input_169_groups_0 = const()[name = tensor<string, []>("input_169_groups_0"), val = tensor<int32, []>(1)];
287
+ tensor<fp16, [256, 128, 3, 3]> const_63_to_fp16 = const()[name = tensor<string, []>("const_63_to_fp16"), val = tensor<fp16, [256, 128, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4084800)))];
288
+ tensor<fp16, [256]> const_64_to_fp16 = const()[name = tensor<string, []>("const_64_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4674688)))];
289
+ tensor<fp16, [1, 256, 10, 125]> input_171_cast_fp16 = conv(bias = const_64_to_fp16, dilations = input_169_dilations_0, groups = input_169_groups_0, pad = input_169_pad_0, pad_type = input_169_pad_type_0, strides = input_169_strides_0, weight = const_63_to_fp16, x = input_167_cast_fp16)[name = tensor<string, []>("input_171_cast_fp16")];
290
+ tensor<fp16, [1, 256, 10, 125]> input_173_cast_fp16 = relu(x = input_171_cast_fp16)[name = tensor<string, []>("input_173_cast_fp16")];
291
+ tensor<string, []> input_175_pad_type_0 = const()[name = tensor<string, []>("input_175_pad_type_0"), val = tensor<string, []>("custom")];
292
+ tensor<int32, [4]> input_175_pad_0 = const()[name = tensor<string, []>("input_175_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
293
+ tensor<int32, [2]> input_175_strides_0 = const()[name = tensor<string, []>("input_175_strides_0"), val = tensor<int32, [2]>([1, 1])];
294
+ tensor<int32, [2]> input_175_dilations_0 = const()[name = tensor<string, []>("input_175_dilations_0"), val = tensor<int32, [2]>([1, 1])];
295
+ tensor<int32, []> input_175_groups_0 = const()[name = tensor<string, []>("input_175_groups_0"), val = tensor<int32, []>(1)];
296
+ tensor<fp16, [256, 256, 3, 3]> const_65_to_fp16 = const()[name = tensor<string, []>("const_65_to_fp16"), val = tensor<fp16, [256, 256, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4675264)))];
297
+ tensor<fp16, [256]> const_66_to_fp16 = const()[name = tensor<string, []>("const_66_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5854976)))];
298
+ tensor<fp16, [1, 256, 10, 125]> out_27_cast_fp16 = conv(bias = const_66_to_fp16, dilations = input_175_dilations_0, groups = input_175_groups_0, pad = input_175_pad_0, pad_type = input_175_pad_type_0, strides = input_175_strides_0, weight = const_65_to_fp16, x = input_173_cast_fp16)[name = tensor<string, []>("out_27_cast_fp16")];
299
+ tensor<string, []> input_177_pad_type_0 = const()[name = tensor<string, []>("input_177_pad_type_0"), val = tensor<string, []>("valid")];
300
+ tensor<int32, [2]> input_177_strides_0 = const()[name = tensor<string, []>("input_177_strides_0"), val = tensor<int32, [2]>([2, 2])];
301
+ tensor<int32, [4]> input_177_pad_0 = const()[name = tensor<string, []>("input_177_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
302
+ tensor<int32, [2]> input_177_dilations_0 = const()[name = tensor<string, []>("input_177_dilations_0"), val = tensor<int32, [2]>([1, 1])];
303
+ tensor<int32, []> input_177_groups_0 = const()[name = tensor<string, []>("input_177_groups_0"), val = tensor<int32, []>(1)];
304
+ tensor<fp16, [256, 128, 1, 1]> const_67_to_fp16 = const()[name = tensor<string, []>("const_67_to_fp16"), val = tensor<fp16, [256, 128, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5855552)))];
305
+ tensor<fp16, [256]> const_68_to_fp16 = const()[name = tensor<string, []>("const_68_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5921152)))];
306
+ tensor<fp16, [1, 256, 10, 125]> var_498_cast_fp16 = conv(bias = const_68_to_fp16, dilations = input_177_dilations_0, groups = input_177_groups_0, pad = input_177_pad_0, pad_type = input_177_pad_type_0, strides = input_177_strides_0, weight = const_67_to_fp16, x = input_167_cast_fp16)[name = tensor<string, []>("op_498_cast_fp16")];
307
+ tensor<fp16, [1, 256, 10, 125]> input_179_cast_fp16 = add(x = out_27_cast_fp16, y = var_498_cast_fp16)[name = tensor<string, []>("input_179_cast_fp16")];
308
+ tensor<fp16, [1, 256, 10, 125]> input_181_cast_fp16 = relu(x = input_179_cast_fp16)[name = tensor<string, []>("input_181_cast_fp16")];
309
+ tensor<string, []> input_183_pad_type_0 = const()[name = tensor<string, []>("input_183_pad_type_0"), val = tensor<string, []>("custom")];
310
+ tensor<int32, [4]> input_183_pad_0 = const()[name = tensor<string, []>("input_183_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
311
+ tensor<int32, [2]> input_183_strides_0 = const()[name = tensor<string, []>("input_183_strides_0"), val = tensor<int32, [2]>([1, 1])];
312
+ tensor<int32, [2]> input_183_dilations_0 = const()[name = tensor<string, []>("input_183_dilations_0"), val = tensor<int32, [2]>([1, 1])];
313
+ tensor<int32, []> input_183_groups_0 = const()[name = tensor<string, []>("input_183_groups_0"), val = tensor<int32, []>(1)];
314
+ tensor<fp16, [256, 256, 3, 3]> const_69_to_fp16 = const()[name = tensor<string, []>("const_69_to_fp16"), val = tensor<fp16, [256, 256, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5921728)))];
315
+ tensor<fp16, [256]> const_70_to_fp16 = const()[name = tensor<string, []>("const_70_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7101440)))];
316
+ tensor<fp16, [1, 256, 10, 125]> input_185_cast_fp16 = conv(bias = const_70_to_fp16, dilations = input_183_dilations_0, groups = input_183_groups_0, pad = input_183_pad_0, pad_type = input_183_pad_type_0, strides = input_183_strides_0, weight = const_69_to_fp16, x = input_181_cast_fp16)[name = tensor<string, []>("input_185_cast_fp16")];
317
+ tensor<fp16, [1, 256, 10, 125]> input_187_cast_fp16 = relu(x = input_185_cast_fp16)[name = tensor<string, []>("input_187_cast_fp16")];
318
+ tensor<string, []> input_189_pad_type_0 = const()[name = tensor<string, []>("input_189_pad_type_0"), val = tensor<string, []>("custom")];
319
+ tensor<int32, [4]> input_189_pad_0 = const()[name = tensor<string, []>("input_189_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
320
+ tensor<int32, [2]> input_189_strides_0 = const()[name = tensor<string, []>("input_189_strides_0"), val = tensor<int32, [2]>([1, 1])];
321
+ tensor<int32, [2]> input_189_dilations_0 = const()[name = tensor<string, []>("input_189_dilations_0"), val = tensor<int32, [2]>([1, 1])];
322
+ tensor<int32, []> input_189_groups_0 = const()[name = tensor<string, []>("input_189_groups_0"), val = tensor<int32, []>(1)];
323
+ tensor<fp16, [256, 256, 3, 3]> const_71_to_fp16 = const()[name = tensor<string, []>("const_71_to_fp16"), val = tensor<fp16, [256, 256, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7102016)))];
324
+ tensor<fp16, [256]> const_72_to_fp16 = const()[name = tensor<string, []>("const_72_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8281728)))];
325
+ tensor<fp16, [1, 256, 10, 125]> out_29_cast_fp16 = conv(bias = const_72_to_fp16, dilations = input_189_dilations_0, groups = input_189_groups_0, pad = input_189_pad_0, pad_type = input_189_pad_type_0, strides = input_189_strides_0, weight = const_71_to_fp16, x = input_187_cast_fp16)[name = tensor<string, []>("out_29_cast_fp16")];
326
+ tensor<fp16, [1, 256, 10, 125]> input_191_cast_fp16 = add(x = out_29_cast_fp16, y = input_181_cast_fp16)[name = tensor<string, []>("input_191_cast_fp16")];
327
+ tensor<fp16, [1, 256, 10, 125]> input_193_cast_fp16 = relu(x = input_191_cast_fp16)[name = tensor<string, []>("input_193_cast_fp16")];
328
+ tensor<string, []> input_195_pad_type_0 = const()[name = tensor<string, []>("input_195_pad_type_0"), val = tensor<string, []>("custom")];
329
+ tensor<int32, [4]> input_195_pad_0 = const()[name = tensor<string, []>("input_195_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
330
+ tensor<int32, [2]> input_195_strides_0 = const()[name = tensor<string, []>("input_195_strides_0"), val = tensor<int32, [2]>([1, 1])];
331
+ tensor<int32, [2]> input_195_dilations_0 = const()[name = tensor<string, []>("input_195_dilations_0"), val = tensor<int32, [2]>([1, 1])];
332
+ tensor<int32, []> input_195_groups_0 = const()[name = tensor<string, []>("input_195_groups_0"), val = tensor<int32, []>(1)];
333
+ tensor<fp16, [256, 256, 3, 3]> const_73_to_fp16 = const()[name = tensor<string, []>("const_73_to_fp16"), val = tensor<fp16, [256, 256, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8282304)))];
334
+ tensor<fp16, [256]> const_74_to_fp16 = const()[name = tensor<string, []>("const_74_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9462016)))];
335
+ tensor<fp16, [1, 256, 10, 125]> input_197_cast_fp16 = conv(bias = const_74_to_fp16, dilations = input_195_dilations_0, groups = input_195_groups_0, pad = input_195_pad_0, pad_type = input_195_pad_type_0, strides = input_195_strides_0, weight = const_73_to_fp16, x = input_193_cast_fp16)[name = tensor<string, []>("input_197_cast_fp16")];
336
+ tensor<fp16, [1, 256, 10, 125]> input_199_cast_fp16 = relu(x = input_197_cast_fp16)[name = tensor<string, []>("input_199_cast_fp16")];
337
+ tensor<string, []> input_201_pad_type_0 = const()[name = tensor<string, []>("input_201_pad_type_0"), val = tensor<string, []>("custom")];
338
+ tensor<int32, [4]> input_201_pad_0 = const()[name = tensor<string, []>("input_201_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
339
+ tensor<int32, [2]> input_201_strides_0 = const()[name = tensor<string, []>("input_201_strides_0"), val = tensor<int32, [2]>([1, 1])];
340
+ tensor<int32, [2]> input_201_dilations_0 = const()[name = tensor<string, []>("input_201_dilations_0"), val = tensor<int32, [2]>([1, 1])];
341
+ tensor<int32, []> input_201_groups_0 = const()[name = tensor<string, []>("input_201_groups_0"), val = tensor<int32, []>(1)];
342
+ tensor<fp16, [256, 256, 3, 3]> const_75_to_fp16 = const()[name = tensor<string, []>("const_75_to_fp16"), val = tensor<fp16, [256, 256, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9462592)))];
343
+ tensor<fp16, [256]> const_76_to_fp16 = const()[name = tensor<string, []>("const_76_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10642304)))];
344
+ tensor<fp16, [1, 256, 10, 125]> out_cast_fp16 = conv(bias = const_76_to_fp16, dilations = input_201_dilations_0, groups = input_201_groups_0, pad = input_201_pad_0, pad_type = input_201_pad_type_0, strides = input_201_strides_0, weight = const_75_to_fp16, x = input_199_cast_fp16)[name = tensor<string, []>("out_cast_fp16")];
345
+ tensor<fp16, [1, 256, 10, 125]> input_203_cast_fp16 = add(x = out_cast_fp16, y = input_193_cast_fp16)[name = tensor<string, []>("input_203_cast_fp16")];
346
+ tensor<fp16, [1, 256, 10, 125]> x_cast_fp16 = relu(x = input_203_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
347
+ tensor<int32, [3]> var_577 = const()[name = tensor<string, []>("op_577"), val = tensor<int32, [3]>([1, 2560, 125])];
348
+ tensor<fp16, [1, 2560, 125]> sequences_cast_fp16 = reshape(shape = var_577, x = x_cast_fp16)[name = tensor<string, []>("sequences_cast_fp16")];
349
+ tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([3])];
350
+ tensor<fp16, [1, 3, 589, 1]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = speaker_masks)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
351
+ tensor<fp32, []> upsample_nearest_neighbor_0_scale_factor_height_0 = const()[name = tensor<string, []>("upsample_nearest_neighbor_0_scale_factor_height_0"), val = tensor<fp32, []>(0x1.b2a2a4p-3)];
352
+ tensor<fp32, []> upsample_nearest_neighbor_0_scale_factor_width_0 = const()[name = tensor<string, []>("upsample_nearest_neighbor_0_scale_factor_width_0"), val = tensor<fp32, []>(0x1p+0)];
353
+ tensor<fp16, [1, 3, 125, 1]> upsample_nearest_neighbor_0_cast_fp16 = upsample_nearest_neighbor(scale_factor_height = upsample_nearest_neighbor_0_scale_factor_height_0, scale_factor_width = upsample_nearest_neighbor_0_scale_factor_width_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("upsample_nearest_neighbor_0_cast_fp16")];
354
+ tensor<int32, [1]> weights_1_axes_0 = const()[name = tensor<string, []>("weights_1_axes_0"), val = tensor<int32, [1]>([3])];
355
+ tensor<fp16, [1, 3, 125]> weights_1_cast_fp16 = squeeze(axes = weights_1_axes_0, x = upsample_nearest_neighbor_0_cast_fp16)[name = tensor<string, []>("weights_1_cast_fp16")];
356
+ tensor<int32, [3]> var_583_begin_0 = const()[name = tensor<string, []>("op_583_begin_0"), val = tensor<int32, [3]>([0, 0, 0])];
357
+ tensor<int32, [3]> var_583_end_0 = const()[name = tensor<string, []>("op_583_end_0"), val = tensor<int32, [3]>([1, 1, 125])];
358
+ tensor<bool, [3]> var_583_end_mask_0 = const()[name = tensor<string, []>("op_583_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
359
+ tensor<bool, [3]> var_583_squeeze_mask_0 = const()[name = tensor<string, []>("op_583_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
360
+ tensor<fp16, [1, 125]> var_583_cast_fp16 = slice_by_index(begin = var_583_begin_0, end = var_583_end_0, end_mask = var_583_end_mask_0, squeeze_mask = var_583_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_583_cast_fp16")];
361
+ tensor<int32, [1]> weights_5_axes_0 = const()[name = tensor<string, []>("weights_5_axes_0"), val = tensor<int32, [1]>([1])];
362
+ tensor<fp16, [1, 1, 125]> weights_5_cast_fp16 = expand_dims(axes = weights_5_axes_0, x = var_583_cast_fp16)[name = tensor<string, []>("weights_5_cast_fp16")];
363
+ tensor<int32, [1]> var_587_axes_0 = const()[name = tensor<string, []>("op_587_axes_0"), val = tensor<int32, [1]>([2])];
364
+ tensor<bool, []> var_587_keep_dims_0 = const()[name = tensor<string, []>("op_587_keep_dims_0"), val = tensor<bool, []>(false)];
365
+ tensor<fp16, [1, 1]> var_587_cast_fp16 = reduce_sum(axes = var_587_axes_0, keep_dims = var_587_keep_dims_0, x = weights_5_cast_fp16)[name = tensor<string, []>("op_587_cast_fp16")];
366
+ tensor<fp16, []> var_588_to_fp16 = const()[name = tensor<string, []>("op_588_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
367
+ tensor<fp16, [1, 1]> v1_1_cast_fp16 = add(x = var_587_cast_fp16, y = var_588_to_fp16)[name = tensor<string, []>("v1_1_cast_fp16")];
368
+ tensor<fp16, [1, 2560, 125]> var_590_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_590_cast_fp16")];
369
+ tensor<int32, [1]> var_592_axes_0 = const()[name = tensor<string, []>("op_592_axes_0"), val = tensor<int32, [1]>([2])];
370
+ tensor<bool, []> var_592_keep_dims_0 = const()[name = tensor<string, []>("op_592_keep_dims_0"), val = tensor<bool, []>(false)];
371
+ tensor<fp16, [1, 2560]> var_592_cast_fp16 = reduce_sum(axes = var_592_axes_0, keep_dims = var_592_keep_dims_0, x = var_590_cast_fp16)[name = tensor<string, []>("op_592_cast_fp16")];
372
+ tensor<fp16, [1, 2560]> mean_1_cast_fp16 = real_div(x = var_592_cast_fp16, y = v1_1_cast_fp16)[name = tensor<string, []>("mean_1_cast_fp16")];
373
+ tensor<int32, [1]> var_594_axes_0 = const()[name = tensor<string, []>("op_594_axes_0"), val = tensor<int32, [1]>([2])];
374
+ tensor<fp16, [1, 2560, 1]> var_594_cast_fp16 = expand_dims(axes = var_594_axes_0, x = mean_1_cast_fp16)[name = tensor<string, []>("op_594_cast_fp16")];
375
+ tensor<fp16, [1, 2560, 125]> var_595_cast_fp16 = sub(x = sequences_cast_fp16, y = var_594_cast_fp16)[name = tensor<string, []>("op_595_cast_fp16")];
376
+ tensor<fp16, [1, 2560, 125]> dx2_1_cast_fp16 = mul(x = var_595_cast_fp16, y = var_595_cast_fp16)[name = tensor<string, []>("dx2_1_cast_fp16")];
377
+ tensor<fp16, [1, 1, 125]> var_597_cast_fp16 = mul(x = weights_5_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_597_cast_fp16")];
378
+ tensor<int32, [1]> v2_1_axes_0 = const()[name = tensor<string, []>("v2_1_axes_0"), val = tensor<int32, [1]>([2])];
379
+ tensor<bool, []> v2_1_keep_dims_0 = const()[name = tensor<string, []>("v2_1_keep_dims_0"), val = tensor<bool, []>(false)];
380
+ tensor<fp16, [1, 1]> v2_1_cast_fp16 = reduce_sum(axes = v2_1_axes_0, keep_dims = v2_1_keep_dims_0, x = var_597_cast_fp16)[name = tensor<string, []>("v2_1_cast_fp16")];
381
+ tensor<fp16, [1, 2560, 125]> var_600_cast_fp16 = mul(x = dx2_1_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_600_cast_fp16")];
382
+ tensor<int32, [1]> var_602_axes_0 = const()[name = tensor<string, []>("op_602_axes_0"), val = tensor<int32, [1]>([2])];
383
+ tensor<bool, []> var_602_keep_dims_0 = const()[name = tensor<string, []>("op_602_keep_dims_0"), val = tensor<bool, []>(false)];
384
+ tensor<fp16, [1, 2560]> var_602_cast_fp16 = reduce_sum(axes = var_602_axes_0, keep_dims = var_602_keep_dims_0, x = var_600_cast_fp16)[name = tensor<string, []>("op_602_cast_fp16")];
385
+ tensor<fp16, [1, 1]> var_603_cast_fp16 = real_div(x = v2_1_cast_fp16, y = v1_1_cast_fp16)[name = tensor<string, []>("op_603_cast_fp16")];
386
+ tensor<fp16, [1, 1]> var_604_cast_fp16 = sub(x = v1_1_cast_fp16, y = var_603_cast_fp16)[name = tensor<string, []>("op_604_cast_fp16")];
387
+ tensor<fp16, []> var_605_to_fp16 = const()[name = tensor<string, []>("op_605_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
388
+ tensor<fp16, [1, 1]> var_606_cast_fp16 = add(x = var_604_cast_fp16, y = var_605_to_fp16)[name = tensor<string, []>("op_606_cast_fp16")];
389
+ tensor<fp16, [1, 2560]> var_1_cast_fp16 = real_div(x = var_602_cast_fp16, y = var_606_cast_fp16)[name = tensor<string, []>("var_1_cast_fp16")];
390
+ tensor<fp16, [1, 2560]> std_1_cast_fp16 = sqrt(x = var_1_cast_fp16)[name = tensor<string, []>("std_1_cast_fp16")];
391
+ tensor<bool, []> var_610_interleave_0 = const()[name = tensor<string, []>("op_610_interleave_0"), val = tensor<bool, []>(false)];
392
+ tensor<fp16, [1, 5120]> var_610_cast_fp16 = concat(axis = var_12, interleave = var_610_interleave_0, values = (mean_1_cast_fp16, std_1_cast_fp16))[name = tensor<string, []>("op_610_cast_fp16")];
393
+ tensor<int32, [3]> var_612_begin_0 = const()[name = tensor<string, []>("op_612_begin_0"), val = tensor<int32, [3]>([0, 1, 0])];
394
+ tensor<int32, [3]> var_612_end_0 = const()[name = tensor<string, []>("op_612_end_0"), val = tensor<int32, [3]>([1, 2, 125])];
395
+ tensor<bool, [3]> var_612_end_mask_0 = const()[name = tensor<string, []>("op_612_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
396
+ tensor<bool, [3]> var_612_squeeze_mask_0 = const()[name = tensor<string, []>("op_612_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
397
+ tensor<fp16, [1, 125]> var_612_cast_fp16 = slice_by_index(begin = var_612_begin_0, end = var_612_end_0, end_mask = var_612_end_mask_0, squeeze_mask = var_612_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_612_cast_fp16")];
398
+ tensor<int32, [1]> weights_9_axes_0 = const()[name = tensor<string, []>("weights_9_axes_0"), val = tensor<int32, [1]>([1])];
399
+ tensor<fp16, [1, 1, 125]> weights_9_cast_fp16 = expand_dims(axes = weights_9_axes_0, x = var_612_cast_fp16)[name = tensor<string, []>("weights_9_cast_fp16")];
400
+ tensor<int32, [1]> var_616_axes_0 = const()[name = tensor<string, []>("op_616_axes_0"), val = tensor<int32, [1]>([2])];
401
+ tensor<bool, []> var_616_keep_dims_0 = const()[name = tensor<string, []>("op_616_keep_dims_0"), val = tensor<bool, []>(false)];
402
+ tensor<fp16, [1, 1]> var_616_cast_fp16 = reduce_sum(axes = var_616_axes_0, keep_dims = var_616_keep_dims_0, x = weights_9_cast_fp16)[name = tensor<string, []>("op_616_cast_fp16")];
403
+ tensor<fp16, []> var_617_to_fp16 = const()[name = tensor<string, []>("op_617_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
404
+ tensor<fp16, [1, 1]> v1_3_cast_fp16 = add(x = var_616_cast_fp16, y = var_617_to_fp16)[name = tensor<string, []>("v1_3_cast_fp16")];
405
+ tensor<fp16, [1, 2560, 125]> var_619_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_619_cast_fp16")];
406
+ tensor<int32, [1]> var_621_axes_0 = const()[name = tensor<string, []>("op_621_axes_0"), val = tensor<int32, [1]>([2])];
407
+ tensor<bool, []> var_621_keep_dims_0 = const()[name = tensor<string, []>("op_621_keep_dims_0"), val = tensor<bool, []>(false)];
408
+ tensor<fp16, [1, 2560]> var_621_cast_fp16 = reduce_sum(axes = var_621_axes_0, keep_dims = var_621_keep_dims_0, x = var_619_cast_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
409
+ tensor<fp16, [1, 2560]> mean_3_cast_fp16 = real_div(x = var_621_cast_fp16, y = v1_3_cast_fp16)[name = tensor<string, []>("mean_3_cast_fp16")];
410
+ tensor<int32, [1]> var_623_axes_0 = const()[name = tensor<string, []>("op_623_axes_0"), val = tensor<int32, [1]>([2])];
411
+ tensor<fp16, [1, 2560, 1]> var_623_cast_fp16 = expand_dims(axes = var_623_axes_0, x = mean_3_cast_fp16)[name = tensor<string, []>("op_623_cast_fp16")];
412
+ tensor<fp16, [1, 2560, 125]> var_624_cast_fp16 = sub(x = sequences_cast_fp16, y = var_623_cast_fp16)[name = tensor<string, []>("op_624_cast_fp16")];
413
+ tensor<fp16, [1, 2560, 125]> dx2_3_cast_fp16 = mul(x = var_624_cast_fp16, y = var_624_cast_fp16)[name = tensor<string, []>("dx2_3_cast_fp16")];
414
+ tensor<fp16, [1, 1, 125]> var_626_cast_fp16 = mul(x = weights_9_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_626_cast_fp16")];
415
+ tensor<int32, [1]> v2_3_axes_0 = const()[name = tensor<string, []>("v2_3_axes_0"), val = tensor<int32, [1]>([2])];
416
+ tensor<bool, []> v2_3_keep_dims_0 = const()[name = tensor<string, []>("v2_3_keep_dims_0"), val = tensor<bool, []>(false)];
417
+ tensor<fp16, [1, 1]> v2_3_cast_fp16 = reduce_sum(axes = v2_3_axes_0, keep_dims = v2_3_keep_dims_0, x = var_626_cast_fp16)[name = tensor<string, []>("v2_3_cast_fp16")];
418
+ tensor<fp16, [1, 2560, 125]> var_629_cast_fp16 = mul(x = dx2_3_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_629_cast_fp16")];
419
+ tensor<int32, [1]> var_631_axes_0 = const()[name = tensor<string, []>("op_631_axes_0"), val = tensor<int32, [1]>([2])];
420
+ tensor<bool, []> var_631_keep_dims_0 = const()[name = tensor<string, []>("op_631_keep_dims_0"), val = tensor<bool, []>(false)];
421
+ tensor<fp16, [1, 2560]> var_631_cast_fp16 = reduce_sum(axes = var_631_axes_0, keep_dims = var_631_keep_dims_0, x = var_629_cast_fp16)[name = tensor<string, []>("op_631_cast_fp16")];
422
+ tensor<fp16, [1, 1]> var_632_cast_fp16 = real_div(x = v2_3_cast_fp16, y = v1_3_cast_fp16)[name = tensor<string, []>("op_632_cast_fp16")];
423
+ tensor<fp16, [1, 1]> var_633_cast_fp16 = sub(x = v1_3_cast_fp16, y = var_632_cast_fp16)[name = tensor<string, []>("op_633_cast_fp16")];
424
+ tensor<fp16, []> var_634_to_fp16 = const()[name = tensor<string, []>("op_634_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
425
+ tensor<fp16, [1, 1]> var_635_cast_fp16 = add(x = var_633_cast_fp16, y = var_634_to_fp16)[name = tensor<string, []>("op_635_cast_fp16")];
426
+ tensor<fp16, [1, 2560]> var_3_cast_fp16 = real_div(x = var_631_cast_fp16, y = var_635_cast_fp16)[name = tensor<string, []>("var_3_cast_fp16")];
427
+ tensor<fp16, [1, 2560]> std_3_cast_fp16 = sqrt(x = var_3_cast_fp16)[name = tensor<string, []>("std_3_cast_fp16")];
428
+ tensor<bool, []> var_639_interleave_0 = const()[name = tensor<string, []>("op_639_interleave_0"), val = tensor<bool, []>(false)];
429
+ tensor<fp16, [1, 5120]> var_639_cast_fp16 = concat(axis = var_12, interleave = var_639_interleave_0, values = (mean_3_cast_fp16, std_3_cast_fp16))[name = tensor<string, []>("op_639_cast_fp16")];
430
+ tensor<int32, [3]> var_641_begin_0 = const()[name = tensor<string, []>("op_641_begin_0"), val = tensor<int32, [3]>([0, 2, 0])];
431
+ tensor<int32, [3]> var_641_end_0 = const()[name = tensor<string, []>("op_641_end_0"), val = tensor<int32, [3]>([1, 3, 125])];
432
+ tensor<bool, [3]> var_641_end_mask_0 = const()[name = tensor<string, []>("op_641_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
433
+ tensor<bool, [3]> var_641_squeeze_mask_0 = const()[name = tensor<string, []>("op_641_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
434
+ tensor<fp16, [1, 125]> var_641_cast_fp16 = slice_by_index(begin = var_641_begin_0, end = var_641_end_0, end_mask = var_641_end_mask_0, squeeze_mask = var_641_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_641_cast_fp16")];
435
+ tensor<int32, [1]> weights_axes_0 = const()[name = tensor<string, []>("weights_axes_0"), val = tensor<int32, [1]>([1])];
436
+ tensor<fp16, [1, 1, 125]> weights_cast_fp16 = expand_dims(axes = weights_axes_0, x = var_641_cast_fp16)[name = tensor<string, []>("weights_cast_fp16")];
437
+ tensor<int32, [1]> var_645_axes_0 = const()[name = tensor<string, []>("op_645_axes_0"), val = tensor<int32, [1]>([2])];
438
+ tensor<bool, []> var_645_keep_dims_0 = const()[name = tensor<string, []>("op_645_keep_dims_0"), val = tensor<bool, []>(false)];
439
+ tensor<fp16, [1, 1]> var_645_cast_fp16 = reduce_sum(axes = var_645_axes_0, keep_dims = var_645_keep_dims_0, x = weights_cast_fp16)[name = tensor<string, []>("op_645_cast_fp16")];
440
+ tensor<fp16, []> var_646_to_fp16 = const()[name = tensor<string, []>("op_646_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
441
+ tensor<fp16, [1, 1]> v1_cast_fp16 = add(x = var_645_cast_fp16, y = var_646_to_fp16)[name = tensor<string, []>("v1_cast_fp16")];
442
+ tensor<fp16, [1, 2560, 125]> var_648_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
443
+ tensor<int32, [1]> var_650_axes_0 = const()[name = tensor<string, []>("op_650_axes_0"), val = tensor<int32, [1]>([2])];
444
+ tensor<bool, []> var_650_keep_dims_0 = const()[name = tensor<string, []>("op_650_keep_dims_0"), val = tensor<bool, []>(false)];
445
+ tensor<fp16, [1, 2560]> var_650_cast_fp16 = reduce_sum(axes = var_650_axes_0, keep_dims = var_650_keep_dims_0, x = var_648_cast_fp16)[name = tensor<string, []>("op_650_cast_fp16")];
446
+ tensor<fp16, [1, 2560]> mean_cast_fp16 = real_div(x = var_650_cast_fp16, y = v1_cast_fp16)[name = tensor<string, []>("mean_cast_fp16")];
447
+ tensor<int32, [1]> var_652_axes_0 = const()[name = tensor<string, []>("op_652_axes_0"), val = tensor<int32, [1]>([2])];
448
+ tensor<fp16, [1, 2560, 1]> var_652_cast_fp16 = expand_dims(axes = var_652_axes_0, x = mean_cast_fp16)[name = tensor<string, []>("op_652_cast_fp16")];
449
+ tensor<fp16, [1, 2560, 125]> var_653_cast_fp16 = sub(x = sequences_cast_fp16, y = var_652_cast_fp16)[name = tensor<string, []>("op_653_cast_fp16")];
450
+ tensor<fp16, [1, 2560, 125]> dx2_cast_fp16 = mul(x = var_653_cast_fp16, y = var_653_cast_fp16)[name = tensor<string, []>("dx2_cast_fp16")];
451
+ tensor<fp16, [1, 1, 125]> var_655_cast_fp16 = mul(x = weights_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_655_cast_fp16")];
452
+ tensor<int32, [1]> v2_axes_0 = const()[name = tensor<string, []>("v2_axes_0"), val = tensor<int32, [1]>([2])];
453
+ tensor<bool, []> v2_keep_dims_0 = const()[name = tensor<string, []>("v2_keep_dims_0"), val = tensor<bool, []>(false)];
454
+ tensor<fp16, [1, 1]> v2_cast_fp16 = reduce_sum(axes = v2_axes_0, keep_dims = v2_keep_dims_0, x = var_655_cast_fp16)[name = tensor<string, []>("v2_cast_fp16")];
455
+ tensor<fp16, [1, 2560, 125]> var_658_cast_fp16 = mul(x = dx2_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
456
+ tensor<int32, [1]> var_660_axes_0 = const()[name = tensor<string, []>("op_660_axes_0"), val = tensor<int32, [1]>([2])];
457
+ tensor<bool, []> var_660_keep_dims_0 = const()[name = tensor<string, []>("op_660_keep_dims_0"), val = tensor<bool, []>(false)];
458
+ tensor<fp16, [1, 2560]> var_660_cast_fp16 = reduce_sum(axes = var_660_axes_0, keep_dims = var_660_keep_dims_0, x = var_658_cast_fp16)[name = tensor<string, []>("op_660_cast_fp16")];
459
+ tensor<fp16, [1, 1]> var_661_cast_fp16 = real_div(x = v2_cast_fp16, y = v1_cast_fp16)[name = tensor<string, []>("op_661_cast_fp16")];
460
+ tensor<fp16, [1, 1]> var_662_cast_fp16 = sub(x = v1_cast_fp16, y = var_661_cast_fp16)[name = tensor<string, []>("op_662_cast_fp16")];
461
+ tensor<fp16, []> var_663_to_fp16 = const()[name = tensor<string, []>("op_663_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
462
+ tensor<fp16, [1, 1]> var_664_cast_fp16 = add(x = var_662_cast_fp16, y = var_663_to_fp16)[name = tensor<string, []>("op_664_cast_fp16")];
463
+ tensor<fp16, [1, 2560]> var_cast_fp16 = real_div(x = var_660_cast_fp16, y = var_664_cast_fp16)[name = tensor<string, []>("var_cast_fp16")];
464
+ tensor<fp16, [1, 2560]> std_cast_fp16 = sqrt(x = var_cast_fp16)[name = tensor<string, []>("std_cast_fp16")];
465
+ tensor<bool, []> var_668_interleave_0 = const()[name = tensor<string, []>("op_668_interleave_0"), val = tensor<bool, []>(false)];
466
+ tensor<fp16, [1, 5120]> var_668_cast_fp16 = concat(axis = var_12, interleave = var_668_interleave_0, values = (mean_cast_fp16, std_cast_fp16))[name = tensor<string, []>("op_668_cast_fp16")];
467
+ tensor<int32, []> input_axis_0 = const()[name = tensor<string, []>("input_axis_0"), val = tensor<int32, []>(1)];
468
+ tensor<fp16, [1, 3, 5120]> input_cast_fp16 = stack(axis = input_axis_0, values = (var_610_cast_fp16, var_639_cast_fp16, var_668_cast_fp16))[name = tensor<string, []>("input_cast_fp16")];
469
+ tensor<fp16, [256, 5120]> model_resnet_seg_1_weight_to_fp16 = const()[name = tensor<string, []>("model_resnet_seg_1_weight_to_fp16"), val = tensor<fp16, [256, 5120]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10642880)))];
470
+ tensor<fp16, [256]> model_resnet_seg_1_bias_to_fp16 = const()[name = tensor<string, []>("model_resnet_seg_1_bias_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13264384)))];
471
+ tensor<fp16, [1, 3, 256]> speaker_embeddings = linear(bias = model_resnet_seg_1_bias_to_fp16, weight = model_resnet_seg_1_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
472
+ } -> (speaker_embeddings);
473
+ }
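
Note on the tail of this graph (from `sequences_cast_fp16` onward): it is a masked statistics-pooling head. The `speaker_masks` input is resampled to the 125 ResNet frames, and for each of the 3 speakers a weighted mean and weighted standard deviation of the 2560-dim frame features are concatenated and projected to a 256-dim embedding. A minimal NumPy sketch of that pooling step follows; only the shapes and the epsilon/variance formula are taken from the MIL ops above, while the function and variable names are illustrative.

```python
import numpy as np

def masked_stats_pooling(features: np.ndarray, masks: np.ndarray, eps: float = 2.0**-24) -> np.ndarray:
    """features: [C, T] frame features (C=2560, T=125 above);
    masks: [S, T] per-speaker weights resampled to T frames (S=3 above).
    Returns [S, 2*C]: weighted mean and std concatenated per speaker."""
    stats = []
    for w in masks:                                   # w: [T]
        v1 = w.sum() + eps                            # total mask weight
        mean = (features * w).sum(axis=1) / v1        # [C] weighted mean
        dx2 = (features - mean[:, None]) ** 2         # [C, T] squared deviations
        v2 = (w * w).sum()
        var = (dx2 * w).sum(axis=1) / (v1 - v2 / v1 + eps)  # weighted, reliability-corrected variance
        stats.append(np.concatenate([mean, np.sqrt(var)]))
    return np.stack(stats)                            # [S, 2*C] = [3, 5120]

# The final linear layer (weight [256, 5120], bias [256]) then maps each
# 5120-dim stats vector to one row of the [1, 3, 256] speaker_embeddings output.
```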
speaker_embedder/pyannote-v3/SpeakerEmbedder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6dba18a57a81b1e872802ca4def29541bb7900ccff430d9b2040092cadd7d688
3
+ size 13264960
speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af0ef58ba31419e0e4d2f9714a3b2f382955fb3ee0472f2e76987934b88cebc2
3
+ size 243
speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f252f1834b495a132333af500f573a7218c2d3d1f7bfb0faaad89c51a989dac7
3
+ size 330
speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/metadata.json ADDED
@@ -0,0 +1,77 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Float32",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 998 × 80)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 998, 80]",
13
+ "name" : "preprocessor_output_1",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "modelParameters" : [
18
+
19
+ ],
20
+ "specificationVersion" : 7,
21
+ "mlProgramOperationTypeHistogram" : {
22
+ "Ios16.cast" : 2,
23
+ "Ios16.mul" : 4,
24
+ "SliceByIndex" : 2,
25
+ "Transpose" : 2,
26
+ "SlidingWindows" : 1,
27
+ "Ios16.sub" : 3,
28
+ "Ios16.log" : 1,
29
+ "Ios16.reduceMean" : 2,
30
+ "Ios16.square" : 2,
31
+ "Squeeze" : 2,
32
+ "Ios16.matmul" : 2,
33
+ "Ios16.add" : 1,
34
+ "Ios16.linear" : 1,
35
+ "ExpandDims" : 4,
36
+ "Ios16.gather" : 2,
37
+ "Ios16.maximum" : 1,
38
+ "Identity" : 1,
39
+ "Pad" : 2
40
+ },
41
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
42
+ "isUpdatable" : "0",
43
+ "stateSchema" : [
44
+
45
+ ],
46
+ "availability" : {
47
+ "macOS" : "13.0",
48
+ "tvOS" : "16.0",
49
+ "visionOS" : "1.0",
50
+ "watchOS" : "9.0",
51
+ "iOS" : "16.0",
52
+ "macCatalyst" : "16.0"
53
+ },
54
+ "modelType" : {
55
+ "name" : "MLModelType_mlProgram"
56
+ },
57
+ "userDefinedMetadata" : {
58
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
59
+ "com.github.apple.coremltools.source" : "torch==2.5.1",
60
+ "com.github.apple.coremltools.version" : "8.1"
61
+ },
62
+ "inputSchema" : [
63
+ {
64
+ "hasShapeFlexibility" : "0",
65
+ "isOptional" : "0",
66
+ "dataType" : "Float16",
67
+ "formattedType" : "MultiArray (Float16 1 × 160000)",
68
+ "shortDescription" : "",
69
+ "shape" : "[1, 160000]",
70
+ "name" : "waveforms",
71
+ "type" : "MultiArray"
72
+ }
73
+ ],
74
+ "generatedClassName" : "SpeakerEmbeddingPreprocessor",
75
+ "method" : "predict"
76
+ }
77
+ ]
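
As a quick sanity check on the schema above: the 160,000-sample input (10 s of audio, assuming the usual 16 kHz rate, which the schema itself does not state) maps to the declared 998 feature frames with the 400-sample window and 160-sample hop used in the MIL graph that follows.

```python
# Frame-count check for the [1, 160000] -> [1, 998, 80] schema above.
samples, frame_len, hop = 160_000, 400, 160     # 25 ms window, 10 ms hop at an assumed 16 kHz
num_frames = (samples - frame_len) // hop + 1
assert num_frames == 998                        # matches the declared output shape
```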
speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/model.mil ADDED
@@ -0,0 +1,90 @@
1
+ program(1.0)
2
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.1"}})]
3
+ {
4
+ func main<ios16>(tensor<fp16, [1, 160000]> waveforms) {
5
+ tensor<string, []> cast_0_dtype_0 = const()[name = tensor<string, []>("cast_0_dtype_0"), val = tensor<string, []>("fp32")];
6
+ tensor<fp32, []> var_2_promoted = const()[name = tensor<string, []>("op_2_promoted"), val = tensor<fp32, []>(0x1p+15)];
7
+ tensor<fp32, [1, 160000]> cast_0 = cast(dtype = cast_0_dtype_0, x = waveforms)[name = tensor<string, []>("cast_11")];
8
+ tensor<fp32, [1, 160000]> waveform_1 = mul(x = cast_0, y = var_2_promoted)[name = tensor<string, []>("waveform_1")];
9
+ tensor<int32, [2]> var_6_begin_0 = const()[name = tensor<string, []>("op_6_begin_0"), val = tensor<int32, [2]>([0, 0])];
10
+ tensor<int32, [2]> var_6_end_0 = const()[name = tensor<string, []>("op_6_end_0"), val = tensor<int32, [2]>([1, 160000])];
11
+ tensor<bool, [2]> var_6_end_mask_0 = const()[name = tensor<string, []>("op_6_end_mask_0"), val = tensor<bool, [2]>([false, true])];
12
+ tensor<bool, [2]> var_6_squeeze_mask_0 = const()[name = tensor<string, []>("op_6_squeeze_mask_0"), val = tensor<bool, [2]>([true, false])];
13
+ tensor<fp32, [160000]> var_6 = slice_by_index(begin = var_6_begin_0, end = var_6_end_0, end_mask = var_6_end_mask_0, squeeze_mask = var_6_squeeze_mask_0, x = waveform_1)[name = tensor<string, []>("op_6")];
14
+ tensor<int32, []> sliding_windows_0_axis_0 = const()[name = tensor<string, []>("sliding_windows_0_axis_0"), val = tensor<int32, []>(0)];
15
+ tensor<int32, []> sliding_windows_0_size_0 = const()[name = tensor<string, []>("sliding_windows_0_size_0"), val = tensor<int32, []>(400)];
16
+ tensor<int32, []> sliding_windows_0_stride_0 = const()[name = tensor<string, []>("sliding_windows_0_stride_0"), val = tensor<int32, []>(160)];
17
+ tensor<fp32, [998, 400]> sliding_windows_0 = sliding_windows(axis = sliding_windows_0_axis_0, size = sliding_windows_0_size_0, stride = sliding_windows_0_stride_0, x = var_6)[name = tensor<string, []>("sliding_windows_0")];
18
+ tensor<int32, [1]> var_42_axes_0 = const()[name = tensor<string, []>("op_42_axes_0"), val = tensor<int32, [1]>([1])];
19
+ tensor<bool, []> var_42_keep_dims_0 = const()[name = tensor<string, []>("op_42_keep_dims_0"), val = tensor<bool, []>(false)];
20
+ tensor<fp32, [998]> var_42 = reduce_mean(axes = var_42_axes_0, keep_dims = var_42_keep_dims_0, x = sliding_windows_0)[name = tensor<string, []>("op_42")];
21
+ tensor<int32, [1]> row_means_axes_0 = const()[name = tensor<string, []>("row_means_axes_0"), val = tensor<int32, [1]>([1])];
22
+ tensor<fp32, [998, 1]> row_means = expand_dims(axes = row_means_axes_0, x = var_42)[name = tensor<string, []>("row_means")];
23
+ tensor<fp32, [998, 400]> strided_input_3 = sub(x = sliding_windows_0, y = row_means)[name = tensor<string, []>("strided_input_3")];
24
+ tensor<int32, [1]> input_1_axes_0 = const()[name = tensor<string, []>("input_1_axes_0"), val = tensor<int32, [1]>([0])];
25
+ tensor<fp32, [1, 998, 400]> input_1 = expand_dims(axes = input_1_axes_0, x = strided_input_3)[name = tensor<string, []>("input_1")];
26
+ tensor<fp32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<fp32, []>(0x0p+0)];
27
+ tensor<int32, [6]> var_54_pad_0 = const()[name = tensor<string, []>("op_54_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 1, 0])];
28
+ tensor<string, []> var_54_mode_0 = const()[name = tensor<string, []>("op_54_mode_0"), val = tensor<string, []>("replicate")];
29
+ tensor<fp32, [1, 998, 401]> var_54 = pad(constant_val = const_2, mode = var_54_mode_0, pad = var_54_pad_0, x = input_1)[name = tensor<string, []>("op_54")];
30
+ tensor<int32, [1]> offset_strided_input_axes_0 = const()[name = tensor<string, []>("offset_strided_input_axes_0"), val = tensor<int32, [1]>([0])];
31
+ tensor<fp32, [998, 401]> offset_strided_input = squeeze(axes = offset_strided_input_axes_0, x = var_54)[name = tensor<string, []>("offset_strided_input")];
32
+ tensor<int32, [2]> var_66_begin_0 = const()[name = tensor<string, []>("op_66_begin_0"), val = tensor<int32, [2]>([0, 0])];
33
+ tensor<int32, [2]> var_66_end_0 = const()[name = tensor<string, []>("op_66_end_0"), val = tensor<int32, [2]>([998, 400])];
34
+ tensor<bool, [2]> var_66_end_mask_0 = const()[name = tensor<string, []>("op_66_end_mask_0"), val = tensor<bool, [2]>([true, false])];
35
+ tensor<fp32, [998, 400]> var_66 = slice_by_index(begin = var_66_begin_0, end = var_66_end_0, end_mask = var_66_end_mask_0, x = offset_strided_input)[name = tensor<string, []>("op_66")];
36
+ tensor<fp32, []> var_67 = const()[name = tensor<string, []>("op_67"), val = tensor<fp32, []>(0x1.f0a3d8p-1)];
37
+ tensor<fp32, [998, 400]> var_68 = mul(x = var_66, y = var_67)[name = tensor<string, []>("op_68")];
38
+ tensor<fp32, [998, 400]> strided_input_5 = sub(x = strided_input_3, y = var_68)[name = tensor<string, []>("strided_input_5")];
39
+ tensor<fp32, [1, 400]> window_function = const()[name = tensor<string, []>("window_function"), val = tensor<fp32, [1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
40
+ tensor<fp32, [998, 400]> strided_input_7 = mul(x = strided_input_5, y = window_function)[name = tensor<string, []>("strided_input_7")];
41
+ tensor<int32, [1]> input_3_axes_0 = const()[name = tensor<string, []>("input_3_axes_0"), val = tensor<int32, [1]>([0])];
42
+ tensor<fp32, [1, 998, 400]> input_3 = expand_dims(axes = input_3_axes_0, x = strided_input_7)[name = tensor<string, []>("input_3")];
43
+ tensor<fp32, []> const_3 = const()[name = tensor<string, []>("const_3"), val = tensor<fp32, []>(0x0p+0)];
44
+ tensor<int32, [6]> var_90_pad_0 = const()[name = tensor<string, []>("op_90_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 0, 112])];
45
+ tensor<string, []> var_90_mode_0 = const()[name = tensor<string, []>("op_90_mode_0"), val = tensor<string, []>("constant")];
46
+ tensor<fp32, [1, 998, 512]> var_90 = pad(constant_val = const_3, mode = var_90_mode_0, pad = var_90_pad_0, x = input_3)[name = tensor<string, []>("op_90")];
47
+ tensor<int32, [1]> strided_input_axes_0 = const()[name = tensor<string, []>("strided_input_axes_0"), val = tensor<int32, [1]>([0])];
48
+ tensor<fp32, [998, 512]> strided_input = squeeze(axes = strided_input_axes_0, x = var_90)[name = tensor<string, []>("strided_input")];
49
+ tensor<fp32, [512, 512]> cos_0 = const()[name = tensor<string, []>("cos_0"), val = tensor<fp32, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1728)))];
50
+ tensor<fp32, [512, 512]> sin_0 = const()[name = tensor<string, []>("sin_0"), val = tensor<fp32, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1050368)))];
51
+ tensor<bool, []> matmul_1_transpose_x_1 = const()[name = tensor<string, []>("matmul_1_transpose_x_1"), val = tensor<bool, []>(false)];
52
+ tensor<bool, []> matmul_1_transpose_y_1 = const()[name = tensor<string, []>("matmul_1_transpose_y_1"), val = tensor<bool, []>(true)];
53
+ tensor<fp32, [512, 998]> matmul_1 = matmul(transpose_x = matmul_1_transpose_x_1, transpose_y = matmul_1_transpose_y_1, x = cos_0, y = strided_input)[name = tensor<string, []>("matmul_1")];
54
+ tensor<bool, []> matmul_3_transpose_x_1 = const()[name = tensor<string, []>("matmul_3_transpose_x_1"), val = tensor<bool, []>(false)];
55
+ tensor<bool, []> matmul_3_transpose_y_1 = const()[name = tensor<string, []>("matmul_3_transpose_y_1"), val = tensor<bool, []>(true)];
56
+ tensor<fp32, [512, 998]> matmul_3 = matmul(transpose_x = matmul_3_transpose_x_1, transpose_y = matmul_3_transpose_y_1, x = sin_0, y = strided_input)[name = tensor<string, []>("matmul_3")];
57
+ tensor<fp32, []> mul_1_y_0 = const()[name = tensor<string, []>("mul_1_y_0"), val = tensor<fp32, []>(-0x1p+0)];
58
+ tensor<fp32, [512, 998]> mul_1 = mul(x = matmul_3, y = mul_1_y_0)[name = tensor<string, []>("mul_1")];
59
+ tensor<int32, [2]> transpose_3_perm_0 = const()[name = tensor<string, []>("transpose_3_perm_0"), val = tensor<int32, [2]>([-1, 0])];
60
+ tensor<int32, [2]> transpose_4_perm_0 = const()[name = tensor<string, []>("transpose_4_perm_0"), val = tensor<int32, [2]>([-1, 0])];
61
+ tensor<int32, [257]> range_1d_2 = const()[name = tensor<string, []>("range_1d_2"), val = tensor<int32, [257]>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256])];
62
+ tensor<int32, []> gather_0_axis_0 = const()[name = tensor<string, []>("gather_0_axis_0"), val = tensor<int32, []>(-1)];
63
+ tensor<int32, []> gather_0_batch_dims_0 = const()[name = tensor<string, []>("gather_0_batch_dims_0"), val = tensor<int32, []>(0)];
64
+ tensor<fp32, [998, 512]> transpose_3 = transpose(perm = transpose_3_perm_0, x = matmul_1)[name = tensor<string, []>("transpose_6")];
65
+ tensor<fp32, [998, 257]> gather_0 = gather(axis = gather_0_axis_0, batch_dims = gather_0_batch_dims_0, indices = range_1d_2, x = transpose_3)[name = tensor<string, []>("gather_0")];
66
+ tensor<int32, []> gather_1_axis_0 = const()[name = tensor<string, []>("gather_1_axis_0"), val = tensor<int32, []>(-1)];
67
+ tensor<int32, []> gather_1_batch_dims_0 = const()[name = tensor<string, []>("gather_1_batch_dims_0"), val = tensor<int32, []>(0)];
68
+ tensor<fp32, [998, 512]> transpose_4 = transpose(perm = transpose_4_perm_0, x = mul_1)[name = tensor<string, []>("transpose_5")];
69
+ tensor<fp32, [998, 257]> gather_1 = gather(axis = gather_1_axis_0, batch_dims = gather_1_batch_dims_0, indices = range_1d_2, x = transpose_4)[name = tensor<string, []>("gather_1")];
70
+ tensor<fp32, [998, 257]> square_0 = square(x = gather_0)[name = tensor<string, []>("square_0")];
71
+ tensor<fp32, [998, 257]> square_1 = square(x = gather_1)[name = tensor<string, []>("square_1")];
72
+ tensor<fp32, [998, 257]> add_1 = add(x = square_0, y = square_1)[name = tensor<string, []>("add_1")];
73
+ tensor<fp32, [998, 257]> spectrum = identity(x = add_1)[name = tensor<string, []>("spectrum")];
74
+ tensor<fp32, [80, 257]> mel_energies_3 = const()[name = tensor<string, []>("mel_energies_3"), val = tensor<fp32, [80, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2099008)))];
75
+ tensor<fp32, [80]> mel_energies_bias_0 = const()[name = tensor<string, []>("mel_energies_bias_0"), val = tensor<fp32, [80]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2181312)))];
76
+ tensor<fp32, [998, 80]> mel_energies = linear(bias = mel_energies_bias_0, weight = mel_energies_3, x = spectrum)[name = tensor<string, []>("mel_energies")];
77
+ tensor<fp32, []> const_10 = const()[name = tensor<string, []>("const_10"), val = tensor<fp32, []>(0x1p-23)];
78
+ tensor<fp32, [998, 80]> var_186 = maximum(x = mel_energies, y = const_10)[name = tensor<string, []>("op_186")];
79
+ tensor<fp32, []> filter_banks_epsilon_0 = const()[name = tensor<string, []>("filter_banks_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
80
+ tensor<fp32, [998, 80]> filter_banks = log(epsilon = filter_banks_epsilon_0, x = var_186)[name = tensor<string, []>("filter_banks")];
81
+ tensor<int32, [1]> var_192_axes_0 = const()[name = tensor<string, []>("op_192_axes_0"), val = tensor<int32, [1]>([0])];
82
+ tensor<bool, []> var_192_keep_dims_0 = const()[name = tensor<string, []>("op_192_keep_dims_0"), val = tensor<bool, []>(true)];
83
+ tensor<fp32, [1, 80]> var_192 = reduce_mean(axes = var_192_axes_0, keep_dims = var_192_keep_dims_0, x = filter_banks)[name = tensor<string, []>("op_192")];
84
+ tensor<fp32, [998, 80]> var_194 = sub(x = filter_banks, y = var_192)[name = tensor<string, []>("op_194")];
85
+ tensor<int32, [1]> var_196_axes_0 = const()[name = tensor<string, []>("op_196_axes_0"), val = tensor<int32, [1]>([0])];
86
+ tensor<fp32, [1, 998, 80]> preprocessor_output_1_type_fp32 = expand_dims(axes = var_196_axes_0, x = var_194)[name = tensor<string, []>("op_196")];
87
+ tensor<string, []> cast_9_dtype_0 = const()[name = tensor<string, []>("cast_9_dtype_0"), val = tensor<string, []>("fp16")];
88
+ tensor<fp16, [1, 998, 80]> preprocessor_output_1 = cast(dtype = cast_9_dtype_0, x = preprocessor_output_1_type_fp32)[name = tensor<string, []>("cast_10")];
89
+ } -> (preprocessor_output_1);
90
+ }
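
In more familiar terms, the graph above computes Kaldi-style 80-bin log-mel filterbank features: per-frame DC removal, pre-emphasis (coefficient ≈ 0.97), windowing, a 512-point DFT realized as cos/sin matmuls, a power spectrum, a mel projection, a log, and a final per-utterance mean normalization. Below is a rough NumPy restatement under those assumptions; the window function and mel filterbank matrix live in `weight.bin`, so they appear here as placeholder arguments.

```python
import numpy as np

def fbank_like(waveform: np.ndarray, window: np.ndarray,
               mel_weights: np.ndarray, mel_bias: np.ndarray) -> np.ndarray:
    """waveform: [160000] float in [-1, 1]; window: [400];
    mel_weights: [80, 257]; mel_bias: [80]. Returns [1, 998, 80]."""
    x = waveform * 2.0**15                                              # rescale to int16 range
    frames = np.lib.stride_tricks.sliding_window_view(x, 400)[::160]   # [998, 400] frames
    frames = frames - frames.mean(axis=1, keepdims=True)               # remove per-frame DC
    prev = np.pad(frames, ((0, 0), (1, 0)), mode="edge")[:, :400]      # previous sample, edge-replicated
    frames = frames - 0.97 * prev                                      # pre-emphasis (~0.97 in the graph)
    frames = frames * window                                           # apply window function
    spec = np.fft.rfft(frames, n=512)                                  # the graph uses cos/sin matmuls instead
    power = np.abs(spec) ** 2                                          # [998, 257] power spectrum
    mel = power @ mel_weights.T + mel_bias                             # [998, 80] mel energies
    logmel = np.log(np.maximum(mel, 2.0**-23))                         # floor, then log
    return (logmel - logmel.mean(axis=0, keepdims=True))[None]         # mean-normalize, add batch dim
```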
speaker_embedder/pyannote-v3/SpeakerEmbedderPreprocessor.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f2c284bd22f1f7ab76901c1c6e57f82d4ebbf057fa0b924aad057f124f77a89
3
+ size 2181696
speaker_segmenter/pyannote-v3/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
1
+ Argmax proprietary and confidential. Under NDA.
2
+
3
+ Copyright 2024 Argmax, Inc. All rights reserved.
4
+
5
+ Unauthorized access, copying, use, distribution, and/or commercialization of this file, via any medium or means, is strictly prohibited.
6
+
7
+ Please contact Argmax for licensing information at [email protected].
speaker_segmenter/pyannote-v3/README.txt ADDED
@@ -0,0 +1,6 @@
1
+ # License
2
+
3
+ Original model weights: https://huggingface.co/pyannote/segmentation-3.0/blob/main/LICENSE
4
+ Argmax-optimized model assets (assets with the `.mlmodelc` extension): https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt
5
+
6
+ Please contact [email protected] to license SpeakerKit Pro assets.
speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
1
+ Argmax proprietary and confidential. Under NDA.
2
+
3
+ Copyright 2024 Argmax, Inc. All rights reserved.
4
+
5
+ Unauthorized access, copying, use, distribution, and/or commercialization of this file, via any medium or means, is strictly prohibited.
6
+
7
+ Please contact Argmax for licensing information at [email protected].
speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42e2434809899abce6a3947f4f3b1365af7c8d3762e4c4bfc0df886f1dca8347
3
+ size 243
speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed53832ecc7af1c0eb6bc1bc8a475c369d6103ea08f21a40302a57f06966c6c8
3
+ size 497
speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/metadata.json ADDED
@@ -0,0 +1,132 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "storagePrecision" : "Mixed (Float16, Float32)",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 21 × 589 × 3)",
11
+ "shortDescription" : "",
12
+ "shape" : "[21, 589, 3]",
13
+ "name" : "speaker_probs",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float16",
20
+ "formattedType" : "MultiArray (Float16 21 × 589 × 3)",
21
+ "shortDescription" : "",
22
+ "shape" : "[21, 589, 3]",
23
+ "name" : "speaker_ids",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float16",
30
+ "formattedType" : "MultiArray (Float16 21 × 3)",
31
+ "shortDescription" : "",
32
+ "shape" : "[21, 3]",
33
+ "name" : "speaker_activity",
34
+ "type" : "MultiArray"
35
+ },
36
+ {
37
+ "hasShapeFlexibility" : "0",
38
+ "isOptional" : "0",
39
+ "dataType" : "Float16",
40
+ "formattedType" : "MultiArray (Float16 21 × 589)",
41
+ "shortDescription" : "",
42
+ "shape" : "[21, 589]",
43
+ "name" : "overlapped_speaker_activity",
44
+ "type" : "MultiArray"
45
+ },
46
+ {
47
+ "hasShapeFlexibility" : "0",
48
+ "isOptional" : "0",
49
+ "dataType" : "Float16",
50
+ "formattedType" : "MultiArray (Float16 1767)",
51
+ "shortDescription" : "",
52
+ "shape" : "[1767]",
53
+ "name" : "voice_activity",
54
+ "type" : "MultiArray"
55
+ },
56
+ {
57
+ "hasShapeFlexibility" : "0",
58
+ "isOptional" : "0",
59
+ "dataType" : "Float16",
60
+ "formattedType" : "MultiArray (Float16 21 × 1 × 160000)",
61
+ "shortDescription" : "",
62
+ "shape" : "[21, 1, 160000]",
63
+ "name" : "sliding_window_waveform",
64
+ "type" : "MultiArray"
65
+ }
66
+ ],
67
+ "modelParameters" : [
68
+
69
+ ],
70
+ "specificationVersion" : 7,
71
+ "mlProgramOperationTypeHistogram" : {
72
+ "Transpose" : 2,
73
+ "Ios16.maxPool" : 3,
74
+ "Ios16.exp" : 1,
75
+ "Ios16.softmax" : 1,
76
+ "SlidingWindows" : 1,
77
+ "Ios16.linear" : 5,
78
+ "Ios16.add" : 40,
79
+ "Ios16.realDiv" : 1,
80
+ "Ios16.reduceMax" : 1,
81
+ "Ios16.reduceSum" : 2,
82
+ "Ios16.reduceArgmax" : 1,
83
+ "Ios16.greater" : 1,
84
+ "Ios16.log" : 1,
85
+ "ExpandDims" : 1,
86
+ "Ios16.instanceNorm" : 4,
87
+ "Ios16.cast" : 4,
88
+ "Ios16.conv" : 3,
89
+ "Ios16.lstm" : 4,
90
+ "OneHot" : 1,
91
+ "Ios16.scatter" : 42,
92
+ "SliceByIndex" : 61,
93
+ "Ios16.abs" : 1,
94
+ "Ios16.leakyRelu" : 5
95
+ },
96
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
97
+ "isUpdatable" : "0",
98
+ "stateSchema" : [
99
+
100
+ ],
101
+ "availability" : {
102
+ "macOS" : "13.0",
103
+ "tvOS" : "16.0",
104
+ "visionOS" : "1.0",
105
+ "watchOS" : "9.0",
106
+ "iOS" : "16.0",
107
+ "macCatalyst" : "16.0"
108
+ },
109
+ "modelType" : {
110
+ "name" : "MLModelType_mlProgram"
111
+ },
112
+ "userDefinedMetadata" : {
113
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
114
+ "com.github.apple.coremltools.source" : "torch==2.5.1",
115
+ "com.github.apple.coremltools.version" : "8.0"
116
+ },
117
+ "inputSchema" : [
118
+ {
119
+ "hasShapeFlexibility" : "0",
120
+ "isOptional" : "0",
121
+ "dataType" : "Float16",
122
+ "formattedType" : "MultiArray (Float16 480000)",
123
+ "shortDescription" : "",
124
+ "shape" : "[480000]",
125
+ "name" : "waveform",
126
+ "type" : "MultiArray"
127
+ }
128
+ ],
129
+ "generatedClassName" : "SpeakerSegmenter",
130
+ "method" : "predict"
131
+ }
132
+ ]
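
For reference, a hypothetical coremltools invocation of this compiled segmenter, matching the input/output schema above. The path, compute units, and the 30 s at 16 kHz interpretation of the 480,000-sample input are assumptions, not part of the asset.

```python
import numpy as np
import coremltools as ct

# Load the compiled model directly (prediction requires macOS); path is relative to this repo.
segmenter = ct.models.CompiledMLModel(
    "speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc",
    compute_units=ct.ComputeUnit.ALL,
)

# The schema declares a single Float16 vector of 480,000 samples
# (30 s of mono audio at an assumed 16 kHz); zeros stand in for real audio here.
waveform = np.zeros(480_000, dtype=np.float16)

outputs = segmenter.predict({"waveform": waveform})
print(outputs["speaker_probs"].shape)            # (21, 589, 3): windows x frames x speakers
print(outputs["sliding_window_waveform"].shape)  # (21, 1, 160000): matches the embedder preprocessor input length
```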
speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1584619d1180ef89807b66c2c96605720365c02d4fbdcc9be02bbad91d188128
3
+ size 5760346