Compressed whisper-small.en, whisper-small, large-v3-v20240930 (#3)
Commit 3248e06e9a21d64d592bafe28f793f8df85e743f
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
- openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/metadata.json +92 -0
- openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/model.mil +0 -0
- openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/metadata.json +74 -0
- openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/model.mil +66 -0
- openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/metadata.json +185 -0
- openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/model.mil +0 -0
- openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/metadata.json +92 -0
- openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mil +0 -0
- openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/metadata.json +74 -0
- openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/model.mil +66 -0
- openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small.en_217MB/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small.en_217MB/TextDecoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small.en_217MB/TextDecoder.mlmodelc/metadata.json +185 -0
- openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mil +0 -0
- openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-small.en_217MB/TextDecoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small_216MB/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small_216MB/AudioEncoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small_216MB/AudioEncoder.mlmodelc/metadata.json +92 -0
- openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mil +0 -0
- openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mlmodel +3 -0
- openai_whisper-small_216MB/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small_216MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small_216MB/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small_216MB/MelSpectrogram.mlmodelc/metadata.json +74 -0
- openai_whisper-small_216MB/MelSpectrogram.mlmodelc/model.mil +66 -0
- openai_whisper-small_216MB/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
- openai_whisper-small_216MB/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- openai_whisper-small_216MB/TextDecoder.mlmodelc/coremldata.bin +3 -0
- openai_whisper-small_216MB/TextDecoder.mlmodelc/metadata.json +185 -0
- openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mil +0 -0
- openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mlmodel +3 -0
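Each `.mlmodelc` directory above is a compiled Core ML bundle; its binary members (coremldata.bin, model.mlmodel, weight.bin) are checked in as Git LFS pointers, so only a sha256 and size appear in the diffs below. A minimal sketch of loading and running one of the bundles from Python, assuming a local checkout with LFS objects pulled and coremltools >= 8.0 (the toolchain version recorded in the metadata); the path and the all-zeros input are illustrative:

```python
import numpy as np
import coremltools as ct

# .mlmodelc bundles are already compiled, so load them with CompiledMLModel
# rather than MLModel (which expects .mlmodel/.mlpackage sources).
mel = ct.models.CompiledMLModel(
    "openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc"
)

# Per the MelSpectrogram inputSchema below: 30 s of 16 kHz mono audio as a
# flat [480000] Float16 array. Zeros here are purely illustrative.
audio = np.zeros(480000, dtype=np.float16)
out = mel.predict({"audio": audio})
print(out["melspectrogram_features"].shape)  # expected (1, 128, 1, 3000)
```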
openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5804542da569c631a754ccc129c66669acc58ea46cf1f8b6802147a2d5528bb9
+size 243
openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d8eb697b6f277c3262741c03978dac85c9d74efeb56269e949d7410cb80ab84
+size 434
openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,92 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1280 × 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 1280, 1, 1500]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 4 × 1280 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[4, 1280, 1, 1536]",
+        "name" : "encoder_attn_key_cache",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 4 × 1280 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[4, 1280, 1, 1536]",
+        "name" : "encoder_attn_value_cache",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.constexprLutToDense" : 202,
+      "Ios18.constexprSparseToDense" : 200,
+      "Ios18.conv" : 404,
+      "Ios18.matmul" : 64,
+      "Ios18.batchNorm" : 65,
+      "Pad" : 2,
+      "Ios18.gelu" : 34,
+      "Ios18.concat" : 2,
+      "Ios18.add" : 267,
+      "Ios18.softmax" : 32,
+      "Ios18.layerNorm" : 65,
+      "Ios18.reshape" : 128,
+      "Ios18.mul" : 32
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 128, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "AudioEncoderStateful",
+    "method" : "predict"
+  }
+]
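The schema above fixes the encoder contract: one mel input `[1, 128, 1, 3000]` in; the encoder embedding plus precomputed cross-attention K/V caches out. Continuing the sketch from the top of this diff (the chaining itself is an assumed usage pattern, not documented here):

```python
encoder = ct.models.CompiledMLModel(
    "openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc"
)
enc = encoder.predict({"melspectrogram_features": out["melspectrogram_features"]})
print(enc["encoder_output_embeds"].shape)    # (1, 1280, 1, 1500)
print(enc["encoder_attn_key_cache"].shape)   # (4, 1280, 1, 1536)
```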
openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9ff86aadad04b5a33905359c151be0c2bca6fca7212b6856376508716072906
+size 798066
openai_whisper-large-v3-v20240930_626MB/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1513c45d5252aafd4b7a949d783c5e80786766432017a62227d131958c9a30c
+size 430375168
openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0980462db89a546e1e90888ea38e0a5ddf1f1fec84608802cdbb12f8a5cc7215
+size 243
openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6475c6649047ce609e3fe84b2525843c03342820662404540baf28146c174014
+size 329
openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,74 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 128 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 128, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.mul" : 2,
+      "Ios18.square" : 2,
+      "Ios18.conv" : 2,
+      "Ios18.matmul" : 1,
+      "Ios18.expandDims" : 4,
+      "Ios18.sub" : 1,
+      "Ios18.log" : 1,
+      "Ios18.add" : 3,
+      "Ios18.sliceByIndex" : 1,
+      "Ios18.maximum" : 1,
+      "Ios18.squeeze" : 2,
+      "Ios18.reshape" : 2,
+      "Ios16.reduceMax" : 1,
+      "Identity" : 1,
+      "Pad" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 480000)",
+        "shortDescription" : "",
+        "shape" : "[480000]",
+        "name" : "audio",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MelSpectrogram",
+    "method" : "predict"
+  }
+]
openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,66 @@
+program(1.3)
+[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+{
+    func main<ios18>(tensor<fp16, [480000]> audio) {
+        tensor<int32, [3]> var_10 = const()[name = string("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+        tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = string("input_1_cast_fp16")];
+        tensor<int32, [6]> input_3_pad_0 = const()[name = string("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+        string input_3_mode_0 = const()[name = string("input_3_mode_0"), val = string("reflect")];
+        fp16 const_1_to_fp16 = const()[name = string("const_1_to_fp16"), val = fp16(0x0p+0)];
+        tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = string("input_3_cast_fp16")];
+        tensor<int32, [1]> var_22 = const()[name = string("op_22"), val = tensor<int32, [1]>([480400])];
+        tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = string("input_cast_fp16")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = string("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = string("expand_dims_0_cast_fp16")];
+        tensor<int32, [1]> expand_dims_3 = const()[name = string("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = string("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = string("expand_dims_4_cast_fp16")];
+        string conv_0_pad_type_0 = const()[name = string("conv_0_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = string("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = string("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        int32 conv_0_groups_0 = const()[name = string("conv_0_groups_0"), val = int32(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = string("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+        tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_0_cast_fp16")];
+        string conv_1_pad_type_0 = const()[name = string("conv_1_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = string("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = string("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        int32 conv_1_groups_0 = const()[name = string("conv_1_groups_0"), val = int32(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = string("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(160960)))];
+        tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_1_cast_fp16")];
+        tensor<int32, [1]> squeeze_0_axes_0 = const()[name = string("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = string("squeeze_0_cast_fp16")];
+        tensor<int32, [1]> squeeze_1_axes_0 = const()[name = string("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = string("squeeze_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = string("square_0_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = string("square_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = string("add_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = string("magnitudes_1_cast_fp16")];
+        tensor<int32, [2]> magnitudes_begin_0 = const()[name = string("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [2]> magnitudes_end_0 = const()[name = string("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+        tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = string("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+        tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = string("magnitudes_cast_fp16")];
+        bool mel_spec_1_transpose_x_0 = const()[name = string("mel_spec_1_transpose_x_0"), val = bool(false)];
+        bool mel_spec_1_transpose_y_0 = const()[name = string("mel_spec_1_transpose_y_0"), val = bool(false)];
+        tensor<fp16, [128, 201]> mel_filters_to_fp16 = const()[name = string("mel_filters_to_fp16"), val = tensor<fp16, [128, 201]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(321856)))];
+        tensor<fp16, [128, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = string("mel_spec_1_cast_fp16")];
+        fp16 var_41_to_fp16 = const()[name = string("op_41_to_fp16"), val = fp16(0x1p-24)];
+        tensor<fp16, [128, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = string("mel_spec_cast_fp16")];
+        fp32 log_0_epsilon_0 = const()[name = string("log_0_epsilon_0"), val = fp32(0x1p-149)];
+        tensor<fp16, [128, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0, x = mel_spec_cast_fp16)[name = string("log_0_cast_fp16")];
+        fp16 mul_0_y_0_to_fp16 = const()[name = string("mul_0_y_0_to_fp16"), val = fp16(0x1.bccp-2)];
+        tensor<fp16, [128, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = string("mul_0_cast_fp16")];
+        bool var_44_keep_dims_0 = const()[name = string("op_44_keep_dims_0"), val = bool(false)];
+        fp16 var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = string("op_44_cast_fp16")];
+        fp16 var_46_to_fp16 = const()[name = string("op_46_to_fp16"), val = fp16(0x1p+3)];
+        fp16 var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = string("op_47_cast_fp16")];
+        tensor<fp16, [128, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = string("log_spec_3_cast_fp16")];
+        fp16 var_50_to_fp16 = const()[name = string("op_50_to_fp16"), val = fp16(0x1p+2)];
+        tensor<fp16, [128, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = string("op_51_cast_fp16")];
+        fp16 _inversed_log_spec_y_0_to_fp16 = const()[name = string("_inversed_log_spec_y_0_to_fp16"), val = fp16(0x1p-2)];
+        tensor<fp16, [128, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = string("_inversed_log_spec_cast_fp16")];
+        tensor<int32, [1]> var_55_axes_0 = const()[name = string("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 128, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = string("op_55_cast_fp16")];
+        tensor<int32, [1]> var_62_axes_0 = const()[name = string("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 128, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = string("op_62_cast_fp16")];
+    } -> (melspectrogram_features);
+}
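This MIL program is Whisper's standard log-mel front end: the STFT is expressed as two stride-160 convolutions (real and imaginary filter banks of shape [201, 1, 400] read from weight.bin), followed by power, a mel projection, and log/clamp/rescale steps. A rough NumPy re-derivation of the same arithmetic, assuming hypothetical `cos_bank`/`sin_bank` `[201, 400]` and `mel_filters` `[128, 201]` arrays extracted from `weights/weight.bin`:

```python
import numpy as np

def log_mel(audio, cos_bank, sin_bank, mel_filters):
    # pad(mode="reflect") with 200 samples on each side -> [480400]
    x = np.pad(audio, 200, mode="reflect")
    # stride-160 windows of length 400 -> 3001 frames (the conv inputs)
    frames = np.stack([x[i * 160 : i * 160 + 400] for i in range(3001)])
    real = frames @ cos_bank.T                 # conv_0 -> [3001, 201]
    imag = frames @ sin_bank.T                 # conv_1 -> [3001, 201]
    power = (real**2 + imag**2)[:3000].T       # square/add/slice -> [201, 3000]
    mel = mel_filters @ power + 2.0**-24       # matmul + epsilon (op_41)
    log_spec = np.log(mel) * 0.4343            # ln x log10(e): the 0x1.bccp-2 const
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # reduce_max/sub/maximum
    return ((log_spec + 4.0) / 4.0)[None, :, None, :]      # -> [1, 128, 1, 3000]
```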
openai_whisper-large-v3-v20240930_626MB/MelSpectrogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:009d9fb8f6b589accfa08cebf1c712ef07c3405229ce3cfb3a57ee033c9d8a49
+size 373376
openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e77577edea445aa84e0b74bedc71812d8321b3861d62fd1ceee924f6f920f20a
+size 243
openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4901c5e4249e42e8f37325412eb6fcf9ca9c5e22660271613675afed77cff8f
+size 754
openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,185 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 51866)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 51866]",
+        "name" : "logits",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 5120 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 5120, 1, 1]",
+        "name" : "key_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 5120 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 5120, 1, 1]",
+        "name" : "value_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536]",
+        "name" : "alignment_heads_weights",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.expandDims" : 8,
+      "Ios18.softmax" : 8,
+      "Ios18.mul" : 16,
+      "Ios18.matmul" : 16,
+      "Ios18.batchNorm" : 13,
+      "Ios16.reduceMean" : 1,
+      "Split" : 2,
+      "Ios18.readState" : 5,
+      "Ios18.gather" : 3,
+      "Ios18.add" : 62,
+      "Ios18.layerNorm" : 13,
+      "Ios18.reshape" : 32,
+      "Ios18.constexprLutToDense" : 32,
+      "Ios18.constexprSparseToDense" : 33,
+      "Ios18.conv" : 64,
+      "Ios18.gelu" : 4,
+      "Ios18.linear" : 1,
+      "Ios18.cast" : 1,
+      "Ios18.transpose" : 1,
+      "Ios18.concat" : 3,
+      "Ios18.sliceByIndex" : 20,
+      "Ios18.squeeze" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Int32, UInt16)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536]",
+        "name" : "encoder_attn_key_padding_mask",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 4 × 1280 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[4, 1280, 1, 1536]",
+        "name" : "encoder_attn_key_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 4 × 1280 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[4, 1280, 1, 1536]",
+        "name" : "encoder_attn_value_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 4 × 1280 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[4, 1280, 1, 448]",
+        "name" : "self_attn_key_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 4 × 1280 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[4, 1280, 1, 448]",
+        "name" : "self_attn_value_cache",
+        "type" : "State"
+      }
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "cache_length",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "kv_cache_update_mask",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "decoder_key_padding_mask",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "TextDecoderStateful",
+    "method" : "predict"
+  }
+]
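Unlike the encoder, the decoder keeps its KV caches in Core ML state (the stateSchema above), so generation is one predict() per token against a persistent state object. A sketch of a single greedy step, assuming coremltools 8 on macOS 15 (make_state() exists only for stateful mlProgram models) and assuming the mask conventions (1.0 marks the cache slot to write; 0.0 valid / large-negative masked positions) — both are inferences, not documented in this diff:

```python
decoder = ct.models.CompiledMLModel(
    "openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc"
)
state = decoder.make_state()        # allocates the five stateSchema buffers

token, pos = 50258, 0               # e.g. <|startoftranscript|>; illustrative
kv_mask = np.zeros((1, 448), dtype=np.float16)
pad_mask = np.full((1, 448), -np.inf, dtype=np.float16)
kv_mask[0, pos] = 1.0               # write this step's K/V into slot `pos`
pad_mask[0, : pos + 1] = 0.0        # attend only to the decoded prefix
out = decoder.predict(
    {
        "input_ids": np.array([token], dtype=np.int32),
        "cache_length": np.array([pos], dtype=np.int32),
        "kv_cache_update_mask": kv_mask,
        "decoder_key_padding_mask": pad_mask,
    },
    state=state,
)
token = int(out["logits"][0, 0].argmax())   # next token over the 51866 vocab
```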
openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:beb44115c7f126e0a9ea1733afa4b3a50609d361c43b14853b89a2da8000d220
+size 163308
openai_whisper-large-v3-v20240930_626MB/TextDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43abea4ea336b9e458398af192796b78180565f3711530c0d8d8d7f192e199ff
+size 193154996
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45601c764f7bc51711b42670d55580fa949cf76bbeebc328c60882c048499bf2
+size 243
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a078e65c9369ce8a4a687a2bbb0a8befbd4ed459250c0442176824906fa95ee1
+size 433
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,92 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 768, 1, 1500]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_key_cache",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_value_cache",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.constexprLutToDense" : 98,
+      "Ios18.constexprSparseToDense" : 96,
+      "Ios18.conv" : 196,
+      "Ios18.matmul" : 24,
+      "Ios18.batchNorm" : 25,
+      "Pad" : 2,
+      "Ios18.gelu" : 14,
+      "Ios18.concat" : 2,
+      "Ios18.add" : 123,
+      "Ios18.softmax" : 12,
+      "Ios18.layerNorm" : 25,
+      "Ios18.reshape" : 48,
+      "Ios18.mul" : 12
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "AudioEncoderStateful",
+    "method" : "predict"
+  }
+]
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d08f8c4c63c48ed098e6536f5620273f3e661c17523e0861780a99d01a1a3749
+size 370371
openai_whisper-small.en_217MB/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:265b53be8d21fd319531a81bf38db77112bfe068b7b681cbf47b26faccfbee55
+size 71137600
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc05e563ee0c556e3f578e04be5fb67b4e7520124403f2561f39102f0f2b33d
+size 243
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4ef11ea703011eab03287ec661f999e19c2c78cf67d531b5e6afa02e18f913d
+size 328
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,74 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.mul" : 2,
+      "Ios18.square" : 2,
+      "Ios18.conv" : 2,
+      "Ios18.matmul" : 1,
+      "Ios18.expandDims" : 4,
+      "Ios18.sub" : 1,
+      "Ios18.log" : 1,
+      "Ios18.add" : 3,
+      "Ios18.sliceByIndex" : 1,
+      "Ios18.maximum" : 1,
+      "Ios18.squeeze" : 2,
+      "Ios18.reshape" : 2,
+      "Ios16.reduceMax" : 1,
+      "Identity" : 1,
+      "Pad" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 480000)",
+        "shortDescription" : "",
+        "shape" : "[480000]",
+        "name" : "audio",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MelSpectrogram",
+    "method" : "predict"
+  }
+]
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,66 @@
+program(1.3)
+[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+{
+    func main<ios18>(tensor<fp16, [480000]> audio) {
+        tensor<int32, [3]> var_10 = const()[name = string("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+        tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = string("input_1_cast_fp16")];
+        tensor<int32, [6]> input_3_pad_0 = const()[name = string("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+        string input_3_mode_0 = const()[name = string("input_3_mode_0"), val = string("reflect")];
+        fp16 const_1_to_fp16 = const()[name = string("const_1_to_fp16"), val = fp16(0x0p+0)];
+        tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = string("input_3_cast_fp16")];
+        tensor<int32, [1]> var_22 = const()[name = string("op_22"), val = tensor<int32, [1]>([480400])];
+        tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = string("input_cast_fp16")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = string("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = string("expand_dims_0_cast_fp16")];
+        tensor<int32, [1]> expand_dims_3 = const()[name = string("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = string("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = string("expand_dims_4_cast_fp16")];
+        string conv_0_pad_type_0 = const()[name = string("conv_0_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = string("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = string("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        int32 conv_0_groups_0 = const()[name = string("conv_0_groups_0"), val = int32(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = string("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+        tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_0_cast_fp16")];
+        string conv_1_pad_type_0 = const()[name = string("conv_1_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = string("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = string("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        int32 conv_1_groups_0 = const()[name = string("conv_1_groups_0"), val = int32(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = string("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(160960)))];
+        tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_1_cast_fp16")];
+        tensor<int32, [1]> squeeze_0_axes_0 = const()[name = string("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = string("squeeze_0_cast_fp16")];
+        tensor<int32, [1]> squeeze_1_axes_0 = const()[name = string("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = string("squeeze_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = string("square_0_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = string("square_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = string("add_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = string("magnitudes_1_cast_fp16")];
+        tensor<int32, [2]> magnitudes_begin_0 = const()[name = string("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [2]> magnitudes_end_0 = const()[name = string("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+        tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = string("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+        tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = string("magnitudes_cast_fp16")];
+        bool mel_spec_1_transpose_x_0 = const()[name = string("mel_spec_1_transpose_x_0"), val = bool(false)];
+        bool mel_spec_1_transpose_y_0 = const()[name = string("mel_spec_1_transpose_y_0"), val = bool(false)];
+        tensor<fp16, [80, 201]> mel_filters_to_fp16 = const()[name = string("mel_filters_to_fp16"), val = tensor<fp16, [80, 201]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(321856)))];
+        tensor<fp16, [80, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = string("mel_spec_1_cast_fp16")];
+        fp16 var_41_to_fp16 = const()[name = string("op_41_to_fp16"), val = fp16(0x1p-24)];
+        tensor<fp16, [80, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = string("mel_spec_cast_fp16")];
+        fp32 log_0_epsilon_0 = const()[name = string("log_0_epsilon_0"), val = fp32(0x1p-149)];
+        tensor<fp16, [80, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0, x = mel_spec_cast_fp16)[name = string("log_0_cast_fp16")];
+        fp16 mul_0_y_0_to_fp16 = const()[name = string("mul_0_y_0_to_fp16"), val = fp16(0x1.bccp-2)];
+        tensor<fp16, [80, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = string("mul_0_cast_fp16")];
+        bool var_44_keep_dims_0 = const()[name = string("op_44_keep_dims_0"), val = bool(false)];
+        fp16 var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = string("op_44_cast_fp16")];
+        fp16 var_46_to_fp16 = const()[name = string("op_46_to_fp16"), val = fp16(0x1p+3)];
+        fp16 var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = string("op_47_cast_fp16")];
+        tensor<fp16, [80, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = string("log_spec_3_cast_fp16")];
+        fp16 var_50_to_fp16 = const()[name = string("op_50_to_fp16"), val = fp16(0x1p+2)];
+        tensor<fp16, [80, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = string("op_51_cast_fp16")];
+        fp16 _inversed_log_spec_y_0_to_fp16 = const()[name = string("_inversed_log_spec_y_0_to_fp16"), val = fp16(0x1p-2)];
+        tensor<fp16, [80, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = string("_inversed_log_spec_cast_fp16")];
+        tensor<int32, [1]> var_55_axes_0 = const()[name = string("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 80, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = string("op_55_cast_fp16")];
+        tensor<int32, [1]> var_62_axes_0 = const()[name = string("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = string("op_62_cast_fp16")];
+    } -> (melspectrogram_features);
+}
openai_whisper-small.en_217MB/MelSpectrogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:801024dbc7a89c677be1f8b285de3409e35f7d1786c9c8d9d0d6842ac57a1c83
+size 354080
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bba8806a80559fb597de37faaa2740fa1c3a464e2941dac2cd2139dbd5ea70ff
+size 243
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7af00de6eebff972d49c192df21fdcc49dc037f8043e50e89fdd8e3831e1a8e8
+size 754
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,185 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 51864)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 51864]",
+        "name" : "logits",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 1]",
+        "name" : "key_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 1]",
+        "name" : "value_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536]",
+        "name" : "alignment_heads_weights",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.expandDims" : 8,
+      "Ios18.softmax" : 24,
+      "Ios18.mul" : 48,
+      "Ios18.matmul" : 48,
+      "Ios18.batchNorm" : 37,
+      "Ios16.reduceMean" : 1,
+      "Split" : 2,
+      "Ios18.readState" : 5,
+      "Ios18.gather" : 3,
+      "Ios18.add" : 182,
+      "Ios18.layerNorm" : 37,
+      "Ios18.reshape" : 96,
+      "Ios18.constexprLutToDense" : 96,
+      "Ios18.constexprSparseToDense" : 97,
+      "Ios18.conv" : 192,
+      "Ios18.gelu" : 12,
+      "Ios18.linear" : 1,
+      "Ios18.cast" : 1,
+      "Ios18.transpose" : 1,
+      "Ios18.concat" : 3,
+      "Ios18.sliceByIndex" : 62,
+      "Ios18.squeeze" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Int32, UInt16)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536]",
+        "name" : "encoder_attn_key_padding_mask",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_key_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_value_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 448]",
+        "name" : "self_attn_key_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 448]",
+        "name" : "self_attn_value_cache",
+        "type" : "State"
+      }
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "cache_length",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "kv_cache_update_mask",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "decoder_key_padding_mask",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "TextDecoderStateful",
+    "method" : "predict"
+  }
+]
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3523fca50d554f8489c77b650a2ad67bbae52dc273cc7ae065f42d9ed203a61a
+size 471306
openai_whisper-small.en_217MB/TextDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d081733de41795cc1b645efddb2e087d0aee20ec0d4827de1afe9d12968fddf2
+size 144553072
openai_whisper-small_216MB/AudioEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9013b810ca238cedd09069c765cdf0937f84e1ed74c90df4c4d05b602fdcc7ac
+size 243
openai_whisper-small_216MB/AudioEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a078e65c9369ce8a4a687a2bbb0a8befbd4ed459250c0442176824906fa95ee1
+size 433

openai_whisper-small_216MB/AudioEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,92 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 768 × 1 × 1500)",
+        "shortDescription" : "",
+        "shape" : "[1, 768, 1, 1500]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_key_cache",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_value_cache",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.constexprLutToDense" : 98,
+      "Ios18.constexprSparseToDense" : 96,
+      "Ios18.conv" : 196,
+      "Ios18.matmul" : 24,
+      "Ios18.batchNorm" : 25,
+      "Pad" : 2,
+      "Ios18.gelu" : 14,
+      "Ios18.concat" : 2,
+      "Ios18.add" : 123,
+      "Ios18.softmax" : 12,
+      "Ios18.layerNorm" : 25,
+      "Ios18.reshape" : 48,
+      "Ios18.mul" : 12
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "AudioEncoderStateful_mixedBitPalettized_4_bit",
+    "method" : "predict"
+  }
+]
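
Given the schema above, a quick smoke test of the 4-bit palettized encoder is straightforward. A minimal sketch, assuming macOS and coremltools ≥ 7 (whose CompiledMLModel can load .mlmodelc bundles directly); the zero-filled input is only for shape checking:

    import coremltools as ct
    import numpy as np

    # Load the compiled encoder directly (macOS; coremltools >= 7 assumed).
    encoder = ct.models.CompiledMLModel(
        "openai_whisper-small_216MB/AudioEncoder.mlmodelc",
        compute_units=ct.ComputeUnit.ALL,
    )

    # Dummy 30 s feature window matching the input schema above.
    mel = np.zeros((1, 80, 1, 3000), dtype=np.float16)
    out = encoder.predict({"melspectrogram_features": mel})

    print(out["encoder_output_embeds"].shape)   # (1, 768, 1, 1500)
    print(out["encoder_attn_key_cache"].shape)  # (12, 768, 1, 1536)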

openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff.

openai_whisper-small_216MB/AudioEncoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1860cdf3b8cb60f09c2ba261670bac3f5dee04f59c47908ceda7b3c7c32b7c6b
+size 370367

openai_whisper-small_216MB/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abd18b26e136cc4cdaae94985b204a64cf3dd1b47db0096d0a137cbd734986fd
+size 71115776

openai_whisper-small_216MB/MelSpectrogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efc05e563ee0c556e3f578e04be5fb67b4e7520124403f2561f39102f0f2b33d
+size 243

openai_whisper-small_216MB/MelSpectrogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4ef11ea703011eab03287ec661f999e19c2c78cf67d531b5e6afa02e18f913d
+size 328

openai_whisper-small_216MB/MelSpectrogram.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,74 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 80 × 1 × 3000)",
+        "shortDescription" : "",
+        "shape" : "[1, 80, 1, 3000]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.mul" : 2,
+      "Ios18.square" : 2,
+      "Ios18.conv" : 2,
+      "Ios18.matmul" : 1,
+      "Ios18.expandDims" : 4,
+      "Ios18.sub" : 1,
+      "Ios18.log" : 1,
+      "Ios18.add" : 3,
+      "Ios18.sliceByIndex" : 1,
+      "Ios18.maximum" : 1,
+      "Ios18.squeeze" : 2,
+      "Ios18.reshape" : 2,
+      "Ios16.reduceMax" : 1,
+      "Identity" : 1,
+      "Pad" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.version" : "8.0",
+      "com.github.apple.coremltools.source" : "torch==2.5.1"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 480000)",
+        "shortDescription" : "",
+        "shape" : "[480000]",
+        "name" : "audio",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MelSpectrogram",
+    "method" : "predict"
+  }
+]
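
Per the input schema above, the feature extractor consumes exactly one 30 s window of 16 kHz mono audio (16000 × 30 = 480 000 samples). A minimal sketch under the same coremltools assumption as before:

    import coremltools as ct
    import numpy as np

    melspec = ct.models.CompiledMLModel(
        "openai_whisper-small_216MB/MelSpectrogram.mlmodelc"
    )

    # One 30 s window of 16 kHz mono audio, zero-filled for shape checking.
    audio = np.zeros(480000, dtype=np.float16)
    features = melspec.predict({"audio": audio})["melspectrogram_features"]
    print(features.shape)  # (1, 80, 1, 3000)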

openai_whisper-small_216MB/MelSpectrogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,66 @@
+program(1.3)
+[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3400.43.1"}, {"coremlc-version", "3400.58.2"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
+{
+    func main<ios18>(tensor<fp16, [480000]> audio) {
+        tensor<int32, [3]> var_10 = const()[name = string("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
+        tensor<fp16, [1, 1, 480000]> input_1_cast_fp16 = reshape(shape = var_10, x = audio)[name = string("input_1_cast_fp16")];
+        tensor<int32, [6]> input_3_pad_0 = const()[name = string("input_3_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 200, 200])];
+        string input_3_mode_0 = const()[name = string("input_3_mode_0"), val = string("reflect")];
+        fp16 const_1_to_fp16 = const()[name = string("const_1_to_fp16"), val = fp16(0x0p+0)];
+        tensor<fp16, [1, 1, 480400]> input_3_cast_fp16 = pad(constant_val = const_1_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = string("input_3_cast_fp16")];
+        tensor<int32, [1]> var_22 = const()[name = string("op_22"), val = tensor<int32, [1]>([480400])];
+        tensor<fp16, [480400]> input_cast_fp16 = reshape(shape = var_22, x = input_3_cast_fp16)[name = string("input_cast_fp16")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = string("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 480400]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = string("expand_dims_0_cast_fp16")];
+        tensor<int32, [1]> expand_dims_3 = const()[name = string("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = string("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, 480400]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = string("expand_dims_4_cast_fp16")];
+        string conv_0_pad_type_0 = const()[name = string("conv_0_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = string("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = string("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        int32 conv_0_groups_0 = const()[name = string("conv_0_groups_0"), val = int32(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_1_to_fp16 = const()[name = string("expand_dims_1_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
+        tensor<fp16, [1, 201, 3001]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_0_cast_fp16")];
+        string conv_1_pad_type_0 = const()[name = string("conv_1_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = string("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = string("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        int32 conv_1_groups_0 = const()[name = string("conv_1_groups_0"), val = int32(1)];
+        tensor<fp16, [201, 1, 400]> expand_dims_2_to_fp16 = const()[name = string("expand_dims_2_to_fp16"), val = tensor<fp16, [201, 1, 400]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(160960)))];
+        tensor<fp16, [1, 201, 3001]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = string("conv_1_cast_fp16")];
+        tensor<int32, [1]> squeeze_0_axes_0 = const()[name = string("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = string("squeeze_0_cast_fp16")];
+        tensor<int32, [1]> squeeze_1_axes_0 = const()[name = string("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [201, 3001]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = string("squeeze_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_0_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = string("square_0_cast_fp16")];
+        tensor<fp16, [201, 3001]> square_1_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = string("square_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> add_1_cast_fp16 = add(x = square_0_cast_fp16, y = square_1_cast_fp16)[name = string("add_1_cast_fp16")];
+        tensor<fp16, [201, 3001]> magnitudes_1_cast_fp16 = identity(x = add_1_cast_fp16)[name = string("magnitudes_1_cast_fp16")];
+        tensor<int32, [2]> magnitudes_begin_0 = const()[name = string("magnitudes_begin_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [2]> magnitudes_end_0 = const()[name = string("magnitudes_end_0"), val = tensor<int32, [2]>([201, 3000])];
+        tensor<bool, [2]> magnitudes_end_mask_0 = const()[name = string("magnitudes_end_mask_0"), val = tensor<bool, [2]>([true, false])];
+        tensor<fp16, [201, 3000]> magnitudes_cast_fp16 = slice_by_index(begin = magnitudes_begin_0, end = magnitudes_end_0, end_mask = magnitudes_end_mask_0, x = magnitudes_1_cast_fp16)[name = string("magnitudes_cast_fp16")];
+        bool mel_spec_1_transpose_x_0 = const()[name = string("mel_spec_1_transpose_x_0"), val = bool(false)];
+        bool mel_spec_1_transpose_y_0 = const()[name = string("mel_spec_1_transpose_y_0"), val = bool(false)];
+        tensor<fp16, [80, 201]> mel_filters_to_fp16 = const()[name = string("mel_filters_to_fp16"), val = tensor<fp16, [80, 201]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(321856)))];
+        tensor<fp16, [80, 3000]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = string("mel_spec_1_cast_fp16")];
+        fp16 var_41_to_fp16 = const()[name = string("op_41_to_fp16"), val = fp16(0x1p-24)];
+        tensor<fp16, [80, 3000]> mel_spec_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_41_to_fp16)[name = string("mel_spec_cast_fp16")];
+        fp32 log_0_epsilon_0 = const()[name = string("log_0_epsilon_0"), val = fp32(0x1p-149)];
+        tensor<fp16, [80, 3000]> log_0_cast_fp16 = log(epsilon = log_0_epsilon_0, x = mel_spec_cast_fp16)[name = string("log_0_cast_fp16")];
+        fp16 mul_0_y_0_to_fp16 = const()[name = string("mul_0_y_0_to_fp16"), val = fp16(0x1.bccp-2)];
+        tensor<fp16, [80, 3000]> mul_0_cast_fp16 = mul(x = log_0_cast_fp16, y = mul_0_y_0_to_fp16)[name = string("mul_0_cast_fp16")];
+        bool var_44_keep_dims_0 = const()[name = string("op_44_keep_dims_0"), val = bool(false)];
+        fp16 var_44_cast_fp16 = reduce_max(keep_dims = var_44_keep_dims_0, x = mul_0_cast_fp16)[name = string("op_44_cast_fp16")];
+        fp16 var_46_to_fp16 = const()[name = string("op_46_to_fp16"), val = fp16(0x1p+3)];
+        fp16 var_47_cast_fp16 = sub(x = var_44_cast_fp16, y = var_46_to_fp16)[name = string("op_47_cast_fp16")];
+        tensor<fp16, [80, 3000]> log_spec_3_cast_fp16 = maximum(x = mul_0_cast_fp16, y = var_47_cast_fp16)[name = string("log_spec_3_cast_fp16")];
+        fp16 var_50_to_fp16 = const()[name = string("op_50_to_fp16"), val = fp16(0x1p+2)];
+        tensor<fp16, [80, 3000]> var_51_cast_fp16 = add(x = log_spec_3_cast_fp16, y = var_50_to_fp16)[name = string("op_51_cast_fp16")];
+        fp16 _inversed_log_spec_y_0_to_fp16 = const()[name = string("_inversed_log_spec_y_0_to_fp16"), val = fp16(0x1p-2)];
+        tensor<fp16, [80, 3000]> _inversed_log_spec_cast_fp16 = mul(x = var_51_cast_fp16, y = _inversed_log_spec_y_0_to_fp16)[name = string("_inversed_log_spec_cast_fp16")];
+        tensor<int32, [1]> var_55_axes_0 = const()[name = string("op_55_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 80, 3000]> var_55_cast_fp16 = expand_dims(axes = var_55_axes_0, x = _inversed_log_spec_cast_fp16)[name = string("op_55_cast_fp16")];
+        tensor<int32, [1]> var_62_axes_0 = const()[name = string("op_62_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features = expand_dims(axes = var_62_axes_0, x = var_55_cast_fp16)[name = string("op_62_cast_fp16")];
+    } -> (melspectrogram_features);
+}
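
The program above is the standard Whisper log-mel pipeline: reflect-pad by 200 samples, a 400-point DFT realised as two stride-160 convolutions (real and imaginary bases read from weights/weight.bin), power spectrum, slice to 3000 frames, an 80-band mel filterbank matmul, log10 (the 0x1.bccp-2 constant is log10(e), turning ln into log10), a dynamic-range clamp at max − 8, then (x + 4) / 4 normalisation. A rough numpy transcription for reference; the DFT basis and Hann window are recomputed here rather than read from weight.bin, so tiny numerical differences from the fp16 graph are expected:

    import numpy as np

    def log_mel(audio, mel_filters, n_fft=400, hop=160):
        """Numpy transcription of the MIL graph above.

        audio: 480000 float samples; mel_filters: (80, 201) matrix.
        The model reads both DFT bases and mel filters from weight.bin;
        here the basis is recomputed with a Hann window (an assumption,
        matching Whisper's reference extractor).
        """
        x = np.pad(audio, 200, mode="reflect")              # pad([200, 200], reflect)
        window = np.hanning(n_fft + 1)[:-1]
        k = np.arange(n_fft // 2 + 1)[:, None]              # 201 frequency bins
        n = np.arange(n_fft)[None, :]
        real = np.cos(-2 * np.pi * k * n / n_fft) * window  # conv_0 weights
        imag = np.sin(-2 * np.pi * k * n / n_fft) * window  # conv_1 weights

        # stride-160 conv over the padded signal == framed matmul, 3001 frames
        frames = np.lib.stride_tricks.sliding_window_view(x, n_fft)[::hop]
        re = frames @ real.T
        im = frames @ imag.T
        magnitudes = (re**2 + im**2).T[:, :3000]            # square, add, slice

        mel = mel_filters @ magnitudes                      # (80, 3000)
        log_spec = np.log10(np.maximum(mel, 1e-10))         # graph uses a 2^-24 eps
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # reduce_max, sub, max
        return (log_spec + 4.0) / 4.0                       # add 4, mul 0.25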

openai_whisper-small_216MB/MelSpectrogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:801024dbc7a89c677be1f8b285de3409e35f7d1786c9c8d9d0d6842ac57a1c83
+size 354080

openai_whisper-small_216MB/TextDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7df280e3624592b68f28b53486f5b5774bc21282f6a172d6babfcfc2f5fdf139
+size 243

openai_whisper-small_216MB/TextDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3015f3429f673bc4a311b034f2dd12abd86a84e3653afa6f166654e6e6478aeb
+size 754

openai_whisper-small_216MB/TextDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,185 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (4 bits), Sparse)",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 51865)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 51865]",
+        "name" : "logits",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 1]",
+        "name" : "key_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 9216 × 1 × 1)",
+        "shortDescription" : "",
+        "shape" : "[1, 9216, 1, 1]",
+        "name" : "value_cache_updates",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536]",
+        "name" : "alignment_heads_weights",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 9,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.expandDims" : 8,
+      "Ios18.softmax" : 24,
+      "Ios18.mul" : 48,
+      "Ios18.matmul" : 48,
+      "Ios18.batchNorm" : 37,
+      "Ios16.reduceMean" : 1,
+      "Split" : 2,
+      "Ios18.readState" : 5,
+      "Ios18.gather" : 3,
+      "Ios18.add" : 182,
+      "Ios18.layerNorm" : 37,
+      "Ios18.reshape" : 96,
+      "Ios18.constexprLutToDense" : 96,
+      "Ios18.constexprSparseToDense" : 97,
+      "Ios18.conv" : 192,
+      "Ios18.gelu" : 12,
+      "Ios18.linear" : 1,
+      "Ios18.cast" : 1,
+      "Ios18.transpose" : 1,
+      "Ios18.concat" : 3,
+      "Ios18.sliceByIndex" : 44,
+      "Ios18.squeeze" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Int32, UInt16)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[1, 1536]",
+        "name" : "encoder_attn_key_padding_mask",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_key_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 1536)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 1536]",
+        "name" : "encoder_attn_value_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 448]",
+        "name" : "self_attn_key_cache",
+        "type" : "State"
+      },
+      {
+        "dataType" : "Float16",
+        "isOptional" : "0",
+        "formattedType" : "State (Float16 12 × 768 × 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[12, 768, 1, 448]",
+        "name" : "self_attn_value_cache",
+        "type" : "State"
+      }
+    ],
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "input_ids",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "cache_length",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "kv_cache_update_mask",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 448)",
+        "shortDescription" : "",
+        "shape" : "[1, 448]",
+        "name" : "decoder_key_padding_mask",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "TextDecoderStateful_mixedBitPalettized_4_bit",
+    "method" : "predict"
+  }
+]
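
Unlike the small.en variant's explicit-I/O decoder, this one keeps its KV caches in Core ML state buffers (the five stateSchema entries above). A sketch of one greedy step, assuming coremltools ≥ 8's state API (make_state() plus a state argument to predict) and the same mask convention sketched earlier for the small.en decoder; in a real pipeline the encoder's cross-attention caches would be written into the state before the first step, which is outside this diff:

    import coremltools as ct
    import numpy as np

    decoder = ct.models.CompiledMLModel(
        "openai_whisper-small_216MB/TextDecoder.mlmodelc"
    )

    # coremltools >= 8 (assumed) exposes the stateSchema buffers above
    # (cross- and self-attention caches) through an opaque state object.
    state = decoder.make_state()

    sot, pos = 50258, 0  # multilingual <|startoftranscript|> (vocab 51865)
    padding = np.full((1, 448), -1e4, dtype=np.float16)  # assumed mask convention
    padding[0, : pos + 1] = 0.0
    update = np.zeros((1, 448), dtype=np.float16)
    update[0, pos] = 1.0

    out = decoder.predict(
        {
            "input_ids": np.array([sot], dtype=np.int32),
            "cache_length": np.array([pos], dtype=np.int32),
            "kv_cache_update_mask": update,
            "decoder_key_padding_mask": padding,
        },
        state=state,
    )
    next_token = int(out["logits"][0, 0].argmax())  # logits: (1, 1, 51865)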

openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff.

openai_whisper-small_216MB/TextDecoder.mlmodelc/model.mlmodel
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66a95d0b170da859b01abe1b7dbe8bf75a4344775b1b7f95b3ebf702fd454bba
+size 458896