csukuangfj committed
Commit a93c67c
1 Parent(s): 8cac9ed

Add onnxruntime.xcframework 1.19.0

1.19.0/onnxruntime.xcframework/Headers/coreml_provider_factory.h ADDED
@@ -0,0 +1,48 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+ #pragma once
+
+ #include "onnxruntime_c_api.h"
+
+ // COREMLFlags are bool options we want to set for the CoreML EP
+ // This enum is defined as bit flags, and cannot have negative values
+ // To generate a uint32_t coreml_flags for use with OrtSessionOptionsAppendExecutionProvider_CoreML below:
+ // uint32_t coreml_flags = 0;
+ // coreml_flags |= COREML_FLAG_USE_CPU_ONLY;
+ enum COREMLFlags {
+ COREML_FLAG_USE_NONE = 0x000,
+
+ // Use the CPU only in the CoreML EP; this may decrease performance but will provide
+ // reference output values without precision loss, which is useful for validation
+ COREML_FLAG_USE_CPU_ONLY = 0x001,
+
+ // Enable the CoreML EP on subgraphs
+ COREML_FLAG_ENABLE_ON_SUBGRAPH = 0x002,
+
+ // By default the CoreML Execution Provider is enabled for all compatible Apple devices
+ // Enabling this option will only enable the CoreML EP for Apple devices with an ANE (Apple Neural Engine)
+ // Please note that enabling this option does not guarantee that the entire model will be executed using the ANE only
+ COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004,
+
+ // Only allow the CoreML EP to take nodes whose inputs have static shapes. By default it will also allow inputs with
+ // dynamic shapes. However, performance may be negatively impacted if inputs have dynamic shapes.
+ COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008,
+
+ // Create an MLProgram. By default it will create a NeuralNetwork model. Requires Core ML 5 or later.
+ COREML_FLAG_CREATE_MLPROGRAM = 0x010,
+
+ // Keep COREML_FLAG_LAST at the end of the enum definition
+ // and assign the last COREMLFlag to it
+ COREML_FLAG_LAST = COREML_FLAG_CREATE_MLPROGRAM,
+ };
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ ORT_EXPORT ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_CoreML,
+ _In_ OrtSessionOptions* options, uint32_t coreml_flags);
+
+ #ifdef __cplusplus
+ }
+ #endif
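
A minimal usage sketch of this header (not part of the commit itself): the flag-building pattern described in the comment above combined with the registration call. It assumes the Ort::SessionOptions and Ort::ThrowOnError helpers from the C++ headers added later in this commit, and the implicit conversion from Ort::SessionOptions to OrtSessionOptions*.

#include "coreml_provider_factory.h"
#include "onnxruntime_cxx_api.h"

// Build the bit flags as described in the header comment, then append the CoreML EP.
void EnableCoreML(Ort::SessionOptions& session_options) {
  uint32_t coreml_flags = 0;
  coreml_flags |= COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES;
  coreml_flags |= COREML_FLAG_CREATE_MLPROGRAM;  // requires Core ML 5 or later
  Ort::ThrowOnError(
      OrtSessionOptionsAppendExecutionProvider_CoreML(session_options, coreml_flags));
}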
1.19.0/onnxruntime.xcframework/Headers/cpu_provider_factory.h ADDED
@@ -0,0 +1,19 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+
+ #include "onnxruntime_c_api.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ /**
+ * \param use_arena zero: false. non-zero: true.
+ */
+ ORT_EXPORT
+ ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_CPU, _In_ OrtSessionOptions* options, int use_arena)
+ ORT_ALL_ARGS_NONNULL;
+
+ #ifdef __cplusplus
+ }
+ #endif
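
A matching sketch for this header (again assuming the Ort:: C++ helpers from the headers added later in this commit): explicitly appending the CPU execution provider with the arena allocator enabled.

#include "cpu_provider_factory.h"
#include "onnxruntime_cxx_api.h"

void EnableCpuProvider(Ort::SessionOptions& session_options) {
  // use_arena: zero means false, non-zero means true (see the doc comment above).
  Ort::ThrowOnError(
      OrtSessionOptionsAppendExecutionProvider_CPU(session_options, /*use_arena=*/1));
}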
1.19.0/onnxruntime.xcframework/Headers/onnxruntime_c_api.h ADDED
The diff for this file is too large to render. See raw diff
 
1.19.0/onnxruntime.xcframework/Headers/onnxruntime_cxx_api.h ADDED
The diff for this file is too large to render. See raw diff
 
1.19.0/onnxruntime.xcframework/Headers/onnxruntime_cxx_inline.h ADDED
@@ -0,0 +1,2128 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ // Do not include this file directly. Please include "onnxruntime_cxx_api.h" instead.
5
+ // If interested in trying out features of the new experimental C++ API, include "experimental_onnxruntime_cxx_api.h" instead.
6
+ //
7
+ // These are the inline implementations of the C++ header APIs. They're in this separate file so as not to clutter
8
+ // the main C++ file with implementation details.
9
+
10
+ #include <algorithm>
11
+ #include <functional>
12
+ #include <iterator>
13
+ #include <type_traits>
14
+
15
+ // Convert OrtStatus to Ort::Status and return
16
+ // instead of throwing
17
+ #define ORT_CXX_RETURN_ON_API_FAIL(expression) \
18
+ { \
19
+ auto ort_status = (expression); \
20
+ if (ort_status) { \
21
+ return Ort::Status(ort_status); \
22
+ } \
23
+ }
24
+
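A short sketch of how the macro above is meant to be used in user code that includes onnxruntime_cxx_api.h (the helper function and its name are hypothetical, not part of this header): a function that propagates a C-API failure as an Ort::Status instead of throwing.

// Hypothetical helper: return Ort::Status on failure rather than throwing.
inline Ort::Status GetMemoryInfoDeviceId(const OrtMemoryInfo* mem_info, int& id_out) {
  ORT_CXX_RETURN_ON_API_FAIL(Ort::GetApi().MemoryInfoGetId(mem_info, &id_out));
  return Ort::Status{nullptr};  // a null OrtStatus* means success
}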
25
+ #ifdef __cpp_if_constexpr
26
+ #define ORT_CXX_IF_CONSTEXPR if constexpr
27
+ #else
28
+ #define ORT_CXX_IF_CONSTEXPR if
29
+ #endif
30
+
31
+ namespace Ort {
32
+
33
+ namespace detail {
34
+ inline void ThrowStatus(const Status& st) {
35
+ std::string error_message = st.GetErrorMessage();
36
+ OrtErrorCode error_code = st.GetErrorCode();
37
+ ORT_CXX_API_THROW(std::move(error_message), error_code);
38
+ }
39
+ } // namespace detail
40
+
41
+ inline void ThrowOnError(OrtStatus* ort_status) {
42
+ if (ort_status) {
43
+ Ort::Status st(ort_status);
44
+ detail::ThrowStatus(st);
45
+ }
46
+ }
47
+
48
+ inline void ThrowOnError(const Status& st) {
49
+ if (st) {
50
+ detail::ThrowStatus(st);
51
+ }
52
+ }
53
+
54
+ inline Status::Status(OrtStatus* status) noexcept : Base<OrtStatus>{status} {
55
+ }
56
+
57
+ inline Status::Status(const std::exception& e) noexcept {
58
+ p_ = GetApi().CreateStatus(ORT_FAIL, e.what());
59
+ }
60
+
61
+ inline Status::Status(const Exception& e) noexcept {
62
+ p_ = GetApi().CreateStatus(e.GetOrtErrorCode(), e.what());
63
+ }
64
+
65
+ inline Status::Status(const char* message, OrtErrorCode code) noexcept {
66
+ p_ = GetApi().CreateStatus(code, message);
67
+ }
68
+
69
+ inline std::string Status::GetErrorMessage() const {
70
+ std::string message(GetApi().GetErrorMessage(p_));
71
+ return message;
72
+ }
73
+
74
+ inline OrtErrorCode Status::GetErrorCode() const {
75
+ return GetApi().GetErrorCode(p_);
76
+ }
77
+
78
+ inline bool Status::IsOK() const noexcept {
79
+ return (p_ == nullptr);
80
+ }
81
+
82
+ // This template converts a C++ type into its ONNXTensorElementDataType
83
+ template <typename T>
84
+ struct TypeToTensorType;
85
+ template <>
86
+ struct TypeToTensorType<float> {
87
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
88
+ };
89
+ template <>
90
+ struct TypeToTensorType<Float16_t> {
91
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16;
92
+ };
93
+ template <>
94
+ struct TypeToTensorType<BFloat16_t> {
95
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16;
96
+ };
97
+ template <>
98
+ struct TypeToTensorType<double> {
99
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE;
100
+ };
101
+ template <>
102
+ struct TypeToTensorType<int8_t> {
103
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8;
104
+ };
105
+ template <>
106
+ struct TypeToTensorType<int16_t> {
107
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16;
108
+ };
109
+ template <>
110
+ struct TypeToTensorType<int32_t> {
111
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32;
112
+ };
113
+ template <>
114
+ struct TypeToTensorType<int64_t> {
115
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
116
+ };
117
+ template <>
118
+ struct TypeToTensorType<uint8_t> {
119
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
120
+ };
121
+ template <>
122
+ struct TypeToTensorType<uint16_t> {
123
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16;
124
+ };
125
+ template <>
126
+ struct TypeToTensorType<uint32_t> {
127
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32;
128
+ };
129
+ template <>
130
+ struct TypeToTensorType<uint64_t> {
131
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64;
132
+ };
133
+ template <>
134
+ struct TypeToTensorType<bool> {
135
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL;
136
+ };
137
+
138
+ template <>
139
+ struct TypeToTensorType<Float8E4M3FN_t> {
140
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN;
141
+ };
142
+ template <>
143
+ struct TypeToTensorType<Float8E4M3FNUZ_t> {
144
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ;
145
+ };
146
+ template <>
147
+ struct TypeToTensorType<Float8E5M2_t> {
148
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2;
149
+ };
150
+ template <>
151
+ struct TypeToTensorType<Float8E5M2FNUZ_t> {
152
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ;
153
+ };
154
+
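A purely illustrative compile-time sketch of what the trait above provides, usable from code that includes onnxruntime_cxx_api.h:

// TypeToTensorType maps a C++ element type to its ONNXTensorElementDataType at compile time.
static_assert(Ort::TypeToTensorType<float>::type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT,
              "float maps to the FLOAT tensor element type");
static_assert(Ort::TypeToTensorType<int64_t>::type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64,
              "int64_t maps to the INT64 tensor element type");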
155
+ inline bool BFloat16_t::operator==(const BFloat16_t& rhs) const noexcept {
156
+ if (IsNaN() || rhs.IsNaN()) {
157
+ // IEEE defines that NaN is not equal to anything, including itself.
158
+ return false;
159
+ }
160
+ return val == rhs.val;
161
+ }
162
+
163
+ inline bool BFloat16_t::operator<(const BFloat16_t& rhs) const noexcept {
164
+ if (IsNaN() || rhs.IsNaN()) {
165
+ // IEEE defines that NaN is unordered with respect to everything, including itself.
166
+ return false;
167
+ }
168
+
169
+ const bool left_is_negative = IsNegative();
170
+ if (left_is_negative != rhs.IsNegative()) {
171
+ // When the signs of left and right differ, we know that left is less than right if it is
172
+ // the negative value. The exception to this is if both values are zero, in which case IEEE
173
+ // says they should be equal, even if the signs differ.
174
+ return left_is_negative && !AreZero(*this, rhs);
175
+ }
176
+ return (val != rhs.val) && ((val < rhs.val) ^ left_is_negative);
177
+ }
178
+
179
+ inline MemoryAllocation::MemoryAllocation(OrtAllocator* allocator, void* p, size_t size)
180
+ : allocator_(allocator), p_(p), size_(size) {
181
+ }
182
+
183
+ inline MemoryAllocation::~MemoryAllocation() {
184
+ if (p_ != nullptr) {
185
+ // We do not throw out of destructor
186
+ auto ret = GetApi().AllocatorFree(allocator_, p_);
187
+ static_cast<void>(ret);
188
+ }
189
+ }
190
+
191
+ inline MemoryAllocation::MemoryAllocation(MemoryAllocation&& o) noexcept : allocator_(nullptr), p_(nullptr), size_(0) {
192
+ *this = std::move(o);
193
+ }
194
+
195
+ inline MemoryAllocation& MemoryAllocation::operator=(MemoryAllocation&& o) noexcept {
196
+ OrtAllocator* alloc = nullptr;
197
+ void* p = nullptr;
198
+ size_t sz = 0;
199
+
200
+ // Swap out this
201
+ std::swap(alloc, allocator_);
202
+ std::swap(p, p_);
203
+ std::swap(sz, size_);
204
+
205
+ // Swap with incoming
206
+ std::swap(allocator_, o.allocator_);
207
+ std::swap(p_, o.p_);
208
+ std::swap(size_, o.size_);
209
+
210
+ // Destroy this instance if needed
211
+ MemoryAllocation this_alloc(alloc, p, sz);
212
+ return *this;
213
+ }
214
+
215
+ namespace detail {
216
+
217
+ template <typename T>
218
+ inline void* AllocatorImpl<T>::Alloc(size_t size) {
219
+ void* out;
220
+ ThrowOnError(GetApi().AllocatorAlloc(this->p_, size, &out));
221
+ return out;
222
+ }
223
+
224
+ template <typename T>
225
+ inline MemoryAllocation AllocatorImpl<T>::GetAllocation(size_t size) {
226
+ void* out;
227
+ ThrowOnError(GetApi().AllocatorAlloc(this->p_, size, &out));
228
+ MemoryAllocation result(this->p_, out, size);
229
+ return result;
230
+ }
231
+
232
+ template <typename T>
233
+ inline void AllocatorImpl<T>::Free(void* p) {
234
+ ThrowOnError(GetApi().AllocatorFree(this->p_, p));
235
+ }
236
+
237
+ template <typename T>
238
+ inline ConstMemoryInfo AllocatorImpl<T>::GetInfo() const {
239
+ const OrtMemoryInfo* out;
240
+ ThrowOnError(GetApi().AllocatorGetInfo(this->p_, &out));
241
+ return ConstMemoryInfo{out};
242
+ }
243
+
244
+ } // namespace detail
245
+
246
+ inline AllocatorWithDefaultOptions::AllocatorWithDefaultOptions() {
247
+ ThrowOnError(GetApi().GetAllocatorWithDefaultOptions(&this->p_));
248
+ }
249
+
250
+ inline Allocator::Allocator(const Session& sess, const OrtMemoryInfo* mem_info) {
251
+ ThrowOnError(GetApi().CreateAllocator(sess, mem_info, &this->p_));
252
+ }
253
+
254
+ namespace detail {
255
+
256
+ template <typename T>
257
+ inline std::string MemoryInfoImpl<T>::GetAllocatorName() const {
258
+ const char* name = nullptr;
259
+ ThrowOnError(GetApi().MemoryInfoGetName(this->p_, &name));
260
+ return std::string(name);
261
+ }
262
+
263
+ template <typename T>
264
+ inline OrtAllocatorType MemoryInfoImpl<T>::GetAllocatorType() const {
265
+ OrtAllocatorType type;
266
+ ThrowOnError(GetApi().MemoryInfoGetType(this->p_, &type));
267
+ return type;
268
+ }
269
+
270
+ template <typename T>
271
+ inline int MemoryInfoImpl<T>::GetDeviceId() const {
272
+ int id = 0;
273
+ ThrowOnError(GetApi().MemoryInfoGetId(this->p_, &id));
274
+ return id;
275
+ }
276
+
277
+ template <typename T>
278
+ inline OrtMemoryInfoDeviceType MemoryInfoImpl<T>::GetDeviceType() const {
279
+ OrtMemoryInfoDeviceType type;
280
+ GetApi().MemoryInfoGetDeviceType(this->p_, &type);
281
+ return type;
282
+ }
283
+
284
+ template <typename T>
285
+ inline OrtMemType MemoryInfoImpl<T>::GetMemoryType() const {
286
+ OrtMemType type;
287
+ ThrowOnError(GetApi().MemoryInfoGetMemType(this->p_, &type));
288
+ return type;
289
+ }
290
+
291
+ template <typename T>
292
+ template <typename U>
293
+ inline bool MemoryInfoImpl<T>::operator==(const MemoryInfoImpl<U>& o) const {
294
+ int comp_result = 0;
295
+ ThrowOnError(Ort::GetApi().CompareMemoryInfo(this->p_, o, &comp_result));
296
+ return comp_result == 0;
297
+ }
298
+
299
+ } // namespace detail
300
+
301
+ inline MemoryInfo MemoryInfo::CreateCpu(OrtAllocatorType type, OrtMemType mem_type) {
302
+ OrtMemoryInfo* p;
303
+ ThrowOnError(GetApi().CreateCpuMemoryInfo(type, mem_type, &p));
304
+ return MemoryInfo(p);
305
+ }
306
+
307
+ inline MemoryInfo::MemoryInfo(const char* name, OrtAllocatorType type, int id, OrtMemType mem_type) {
308
+ ThrowOnError(GetApi().CreateMemoryInfo(name, type, id, mem_type, &this->p_));
309
+ }
310
+
311
+ namespace detail {
312
+ template <typename T>
313
+ inline std::vector<std::string> ConstIoBindingImpl<T>::GetOutputNames() const {
314
+ AllocatorWithDefaultOptions allocator;
315
+ return binding_utils::GetOutputNamesHelper(this->p_, allocator);
316
+ }
317
+
318
+ template <typename T>
319
+ inline std::vector<std::string> ConstIoBindingImpl<T>::GetOutputNames(OrtAllocator* allocator) const {
320
+ return binding_utils::GetOutputNamesHelper(this->p_, allocator);
321
+ }
322
+
323
+ template <typename T>
324
+ inline std::vector<Value> ConstIoBindingImpl<T>::GetOutputValues() const {
325
+ AllocatorWithDefaultOptions allocator;
326
+ return binding_utils::GetOutputValuesHelper(this->p_, allocator);
327
+ }
328
+
329
+ template <typename T>
330
+ inline std::vector<Value> ConstIoBindingImpl<T>::GetOutputValues(OrtAllocator* allocator) const {
331
+ return binding_utils::GetOutputValuesHelper(this->p_, allocator);
332
+ }
333
+
334
+ template <typename T>
335
+ inline void IoBindingImpl<T>::BindInput(const char* name, const Value& value) {
336
+ ThrowOnError(GetApi().BindInput(this->p_, name, value));
337
+ }
338
+
339
+ template <typename T>
340
+ inline void IoBindingImpl<T>::BindOutput(const char* name, const Value& value) {
341
+ ThrowOnError(GetApi().BindOutput(this->p_, name, value));
342
+ }
343
+
344
+ template <typename T>
345
+ inline void IoBindingImpl<T>::BindOutput(const char* name, const OrtMemoryInfo* mem_info) {
346
+ ThrowOnError(GetApi().BindOutputToDevice(this->p_, name, mem_info));
347
+ }
348
+
349
+ template <typename T>
350
+ inline void IoBindingImpl<T>::ClearBoundInputs() {
351
+ GetApi().ClearBoundInputs(this->p_);
352
+ }
353
+
354
+ template <typename T>
355
+ inline void IoBindingImpl<T>::ClearBoundOutputs() {
356
+ GetApi().ClearBoundOutputs(this->p_);
357
+ }
358
+
359
+ template <typename T>
360
+ inline void IoBindingImpl<T>::SynchronizeInputs() {
361
+ ThrowOnError(GetApi().SynchronizeBoundInputs(this->p_));
362
+ }
363
+
364
+ template <typename T>
365
+ inline void IoBindingImpl<T>::SynchronizeOutputs() {
366
+ ThrowOnError(GetApi().SynchronizeBoundOutputs(this->p_));
367
+ }
368
+
369
+ namespace binding_utils {
370
+ inline std::vector<std::string> GetOutputNamesHelper(const OrtIoBinding* binding, OrtAllocator* allocator) {
371
+ std::vector<std::string> result;
372
+ auto free_fn = detail::AllocatedFree(allocator);
373
+ using Ptr = std::unique_ptr<void, decltype(free_fn)>;
374
+
375
+ char* buffer = nullptr;
376
+ size_t* lengths = nullptr;
377
+ size_t count = 0;
378
+ ThrowOnError(GetApi().GetBoundOutputNames(binding, allocator, &buffer, &lengths, &count));
379
+
380
+ if (count == 0) {
381
+ return result;
382
+ }
383
+
384
+ Ptr buffer_g(buffer, free_fn);
385
+ Ptr lengths_g(lengths, free_fn);
386
+
387
+ result.reserve(count);
388
+ for (size_t i = 0; i < count; ++i) {
389
+ auto sz = *lengths;
390
+ result.emplace_back(buffer, sz);
391
+ buffer += sz;
392
+ ++lengths;
393
+ }
394
+ return result;
395
+ }
396
+
397
+ inline std::vector<Value> GetOutputValuesHelper(const OrtIoBinding* binding, OrtAllocator* allocator) {
398
+ std::vector<Value> result;
399
+ size_t owned = 0;
400
+ size_t output_count = 0;
401
+ // Lambda to release the buffer when no longer needed and
402
+ // make sure that we destroy all instances on exception
403
+ auto free_fn = [&owned, &output_count, allocator](OrtValue** buffer) {
404
+ if (buffer) {
405
+ while (owned < output_count) {
406
+ auto* p = buffer + owned++;
407
+ GetApi().ReleaseValue(*p);
408
+ }
409
+ allocator->Free(allocator, buffer);
410
+ }
411
+ };
412
+ using Ptr = std::unique_ptr<OrtValue*, decltype(free_fn)>;
413
+
414
+ OrtValue** output_buffer = nullptr;
415
+ ThrowOnError(GetApi().GetBoundOutputValues(binding, allocator, &output_buffer, &output_count));
416
+ if (output_count == 0) {
417
+ return result;
418
+ }
419
+
420
+ Ptr buffer_g(output_buffer, free_fn);
421
+
422
+ result.reserve(output_count);
423
+ for (size_t i = 0; i < output_count; ++i) {
424
+ result.emplace_back(output_buffer[i]);
425
+ ++owned;
426
+ }
427
+ return result;
428
+ }
429
+
430
+ } // namespace binding_utils
431
+ } // namespace detail
432
+
433
+ inline IoBinding::IoBinding(Session& session) {
434
+ ThrowOnError(GetApi().CreateIoBinding(session, &this->p_));
435
+ }
436
+
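A usage sketch of the binding helpers implemented above (user code, not part of this header; the tensor names "input"/"output" are hypothetical, and onnxruntime_cxx_api.h plus <vector> are assumed to be included):

void RunWithBinding(Ort::Session& session, const Ort::Value& input_value,
                    const OrtMemoryInfo* output_mem_info) {
  Ort::IoBinding binding(session);
  binding.BindInput("input", input_value);        // bind an existing Ort::Value
  binding.BindOutput("output", output_mem_info);  // let ORT allocate the output on this device
  session.Run(Ort::RunOptions{nullptr}, binding);
  std::vector<Ort::Value> outputs = binding.GetOutputValues();
  (void)outputs;
}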
437
+ inline ArenaCfg::ArenaCfg(size_t max_mem, int arena_extend_strategy, int initial_chunk_size_bytes, int max_dead_bytes_per_chunk) {
438
+ ThrowOnError(GetApi().CreateArenaCfg(max_mem, arena_extend_strategy, initial_chunk_size_bytes, max_dead_bytes_per_chunk, &p_));
439
+ }
440
+
441
+ inline ThreadingOptions::ThreadingOptions() {
442
+ ThrowOnError(GetApi().CreateThreadingOptions(&p_));
443
+ }
444
+
445
+ inline ThreadingOptions& ThreadingOptions::SetGlobalIntraOpNumThreads(int intra_op_num_threads) {
446
+ ThrowOnError(GetApi().SetGlobalIntraOpNumThreads(p_, intra_op_num_threads));
447
+ return *this;
448
+ }
449
+
450
+ inline ThreadingOptions& ThreadingOptions::SetGlobalInterOpNumThreads(int inter_op_num_threads) {
451
+ ThrowOnError(GetApi().SetGlobalInterOpNumThreads(p_, inter_op_num_threads));
452
+ return *this;
453
+ }
454
+
455
+ inline ThreadingOptions& ThreadingOptions::SetGlobalSpinControl(int allow_spinning) {
456
+ ThrowOnError(GetApi().SetGlobalSpinControl(p_, allow_spinning));
457
+ return *this;
458
+ }
459
+
460
+ inline ThreadingOptions& ThreadingOptions::SetGlobalDenormalAsZero() {
461
+ ThrowOnError(GetApi().SetGlobalDenormalAsZero(p_));
462
+ return *this;
463
+ }
464
+
465
+ inline ThreadingOptions& ThreadingOptions::SetGlobalCustomCreateThreadFn(OrtCustomCreateThreadFn ort_custom_create_thread_fn) {
466
+ ThrowOnError(GetApi().SetGlobalCustomCreateThreadFn(p_, ort_custom_create_thread_fn));
467
+ return *this;
468
+ }
469
+
470
+ inline ThreadingOptions& ThreadingOptions::SetGlobalCustomThreadCreationOptions(void* ort_custom_thread_creation_options) {
471
+ ThrowOnError(GetApi().SetGlobalCustomThreadCreationOptions(p_, ort_custom_thread_creation_options));
472
+ return *this;
473
+ }
474
+
475
+ inline ThreadingOptions& ThreadingOptions::SetGlobalCustomJoinThreadFn(OrtCustomJoinThreadFn ort_custom_join_thread_fn) {
476
+ ThrowOnError(GetApi().SetGlobalCustomJoinThreadFn(p_, ort_custom_join_thread_fn));
477
+ return *this;
478
+ }
479
+
480
+ inline Env::Env(OrtLoggingLevel logging_level, _In_ const char* logid) {
481
+ ThrowOnError(GetApi().CreateEnv(logging_level, logid, &p_));
482
+ if (strcmp(logid, "onnxruntime-node") == 0) {
483
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
484
+ } else {
485
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
486
+ }
487
+ }
488
+
489
+ inline Env::Env(OrtLoggingLevel logging_level, const char* logid, OrtLoggingFunction logging_function, void* logger_param) {
490
+ ThrowOnError(GetApi().CreateEnvWithCustomLogger(logging_function, logger_param, logging_level, logid, &p_));
491
+ if (strcmp(logid, "onnxruntime-node") == 0) {
492
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
493
+ } else {
494
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
495
+ }
496
+ }
497
+
498
+ inline Env::Env(const OrtThreadingOptions* tp_options, OrtLoggingLevel logging_level, _In_ const char* logid) {
499
+ ThrowOnError(GetApi().CreateEnvWithGlobalThreadPools(logging_level, logid, tp_options, &p_));
500
+ if (strcmp(logid, "onnxruntime-node") == 0) {
501
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
502
+ } else {
503
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
504
+ }
505
+ }
506
+
507
+ inline Env::Env(const OrtThreadingOptions* tp_options, OrtLoggingFunction logging_function, void* logger_param,
508
+ OrtLoggingLevel logging_level, _In_ const char* logid) {
509
+ ThrowOnError(GetApi().CreateEnvWithCustomLoggerAndGlobalThreadPools(logging_function, logger_param, logging_level, logid, tp_options, &p_));
510
+ if (strcmp(logid, "onnxruntime-node") == 0) {
511
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
512
+ } else {
513
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
514
+ }
515
+ }
516
+
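A sketch of wiring the two pieces above together: global thread pools configured through ThreadingOptions and shared by sessions that opt out of per-session threads (DisablePerSessionThreads is implemented further down in this file). Illustrative user code only, assuming onnxruntime_cxx_api.h is included.

void ConfigureSharedThreadPools() {
  Ort::ThreadingOptions tp_options;
  tp_options.SetGlobalIntraOpNumThreads(4).SetGlobalInterOpNumThreads(1);
  // Uses the Env constructor above that takes OrtThreadingOptions*.
  Ort::Env env(tp_options, ORT_LOGGING_LEVEL_WARNING, "shared-pools");
  Ort::SessionOptions session_options;
  session_options.DisablePerSessionThreads();  // sessions share the env's global pools
}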
517
+ inline Env& Env::EnableTelemetryEvents() {
518
+ ThrowOnError(GetApi().EnableTelemetryEvents(p_));
519
+ return *this;
520
+ }
521
+
522
+ inline Env& Env::DisableTelemetryEvents() {
523
+ ThrowOnError(GetApi().DisableTelemetryEvents(p_));
524
+ return *this;
525
+ }
526
+
527
+ inline Env& Env::UpdateEnvWithCustomLogLevel(OrtLoggingLevel log_severity_level) {
528
+ ThrowOnError(GetApi().UpdateEnvWithCustomLogLevel(p_, log_severity_level));
529
+ return *this;
530
+ }
531
+
532
+ inline Env& Env::CreateAndRegisterAllocator(const OrtMemoryInfo* mem_info, const OrtArenaCfg* arena_cfg) {
533
+ ThrowOnError(GetApi().CreateAndRegisterAllocator(p_, mem_info, arena_cfg));
534
+ return *this;
535
+ }
536
+
537
+ inline Env& Env::CreateAndRegisterAllocatorV2(const std::string& provider_type, const OrtMemoryInfo* mem_info, const std::unordered_map<std::string, std::string>& options, const OrtArenaCfg* arena_cfg) {
538
+ std::vector<const char*> keys, values;
539
+ auto num_entries = options.size();
540
+ if (num_entries > 0) {
541
+ keys.reserve(num_entries);
542
+ values.reserve(num_entries);
543
+ for (const auto& entry : options) {
544
+ keys.push_back(entry.first.c_str());
545
+ values.push_back(entry.second.c_str());
546
+ }
547
+ }
548
+ ThrowOnError(GetApi().CreateAndRegisterAllocatorV2(p_, provider_type.c_str(), mem_info, arena_cfg, keys.data(), values.data(), num_entries));
549
+ return *this;
550
+ }
551
+
552
+ inline CustomOpDomain::CustomOpDomain(const char* domain) {
553
+ ThrowOnError(GetApi().CreateCustomOpDomain(domain, &p_));
554
+ }
555
+
556
+ inline void CustomOpDomain::Add(const OrtCustomOp* op) {
557
+ ThrowOnError(GetApi().CustomOpDomain_Add(p_, op));
558
+ }
559
+
560
+ inline RunOptions::RunOptions() {
561
+ ThrowOnError(GetApi().CreateRunOptions(&p_));
562
+ }
563
+
564
+ inline RunOptions& RunOptions::SetRunLogVerbosityLevel(int level) {
565
+ ThrowOnError(GetApi().RunOptionsSetRunLogVerbosityLevel(p_, level));
566
+ return *this;
567
+ }
568
+
569
+ inline RunOptions& RunOptions::SetRunLogSeverityLevel(int level) {
570
+ ThrowOnError(GetApi().RunOptionsSetRunLogSeverityLevel(p_, level));
571
+ return *this;
572
+ }
573
+
574
+ inline int RunOptions::GetRunLogVerbosityLevel() const {
575
+ int out;
576
+ ThrowOnError(GetApi().RunOptionsGetRunLogVerbosityLevel(p_, &out));
577
+ return out;
578
+ }
579
+
580
+ inline int RunOptions::GetRunLogSeverityLevel() const {
581
+ int out;
582
+ ThrowOnError(GetApi().RunOptionsGetRunLogSeverityLevel(p_, &out));
583
+ return out;
584
+ }
585
+
586
+ inline RunOptions& RunOptions::SetRunTag(const char* run_tag) {
587
+ ThrowOnError(GetApi().RunOptionsSetRunTag(p_, run_tag));
588
+ return *this;
589
+ }
590
+
591
+ inline const char* RunOptions::GetRunTag() const {
592
+ const char* out;
593
+ ThrowOnError(GetApi().RunOptionsGetRunTag(p_, &out));
594
+ return out;
595
+ }
596
+
597
+ inline RunOptions& RunOptions::AddConfigEntry(const char* config_key, const char* config_value) {
598
+ ThrowOnError(GetApi().AddRunConfigEntry(p_, config_key, config_value));
599
+ return *this;
600
+ }
601
+
602
+ inline RunOptions& RunOptions::SetTerminate() {
603
+ ThrowOnError(GetApi().RunOptionsSetTerminate(p_));
604
+ return *this;
605
+ }
606
+
607
+ inline RunOptions& RunOptions::UnsetTerminate() {
608
+ ThrowOnError(GetApi().RunOptionsUnsetTerminate(p_));
609
+ return *this;
610
+ }
611
+
612
+ namespace detail {
613
+
614
+ template <typename T>
615
+ inline Ort::SessionOptions ConstSessionOptionsImpl<T>::Clone() const {
616
+ OrtSessionOptions* out;
617
+ ThrowOnError(GetApi().CloneSessionOptions(this->p_, &out));
618
+ return SessionOptions{out};
619
+ }
620
+
621
+ template <typename T>
622
+ inline std::string ConstSessionOptionsImpl<T>::GetConfigEntry(const char* config_key) const {
623
+ size_t size = 0;
624
+ // Feed nullptr for the data buffer to query the true size of the string value
625
+ Ort::ThrowOnError(GetApi().GetSessionConfigEntry(this->p_, config_key, nullptr, &size));
626
+
627
+ std::string out;
628
+ out.resize(size);
629
+ Ort::ThrowOnError(GetApi().GetSessionConfigEntry(this->p_, config_key, &out[0], &size));
630
+ out.resize(size - 1); // remove the terminating character '\0'
631
+
632
+ return out;
633
+ }
634
+
635
+ template <typename T>
636
+ inline bool ConstSessionOptionsImpl<T>::HasConfigEntry(const char* config_key) const {
637
+ int out = 0;
638
+ Ort::ThrowOnError(GetApi().HasSessionConfigEntry(this->p_, config_key, &out));
639
+ return static_cast<bool>(out);
640
+ }
641
+
642
+ template <typename T>
643
+ inline std::string ConstSessionOptionsImpl<T>::GetConfigEntryOrDefault(const char* config_key, const std::string& def) {
644
+ if (!this->HasConfigEntry(config_key)) {
645
+ return def;
646
+ }
647
+
648
+ return this->GetConfigEntry(config_key);
649
+ }
650
+
651
+ template <typename T>
652
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetIntraOpNumThreads(int intra_op_num_threads) {
653
+ ThrowOnError(GetApi().SetIntraOpNumThreads(this->p_, intra_op_num_threads));
654
+ return *this;
655
+ }
656
+
657
+ template <typename T>
658
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetInterOpNumThreads(int inter_op_num_threads) {
659
+ ThrowOnError(GetApi().SetInterOpNumThreads(this->p_, inter_op_num_threads));
660
+ return *this;
661
+ }
662
+
663
+ template <typename T>
664
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetGraphOptimizationLevel(GraphOptimizationLevel graph_optimization_level) {
665
+ ThrowOnError(GetApi().SetSessionGraphOptimizationLevel(this->p_, graph_optimization_level));
666
+ return *this;
667
+ }
668
+
669
+ template <typename T>
670
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetDeterministicCompute(bool value) {
671
+ ThrowOnError(GetApi().SetDeterministicCompute(this->p_, value));
672
+ return *this;
673
+ }
674
+
675
+ template <typename T>
676
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetOptimizedModelFilePath(const ORTCHAR_T* optimized_model_filepath) {
677
+ ThrowOnError(GetApi().SetOptimizedModelFilePath(this->p_, optimized_model_filepath));
678
+ return *this;
679
+ }
680
+
681
+ template <typename T>
682
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableProfiling(const ORTCHAR_T* profile_file_prefix) {
683
+ ThrowOnError(GetApi().EnableProfiling(this->p_, profile_file_prefix));
684
+ return *this;
685
+ }
686
+
687
+ template <typename T>
688
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisableProfiling() {
689
+ ThrowOnError(GetApi().DisableProfiling(this->p_));
690
+ return *this;
691
+ }
692
+
693
+ template <typename T>
694
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableOrtCustomOps() {
695
+ ThrowOnError(GetApi().EnableOrtCustomOps(this->p_));
696
+ return *this;
697
+ }
698
+
699
+ template <typename T>
700
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableMemPattern() {
701
+ ThrowOnError(GetApi().EnableMemPattern(this->p_));
702
+ return *this;
703
+ }
704
+
705
+ template <typename T>
706
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisableMemPattern() {
707
+ ThrowOnError(GetApi().DisableMemPattern(this->p_));
708
+ return *this;
709
+ }
710
+
711
+ template <typename T>
712
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableCpuMemArena() {
713
+ ThrowOnError(GetApi().EnableCpuMemArena(this->p_));
714
+ return *this;
715
+ }
716
+
717
+ template <typename T>
718
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisableCpuMemArena() {
719
+ ThrowOnError(GetApi().DisableCpuMemArena(this->p_));
720
+ return *this;
721
+ }
722
+
723
+ template <typename T>
724
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetExecutionMode(ExecutionMode execution_mode) {
725
+ ThrowOnError(GetApi().SetSessionExecutionMode(this->p_, execution_mode));
726
+ return *this;
727
+ }
728
+
729
+ template <typename T>
730
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetLogId(const char* logid) {
731
+ ThrowOnError(GetApi().SetSessionLogId(this->p_, logid));
732
+ return *this;
733
+ }
734
+
735
+ template <typename T>
736
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetLogSeverityLevel(int level) {
737
+ ThrowOnError(GetApi().SetSessionLogSeverityLevel(this->p_, level));
738
+ return *this;
739
+ }
740
+
741
+ template <typename T>
742
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::Add(OrtCustomOpDomain* custom_op_domain) {
743
+ ThrowOnError(GetApi().AddCustomOpDomain(this->p_, custom_op_domain));
744
+ return *this;
745
+ }
746
+
747
+ template <typename T>
748
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddConfigEntry(const char* config_key, const char* config_value) {
749
+ ThrowOnError(GetApi().AddSessionConfigEntry(this->p_, config_key, config_value));
750
+ return *this;
751
+ }
752
+
753
+ template <typename T>
754
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddInitializer(const char* name, const OrtValue* ort_val) {
755
+ ThrowOnError(GetApi().AddInitializer(this->p_, name, ort_val));
756
+ return *this;
757
+ }
758
+
759
+ template <typename T>
760
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisablePerSessionThreads() {
761
+ ThrowOnError(GetApi().DisablePerSessionThreads(this->p_));
762
+ return *this;
763
+ }
764
+
765
+ template <typename T>
766
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddExternalInitializers(const std::vector<std::string>& names,
767
+ const std::vector<Value>& ort_values) {
768
+ const size_t inputs_num = names.size();
769
+ if (inputs_num != ort_values.size()) {
770
+ ORT_CXX_API_THROW("Expecting names and ort_values to have the same length", ORT_INVALID_ARGUMENT);
771
+ }
772
+ std::vector<const char*> names_ptr;
773
+ std::vector<const OrtValue*> ort_values_ptrs;
774
+ names_ptr.reserve(inputs_num);
775
+ ort_values_ptrs.reserve(inputs_num);
776
+ for (size_t i = 0; i < inputs_num; ++i) {
777
+ names_ptr.push_back(names[i].c_str());
778
+ ort_values_ptrs.push_back(ort_values[i]);
779
+ }
780
+ ThrowOnError(GetApi().AddExternalInitializers(this->p_, names_ptr.data(), ort_values_ptrs.data(), inputs_num));
781
+ return *this;
782
+ }
783
+
784
+ template <typename T>
785
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddExternalInitializersFromFilesInMemory(const std::vector<std::basic_string<ORTCHAR_T>>& file_names,
786
+ const std::vector<char*>& buffer_array,
787
+ const std::vector<size_t>& file_lengths) {
788
+ const size_t inputs_num = file_names.size();
789
+ if (inputs_num != buffer_array.size()) {
790
+ ORT_CXX_API_THROW("Expecting names and buffer_array to have the same length", ORT_INVALID_ARGUMENT);
791
+ }
792
+ if (inputs_num != file_lengths.size()) {
793
+ ORT_CXX_API_THROW("Expecting names and file_lengths to have the same length", ORT_INVALID_ARGUMENT);
794
+ }
795
+ std::vector<const ORTCHAR_T*> names_ptr;
796
+ names_ptr.reserve(inputs_num);
797
+ for (size_t i = 0; i < inputs_num; ++i) {
798
+ names_ptr.push_back(file_names[i].c_str());
799
+ }
800
+ ThrowOnError(GetApi().AddExternalInitializersFromFilesInMemory(this->p_, names_ptr.data(), buffer_array.data(),
801
+ file_lengths.data(), inputs_num));
802
+ return *this;
803
+ }
804
+
805
+ template <typename T>
806
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_CUDA(const OrtCUDAProviderOptions& provider_options) {
807
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_CUDA(this->p_, &provider_options));
808
+ return *this;
809
+ }
810
+
811
+ template <typename T>
812
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_CUDA_V2(const OrtCUDAProviderOptionsV2& provider_options) {
813
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_CUDA_V2(this->p_, &provider_options));
814
+ return *this;
815
+ }
816
+
817
+ template <typename T>
818
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_ROCM(const OrtROCMProviderOptions& provider_options) {
819
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_ROCM(this->p_, &provider_options));
820
+ return *this;
821
+ }
822
+
823
+ template <typename T>
824
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_TensorRT(const OrtTensorRTProviderOptions& provider_options) {
825
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_TensorRT(this->p_, &provider_options));
826
+ return *this;
827
+ }
828
+
829
+ template <typename T>
830
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_TensorRT_V2(const OrtTensorRTProviderOptionsV2& provider_options) {
831
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_TensorRT_V2(this->p_, &provider_options));
832
+ return *this;
833
+ }
834
+
835
+ template <typename T>
836
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_MIGraphX(const OrtMIGraphXProviderOptions& provider_options) {
837
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_MIGraphX(this->p_, &provider_options));
838
+ return *this;
839
+ }
840
+
841
+ template <typename T>
842
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_CANN(const OrtCANNProviderOptions& provider_options) {
843
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_CANN(this->p_, &provider_options));
844
+ return *this;
845
+ }
846
+
847
+ template <typename T>
848
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_Dnnl(const OrtDnnlProviderOptions& provider_options) {
849
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_Dnnl(this->p_, &provider_options));
850
+ return *this;
851
+ }
852
+
853
+ template <typename T>
854
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider(
855
+ const std::string& provider_name,
856
+ const std::unordered_map<std::string, std::string>& provider_options) {
857
+ auto num_entries = provider_options.size();
858
+ std::vector<const char*> keys, values;
859
+ if (num_entries > 0) {
860
+ keys.reserve(num_entries);
861
+ values.reserve(num_entries);
862
+
863
+ for (const auto& entry : provider_options) {
864
+ keys.push_back(entry.first.c_str());
865
+ values.push_back(entry.second.c_str());
866
+ }
867
+ }
868
+
869
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider(this->p_, provider_name.c_str(),
870
+ keys.data(), values.data(), num_entries));
871
+
872
+ return *this;
873
+ }
874
+
875
+ template <typename T>
876
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetCustomCreateThreadFn(OrtCustomCreateThreadFn ort_custom_create_thread_fn) {
877
+ ThrowOnError(GetApi().SessionOptionsSetCustomCreateThreadFn(this->p_, ort_custom_create_thread_fn));
878
+ return *this;
879
+ }
880
+
881
+ template <typename T>
882
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetCustomThreadCreationOptions(void* ort_custom_thread_creation_options) {
883
+ ThrowOnError(GetApi().SessionOptionsSetCustomThreadCreationOptions(this->p_, ort_custom_thread_creation_options));
884
+ return *this;
885
+ }
886
+
887
+ template <typename T>
888
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetCustomJoinThreadFn(OrtCustomJoinThreadFn ort_custom_join_thread_fn) {
889
+ ThrowOnError(GetApi().SessionOptionsSetCustomJoinThreadFn(this->p_, ort_custom_join_thread_fn));
890
+ return *this;
891
+ }
892
+
893
+ template <typename T>
894
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_OpenVINO(const OrtOpenVINOProviderOptions& provider_options) {
895
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_OpenVINO(this->p_, &provider_options));
896
+ return *this;
897
+ }
898
+
899
+ template <typename T>
900
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_OpenVINO_V2(const std::unordered_map<std::string, std::string>& provider_options) {
901
+ auto num_entries = provider_options.size();
902
+ std::vector<const char*> keys, values;
903
+ if (num_entries > 0) {
904
+ keys.reserve(num_entries);
905
+ values.reserve(num_entries);
906
+
907
+ for (const auto& entry : provider_options) {
908
+ keys.push_back(entry.first.c_str());
909
+ values.push_back(entry.second.c_str());
910
+ }
911
+ }
912
+
913
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_OpenVINO_V2(this->p_,
914
+ keys.data(), values.data(), num_entries));
915
+
916
+ return *this;
917
+ }
918
+
919
+ template <typename T>
920
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_VitisAI(const std::unordered_map<std::string, std::string>& provider_options) {
921
+ auto num_entries = provider_options.size();
922
+ std::vector<const char*> keys, values;
923
+ if (num_entries > 0) {
924
+ keys.reserve(num_entries);
925
+ values.reserve(num_entries);
926
+
927
+ for (const auto& entry : provider_options) {
928
+ keys.push_back(entry.first.c_str());
929
+ values.push_back(entry.second.c_str());
930
+ }
931
+ }
932
+
933
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_VitisAI(this->p_, keys.data(), values.data(), num_entries));
934
+
935
+ return *this;
936
+ }
937
+
938
+ template <typename T>
939
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::RegisterCustomOpsLibrary(const ORTCHAR_T* library_name,
940
+ const CustomOpConfigs& custom_op_configs) {
941
+ // Add custom op config entries before registering the custom op library. Otherwise, the config entries _may_ be ignored by
942
+ // the custom op library.
943
+ for (const auto& config_iter : custom_op_configs.GetFlattenedConfigs()) {
944
+ AddConfigEntry(config_iter.first.c_str(), config_iter.second.c_str());
945
+ }
946
+
947
+ ThrowOnError(GetApi().RegisterCustomOpsLibrary_V2(this->p_, library_name));
948
+ return *this;
949
+ }
950
+
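A sketch of the intended call order (the op name, config key/value, and library path are hypothetical): per-op entries are collected in a CustomOpConfigs, and RegisterCustomOpsLibrary adds them as session config entries before registering the library, as the comment above notes.

void RegisterMyCustomOps(Ort::SessionOptions& session_options) {
  Ort::CustomOpConfigs custom_op_configs;
  custom_op_configs.AddConfig("MyCustomOp", "device_type", "CPU");  // hypothetical names
  // Flattened entries become "custom_op.MyCustomOp.device_type" session config keys.
  session_options.RegisterCustomOpsLibrary(ORT_TSTR("libmy_custom_ops.dylib"),
                                           custom_op_configs);
}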
951
+ template <typename T>
952
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::RegisterCustomOpsUsingFunction(const char* registration_function_name) {
953
+ ThrowOnError(GetApi().RegisterCustomOpsUsingFunction(this->p_, registration_function_name));
954
+ return *this;
955
+ }
956
+
957
+ /// Session
958
+ template <typename T>
959
+ inline size_t ConstSessionImpl<T>::GetInputCount() const {
960
+ size_t out;
961
+ ThrowOnError(GetApi().SessionGetInputCount(this->p_, &out));
962
+ return out;
963
+ }
964
+
965
+ template <typename T>
966
+ inline size_t ConstSessionImpl<T>::GetOutputCount() const {
967
+ size_t out;
968
+ ThrowOnError(GetApi().SessionGetOutputCount(this->p_, &out));
969
+ return out;
970
+ }
971
+
972
+ template <typename T>
973
+ inline size_t ConstSessionImpl<T>::GetOverridableInitializerCount() const {
974
+ size_t out;
975
+ ThrowOnError(GetApi().SessionGetOverridableInitializerCount(this->p_, &out));
976
+ return out;
977
+ }
978
+
979
+ template <typename T>
980
+ inline AllocatedStringPtr ConstSessionImpl<T>::GetInputNameAllocated(size_t index, OrtAllocator* allocator) const {
981
+ char* out;
982
+ ThrowOnError(GetApi().SessionGetInputName(this->p_, index, allocator, &out));
983
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
984
+ }
985
+
986
+ template <typename T>
987
+ inline AllocatedStringPtr ConstSessionImpl<T>::GetOutputNameAllocated(size_t index, OrtAllocator* allocator) const {
988
+ char* out;
989
+ ThrowOnError(GetApi().SessionGetOutputName(this->p_, index, allocator, &out));
990
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
991
+ }
992
+
993
+ template <typename T>
994
+ inline AllocatedStringPtr ConstSessionImpl<T>::GetOverridableInitializerNameAllocated(size_t index, OrtAllocator* allocator) const {
995
+ char* out;
996
+ ThrowOnError(GetApi().SessionGetOverridableInitializerName(this->p_, index, allocator, &out));
997
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
998
+ }
999
+
1000
+ template <typename T>
1001
+ inline uint64_t ConstSessionImpl<T>::GetProfilingStartTimeNs() const {
1002
+ uint64_t out;
1003
+ ThrowOnError(GetApi().SessionGetProfilingStartTimeNs(this->p_, &out));
1004
+ return out;
1005
+ }
1006
+
1007
+ template <typename T>
1008
+ inline ModelMetadata ConstSessionImpl<T>::GetModelMetadata() const {
1009
+ OrtModelMetadata* out;
1010
+ ThrowOnError(GetApi().SessionGetModelMetadata(this->p_, &out));
1011
+ return ModelMetadata{out};
1012
+ }
1013
+
1014
+ template <typename T>
1015
+ inline TypeInfo ConstSessionImpl<T>::GetInputTypeInfo(size_t index) const {
1016
+ OrtTypeInfo* out;
1017
+ ThrowOnError(GetApi().SessionGetInputTypeInfo(this->p_, index, &out));
1018
+ return TypeInfo{out};
1019
+ }
1020
+
1021
+ template <typename T>
1022
+ inline TypeInfo ConstSessionImpl<T>::GetOutputTypeInfo(size_t index) const {
1023
+ OrtTypeInfo* out;
1024
+ ThrowOnError(GetApi().SessionGetOutputTypeInfo(this->p_, index, &out));
1025
+ return TypeInfo{out};
1026
+ }
1027
+
1028
+ template <typename T>
1029
+ inline TypeInfo ConstSessionImpl<T>::GetOverridableInitializerTypeInfo(size_t index) const {
1030
+ OrtTypeInfo* out;
1031
+ ThrowOnError(GetApi().SessionGetOverridableInitializerTypeInfo(this->p_, index, &out));
1032
+ return TypeInfo{out};
1033
+ }
1034
+
1035
+ template <typename T>
1036
+ inline std::vector<Value> SessionImpl<T>::Run(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
1037
+ const char* const* output_names, size_t output_count) {
1038
+ std::vector<Value> output_values;
1039
+ output_values.reserve(output_count);
1040
+ for (size_t i = 0; i < output_count; i++)
1041
+ output_values.emplace_back(nullptr);
1042
+ Run(run_options, input_names, input_values, input_count, output_names, output_values.data(), output_count);
1043
+ return output_values;
1044
+ }
1045
+
1046
+ template <typename T>
1047
+ inline void SessionImpl<T>::Run(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
1048
+ const char* const* output_names, Value* output_values, size_t output_count) {
1049
+ static_assert(sizeof(Value) == sizeof(OrtValue*), "Value is really just an array of OrtValue* in memory, so we can reinterpret_cast safely");
1050
+ auto ort_input_values = reinterpret_cast<const OrtValue* const*>(input_values);
1051
+ auto ort_output_values = reinterpret_cast<OrtValue**>(output_values);
1052
+ ThrowOnError(GetApi().Run(this->p_, run_options, input_names, ort_input_values, input_count, output_names, output_count, ort_output_values));
1053
+ }
1054
+
1055
+ template <typename T>
1056
+ inline void SessionImpl<T>::Run(const RunOptions& run_options, const IoBinding& io_binding) {
1057
+ ThrowOnError(GetApi().RunWithBinding(this->p_, run_options, io_binding));
1058
+ }
1059
+
1060
+ template <typename T>
1061
+ inline void SessionImpl<T>::RunAsync(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
1062
+ const char* const* output_names, Value* output_values, size_t output_count, RunAsyncCallbackFn callback, void* user_data) {
1063
+ auto ort_input_values = reinterpret_cast<const OrtValue* const*>(input_values);
1064
+ auto ort_output_values = reinterpret_cast<OrtValue**>(output_values);
1065
+ ThrowOnError(GetApi().RunAsync(this->p_, run_options, input_names,
1066
+ ort_input_values, input_count, output_names, output_count,
1067
+ ort_output_values, callback, user_data));
1068
+ }
1069
+
1070
+ template <typename T>
1071
+ inline AllocatedStringPtr SessionImpl<T>::EndProfilingAllocated(OrtAllocator* allocator) {
1072
+ char* out = nullptr;
1073
+ ThrowOnError(GetApi().SessionEndProfiling(this->p_, allocator, &out));
1074
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1075
+ }
1076
+
1077
+ } // namespace detail
1078
+
1079
+ inline SessionOptions::SessionOptions() {
1080
+ ThrowOnError(GetApi().CreateSessionOptions(&this->p_));
1081
+ }
1082
+
1083
+ /// CustomOpConfigs
1084
+ inline std::string detail::MakeCustomOpConfigEntryKey(const char* custom_op_name, const char* config) {
1085
+ std::string config_key = "custom_op.";
1086
+
1087
+ config_key += custom_op_name;
1088
+ config_key += ".";
1089
+ config_key += config;
1090
+
1091
+ return config_key;
1092
+ }
1093
+
1094
+ inline CustomOpConfigs& CustomOpConfigs::AddConfig(const char* custom_op_name, const char* config_key, const char* config_value) {
1095
+ const std::string full_flat_key = detail::MakeCustomOpConfigEntryKey(custom_op_name, config_key);
1096
+ flat_configs_[full_flat_key] = config_value;
1097
+ return *this;
1098
+ }
1099
+
1100
+ inline const std::unordered_map<std::string, std::string>& CustomOpConfigs::GetFlattenedConfigs() const {
1101
+ return flat_configs_;
1102
+ }
1103
+
1104
+ inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
1105
+ ThrowOnError(GetApi().CreateSession(env, model_path, options, &this->p_));
1106
+ }
1107
+
1108
+ inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options,
1109
+ OrtPrepackedWeightsContainer* prepacked_weights_container) {
1110
+ ThrowOnError(GetApi().CreateSessionWithPrepackedWeightsContainer(env, model_path, options, prepacked_weights_container, &this->p_));
1111
+ }
1112
+
1113
+ inline Session::Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
1114
+ ThrowOnError(GetApi().CreateSessionFromArray(env, model_data, model_data_length, options, &this->p_));
1115
+ }
1116
+
1117
+ inline Session::Session(const Env& env, const void* model_data, size_t model_data_length,
1118
+ const SessionOptions& options, OrtPrepackedWeightsContainer* prepacked_weights_container) {
1119
+ ThrowOnError(GetApi().CreateSessionFromArrayWithPrepackedWeightsContainer(env, model_data, model_data_length, options,
1120
+ prepacked_weights_container, &this->p_));
1121
+ }
1122
+
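Finally, a compact end-to-end sketch of the session lifecycle these constructors implement (user code, not part of this header; the model path and tensor names are hypothetical, and CreateTensor/MemoryInfo come from the C++ API header that was too large to render above):

#include <array>
#include <vector>
#include "onnxruntime_cxx_api.h"

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "example");
  Ort::SessionOptions session_options;
  Ort::Session session(env, ORT_TSTR("model.onnx"), session_options);  // hypothetical path

  std::array<float, 4> input_data{1.f, 2.f, 3.f, 4.f};
  std::array<int64_t, 2> shape{1, 4};
  auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
  Ort::Value input = Ort::Value::CreateTensor<float>(memory_info, input_data.data(),
                                                     input_data.size(), shape.data(),
                                                     shape.size());

  const char* input_names[] = {"input"};    // hypothetical tensor names
  const char* output_names[] = {"output"};
  std::vector<Ort::Value> outputs = session.Run(Ort::RunOptions{nullptr}, input_names,
                                                &input, 1, output_names, 1);
  return 0;
}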
1123
+ inline AllocatedStringPtr ModelMetadata::GetProducerNameAllocated(OrtAllocator* allocator) const {
1124
+ char* out;
1125
+ ThrowOnError(GetApi().ModelMetadataGetProducerName(p_, allocator, &out));
1126
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1127
+ }
1128
+
1129
+ inline AllocatedStringPtr ModelMetadata::GetGraphNameAllocated(OrtAllocator* allocator) const {
1130
+ char* out;
1131
+ ThrowOnError(GetApi().ModelMetadataGetGraphName(p_, allocator, &out));
1132
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1133
+ }
1134
+
1135
+ inline AllocatedStringPtr ModelMetadata::GetDomainAllocated(OrtAllocator* allocator) const {
1136
+ char* out;
1137
+ ThrowOnError(GetApi().ModelMetadataGetDomain(p_, allocator, &out));
1138
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1139
+ }
1140
+
1141
+ inline AllocatedStringPtr Ort::ModelMetadata::GetDescriptionAllocated(OrtAllocator* allocator) const {
1142
+ char* out;
1143
+ ThrowOnError(GetApi().ModelMetadataGetDescription(p_, allocator, &out));
1144
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1145
+ }
1146
+
1147
+ inline AllocatedStringPtr ModelMetadata::GetGraphDescriptionAllocated(OrtAllocator* allocator) const {
1148
+ char* out;
1149
+ ThrowOnError(GetApi().ModelMetadataGetGraphDescription(p_, allocator, &out));
1150
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1151
+ }
1152
+
1153
+ inline AllocatedStringPtr ModelMetadata::LookupCustomMetadataMapAllocated(const char* key, OrtAllocator* allocator) const {
1154
+ char* out;
1155
+ ThrowOnError(GetApi().ModelMetadataLookupCustomMetadataMap(p_, allocator, key, &out));
1156
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1157
+ }
1158
+
1159
+ inline std::vector<AllocatedStringPtr> ModelMetadata::GetCustomMetadataMapKeysAllocated(OrtAllocator* allocator) const {
1160
+ auto deletor = detail::AllocatedFree(allocator);
1161
+ std::vector<AllocatedStringPtr> result;
1162
+
1163
+ char** out = nullptr;
1164
+ int64_t num_keys = 0;
1165
+ ThrowOnError(GetApi().ModelMetadataGetCustomMetadataMapKeys(p_, allocator, &out, &num_keys));
1166
+ if (num_keys <= 0) {
1167
+ return result;
1168
+ }
1169
+
1170
+ // array of pointers will be freed
1171
+ std::unique_ptr<void, decltype(deletor)> array_guard(out, deletor);
1172
+ // reserve may throw
1173
+ auto strings_deletor = [&deletor, num_keys](char** out) { for(int64_t i = 0; i < num_keys; ++i) deletor(out[i]); };
1174
+ std::unique_ptr<char*, decltype(strings_deletor)> strings_guard(out, strings_deletor);
1175
+ result.reserve(static_cast<size_t>(num_keys));
1176
+ strings_guard.release();
1177
+ for (int64_t i = 0; i < num_keys; ++i) {
1178
+ result.push_back(AllocatedStringPtr(out[i], deletor));
1179
+ }
1180
+
1181
+ return result;
1182
+ }
1183
+
1184
+ inline int64_t ModelMetadata::GetVersion() const {
1185
+ int64_t out;
1186
+ ThrowOnError(GetApi().ModelMetadataGetVersion(p_, &out));
1187
+ return out;
1188
+ }
1189
+
1190
+ namespace detail {
1191
+
1192
+ template <typename T>
1193
+ inline ONNXTensorElementDataType TensorTypeAndShapeInfoImpl<T>::GetElementType() const {
1194
+ ONNXTensorElementDataType out;
1195
+ ThrowOnError(GetApi().GetTensorElementType(this->p_, &out));
1196
+ return out;
1197
+ }
1198
+
1199
+ template <typename T>
1200
+ inline size_t TensorTypeAndShapeInfoImpl<T>::GetElementCount() const {
1201
+ size_t out;
1202
+ ThrowOnError(GetApi().GetTensorShapeElementCount(this->p_, &out));
1203
+ return static_cast<size_t>(out);
1204
+ }
1205
+
1206
+ template <typename T>
1207
+ inline size_t TensorTypeAndShapeInfoImpl<T>::GetDimensionsCount() const {
1208
+ size_t out;
1209
+ ThrowOnError(GetApi().GetDimensionsCount(this->p_, &out));
1210
+ return out;
1211
+ }
1212
+
1213
+ template <typename T>
1214
+ inline void TensorTypeAndShapeInfoImpl<T>::GetDimensions(int64_t* values, size_t values_count) const {
1215
+ ThrowOnError(GetApi().GetDimensions(this->p_, values, values_count));
1216
+ }
1217
+
1218
+ template <typename T>
1219
+ inline void TensorTypeAndShapeInfoImpl<T>::GetSymbolicDimensions(const char** values, size_t values_count) const {
1220
+ ThrowOnError(GetApi().GetSymbolicDimensions(this->p_, values, values_count));
1221
+ }
1222
+
1223
+ template <typename T>
1224
+ inline std::vector<int64_t> TensorTypeAndShapeInfoImpl<T>::GetShape() const {
1225
+ std::vector<int64_t> out(GetDimensionsCount(), 0);
1226
+ ThrowOnError(GetApi().GetDimensions(this->p_, out.data(), out.size()));
1227
+ return out;
1228
+ }
1229
+
1230
+ template <typename T>
1231
+ inline ConstTensorTypeAndShapeInfo TypeInfoImpl<T>::GetTensorTypeAndShapeInfo() const {
1232
+ const OrtTensorTypeAndShapeInfo* out;
1233
+ ThrowOnError(GetApi().CastTypeInfoToTensorInfo(this->p_, &out));
1234
+ return ConstTensorTypeAndShapeInfo{out};
1235
+ }
1236
+
1237
+ template <typename T>
1238
+ inline ConstSequenceTypeInfo TypeInfoImpl<T>::GetSequenceTypeInfo() const {
1239
+ const OrtSequenceTypeInfo* out;
1240
+ ThrowOnError(GetApi().CastTypeInfoToSequenceTypeInfo(this->p_, &out));
1241
+ return ConstSequenceTypeInfo{out};
1242
+ }
1243
+
1244
+ template <typename T>
1245
+ inline ConstMapTypeInfo TypeInfoImpl<T>::GetMapTypeInfo() const {
1246
+ const OrtMapTypeInfo* out;
1247
+ ThrowOnError(GetApi().CastTypeInfoToMapTypeInfo(this->p_, &out));
1248
+ return ConstMapTypeInfo{out};
1249
+ }
1250
+
1251
+ template <typename T>
1252
+ inline ONNXType TypeInfoImpl<T>::GetONNXType() const {
1253
+ ONNXType out;
1254
+ ThrowOnError(GetApi().GetOnnxTypeFromTypeInfo(this->p_, &out));
1255
+ return out;
1256
+ }
1257
+
1258
+ template <typename T>
1259
+ inline TypeInfo SequenceTypeInfoImpl<T>::GetSequenceElementType() const {
1260
+ OrtTypeInfo* output;
1261
+ ThrowOnError(GetApi().GetSequenceElementType(this->p_, &output));
1262
+ return TypeInfo{output};
1263
+ }
1264
+
1265
+ template <typename T>
1266
+ inline TypeInfo OptionalTypeInfoImpl<T>::GetOptionalElementType() const {
1267
+ OrtTypeInfo* info;
1268
+ ThrowOnError(GetApi().GetOptionalContainedTypeInfo(this->p_, &info));
1269
+ return TypeInfo{info};
1270
+ }
1271
+
1272
+ template <typename T>
1273
+ inline ONNXTensorElementDataType MapTypeInfoImpl<T>::GetMapKeyType() const {
1274
+ ONNXTensorElementDataType out;
1275
+ ThrowOnError(GetApi().GetMapKeyType(this->p_, &out));
1276
+ return out;
1277
+ }
1278
+
1279
+ template <typename T>
1280
+ inline TypeInfo MapTypeInfoImpl<T>::GetMapValueType() const {
1281
+ OrtTypeInfo* output;
1282
+ ThrowOnError(GetApi().GetMapValueType(this->p_, &output));
1283
+ return TypeInfo{output};
1284
+ }
1285
+
1286
+ template <typename T>
1287
+ inline ConstOptionalTypeInfo TypeInfoImpl<T>::GetOptionalTypeInfo() const {
1288
+ const OrtOptionalTypeInfo* info;
1289
+ ThrowOnError(GetApi().CastTypeInfoToOptionalTypeInfo(this->p_, &info));
1290
+ return ConstOptionalTypeInfo{info};
1291
+ }
1292
+
1293
+ } // namespace detail
1294
+
1295
+ namespace detail {
1296
+
1297
+ template <typename T>
1298
+ template <typename R>
1299
+ inline void ConstValueImpl<T>::GetOpaqueData(const char* domain, const char* type_name, R& out) const {
1300
+ ThrowOnError(GetApi().GetOpaqueValue(domain, type_name, this->p_, &out, sizeof(R)));
1301
+ }
1302
+
1303
+ template <typename T>
1304
+ inline bool ConstValueImpl<T>::IsTensor() const {
1305
+ int out;
1306
+ ThrowOnError(GetApi().IsTensor(this->p_, &out));
1307
+ return out != 0;
1308
+ }
1309
+
1310
+ template <typename T>
1311
+ inline bool ConstValueImpl<T>::HasValue() const {
1312
+ int out;
1313
+ ThrowOnError(GetApi().HasValue(this->p_, &out));
1314
+ return out != 0;
1315
+ }
1316
+
1317
+ template <typename T>
1318
+ inline size_t ConstValueImpl<T>::GetCount() const {
1319
+ size_t out;
1320
+ ThrowOnError(GetApi().GetValueCount(this->p_, &out));
1321
+ return out;
1322
+ }
1323
+
1324
+ template <typename T>
1325
+ inline Value ConstValueImpl<T>::GetValue(int index, OrtAllocator* allocator) const {
1326
+ OrtValue* out;
1327
+ ThrowOnError(GetApi().GetValue(this->p_, index, allocator, &out));
1328
+ return Value{out};
1329
+ }
1330
+
1331
+ template <typename T>
1332
+ inline size_t ConstValueImpl<T>::GetStringTensorDataLength() const {
1333
+ size_t out;
1334
+ ThrowOnError(GetApi().GetStringTensorDataLength(this->p_, &out));
1335
+ return out;
1336
+ }
1337
+
1338
+ template <typename T>
1339
+ inline size_t ConstValueImpl<T>::GetStringTensorElementLength(size_t element_index) const {
1340
+ size_t out;
1341
+ ThrowOnError(GetApi().GetStringTensorElementLength(this->p_, element_index, &out));
1342
+ return out;
1343
+ }
1344
+
1345
+ template <typename T>
1346
+ template <typename R>
1347
+ inline const R* ConstValueImpl<T>::GetTensorData() const {
1348
+ R* out;
1349
+ ThrowOnError(GetApi().GetTensorMutableData(const_cast<OrtValue*>(this->p_), (void**)&out));
1350
+ return out;
1351
+ }
1352
+
1353
+ template <typename T>
1354
+ inline const void* ConstValueImpl<T>::GetTensorRawData() const {
1355
+ void* out;
1356
+ ThrowOnError(GetApi().GetTensorMutableData(const_cast<OrtValue*>(this->p_), &out));
1357
+ return out;
1358
+ }
1359
+
1360
+ template <typename T>
1361
+ inline TypeInfo ConstValueImpl<T>::GetTypeInfo() const {
1362
+ OrtTypeInfo* output;
1363
+ ThrowOnError(GetApi().GetTypeInfo(this->p_, &output));
1364
+ return TypeInfo{output};
1365
+ }
1366
+
1367
+ template <typename T>
1368
+ inline TensorTypeAndShapeInfo ConstValueImpl<T>::GetTensorTypeAndShapeInfo() const {
1369
+ OrtTensorTypeAndShapeInfo* output;
1370
+ ThrowOnError(GetApi().GetTensorTypeAndShape(this->p_, &output));
1371
+ return TensorTypeAndShapeInfo{output};
1372
+ }
1373
+
1374
+ template <typename T>
1375
+ inline ConstMemoryInfo ConstValueImpl<T>::GetTensorMemoryInfo() const {
1376
+ const OrtMemoryInfo* mem_info;
1377
+ ThrowOnError(GetApi().GetTensorMemoryInfo(this->p_, &mem_info));
1378
+ return ConstMemoryInfo(mem_info);
1379
+ }
1380
+
1381
+ template <typename T>
1382
+ inline void ConstValueImpl<T>::GetStringTensorElement(size_t buffer_length, size_t element_index, void* buffer) const {
1383
+ ThrowOnError(GetApi().GetStringTensorElement(this->p_, buffer_length, element_index, buffer));
1384
+ }
1385
+
1386
+ template <typename T>
1387
+ inline std::string ConstValueImpl<T>::GetStringTensorElement(size_t element_index) const {
1388
+ size_t buffer_length;
1389
+ ThrowOnError(GetApi().GetStringTensorElementLength(this->p_, element_index, &buffer_length));
1390
+
1391
+ std::string s;
1392
+ s.resize(buffer_length);
1393
+ ThrowOnError(GetApi().GetStringTensorElement(this->p_, buffer_length, element_index, &s[0]));
1394
+ return s;
1395
+ }
1396
+
1397
+ template <typename T>
1398
+ inline void ConstValueImpl<T>::GetStringTensorContent(void* buffer, size_t buffer_length, size_t* offsets, size_t offsets_count) const {
1399
+ ThrowOnError(GetApi().GetStringTensorContent(this->p_, buffer, buffer_length, offsets, offsets_count));
1400
+ }
1401
+
1402
+ #if !defined(DISABLE_SPARSE_TENSORS)
1403
+ template <typename T>
1404
+ inline OrtSparseFormat ConstValueImpl<T>::GetSparseFormat() const {
1405
+ OrtSparseFormat format;
1406
+ ThrowOnError(GetApi().GetSparseTensorFormat(this->p_, &format));
1407
+ return format;
1408
+ }
1409
+
1410
+ template <typename T>
1411
+ inline TensorTypeAndShapeInfo ConstValueImpl<T>::GetSparseTensorValuesTypeAndShapeInfo() const {
1412
+ OrtTensorTypeAndShapeInfo* output;
1413
+ ThrowOnError(GetApi().GetSparseTensorValuesTypeAndShape(this->p_, &output));
1414
+ return TensorTypeAndShapeInfo{output};
1415
+ }
1416
+
1417
+ template <typename T>
1418
+ inline TensorTypeAndShapeInfo ConstValueImpl<T>::GetSparseTensorIndicesTypeShapeInfo(OrtSparseIndicesFormat indices_format) const {
1419
+ OrtTensorTypeAndShapeInfo* output;
1420
+ ThrowOnError(GetApi().GetSparseTensorIndicesTypeShape(this->p_, indices_format, &output));
1421
+ return TensorTypeAndShapeInfo{output};
1422
+ }
1423
+
1424
+ template <typename T>
1425
+ template <typename R>
1426
+ inline const R* ConstValueImpl<T>::GetSparseTensorIndicesData(OrtSparseIndicesFormat indices_format, size_t& num_indices) const {
1427
+ const void* out;
1428
+ ThrowOnError(GetApi().GetSparseTensorIndices(this->p_, indices_format, &num_indices, &out));
1429
+ return reinterpret_cast<const R*>(out);
1430
+ }
1431
+
1432
+ template <typename T>
1433
+ inline bool ConstValueImpl<T>::IsSparseTensor() const {
1434
+ int out;
1435
+ ThrowOnError(GetApi().IsSparseTensor(this->p_, &out));
1436
+ return out != 0;
1437
+ }
1438
+
1439
+ template <typename T>
1440
+ template <typename R>
1441
+ inline const R* ConstValueImpl<T>::GetSparseTensorValues() const {
1442
+ const void* out;
1443
+ ThrowOnError(GetApi().GetSparseTensorValues(this->p_, &out));
1444
+ return reinterpret_cast<const R*>(out);
1445
+ }
1446
+
1447
+ #endif
1448
+
1449
+ template <typename T>
1450
+ void ValueImpl<T>::FillStringTensor(const char* const* s, size_t s_len) {
1451
+ ThrowOnError(GetApi().FillStringTensor(this->p_, s, s_len));
1452
+ }
1453
+
1454
+ template <typename T>
1455
+ void ValueImpl<T>::FillStringTensorElement(const char* s, size_t index) {
1456
+ ThrowOnError(GetApi().FillStringTensorElement(this->p_, s, index));
1457
+ }
1458
+
1459
+ template <typename T>
1460
+ inline char* ValueImpl<T>::GetResizedStringTensorElementBuffer(size_t index, size_t buffer_length) {
1461
+ char* result;
1462
+ ThrowOnError(GetApi().GetResizedStringTensorElementBuffer(this->p_, index, buffer_length, &result));
1463
+ return result;
1464
+ }
1465
+
1466
+ template <typename T>
1467
+ void* ValueImpl<T>::GetTensorMutableRawData() {
1468
+ void* out;
1469
+ ThrowOnError(GetApi().GetTensorMutableData(this->p_, &out));
1470
+ return out;
1471
+ }
1472
+
1473
+ template <typename T>
1474
+ template <typename R>
1475
+ R* ValueImpl<T>::GetTensorMutableData() {
1476
+ R* out;
1477
+ ThrowOnError(GetApi().GetTensorMutableData(this->p_, (void**)&out));
1478
+ return out;
1479
+ }
1480
+
1481
+ template <typename T>
1482
+ template <typename R>
1483
+ R& ValueImpl<T>::At(const std::vector<int64_t>& location) {
1484
+ static_assert(!std::is_same<T, std::string>::value, "this api does not support std::string");
1485
+ R* out;
1486
+ ThrowOnError(GetApi().TensorAt(this->p_, location.data(), location.size(), (void**)&out));
1487
+ return *out;
1488
+ }
1489
+
1490
+ #if !defined(DISABLE_SPARSE_TENSORS)
1491
+ template <typename T>
1492
+ void ValueImpl<T>::UseCooIndices(int64_t* indices_data, size_t indices_num) {
1493
+ ThrowOnError(GetApi().UseCooIndices(this->p_, indices_data, indices_num));
1494
+ }
1495
+
1496
+ template <typename T>
1497
+ void ValueImpl<T>::UseCsrIndices(int64_t* inner_data, size_t inner_num, int64_t* outer_data, size_t outer_num) {
1498
+ ThrowOnError(GetApi().UseCsrIndices(this->p_, inner_data, inner_num, outer_data, outer_num));
1499
+ }
1500
+
1501
+ template <typename T>
1502
+ void ValueImpl<T>::UseBlockSparseIndices(const Shape& indices_shape, int32_t* indices_data) {
1503
+ ThrowOnError(GetApi().UseBlockSparseIndices(this->p_, indices_shape.shape, indices_shape.shape_len, indices_data));
1504
+ }
1505
+
1506
+ template <typename T>
1507
+ void ValueImpl<T>::FillSparseTensorCoo(const OrtMemoryInfo* mem_info, const OrtSparseValuesParam& values_param,
1508
+ const int64_t* indices_data, size_t indices_num) {
1509
+ ThrowOnError(GetApi().FillSparseTensorCoo(this->p_, mem_info, values_param.values_shape,
1510
+ values_param.values_shape_len, values_param.data.p_data,
1511
+ indices_data, indices_num));
1512
+ }
1513
+
1514
+ template <typename T>
1515
+ void ValueImpl<T>::FillSparseTensorCsr(const OrtMemoryInfo* data_mem_info,
1516
+ const OrtSparseValuesParam& values,
1517
+ const int64_t* inner_indices_data, size_t inner_indices_num,
1518
+ const int64_t* outer_indices_data, size_t outer_indices_num) {
1519
+ ThrowOnError(GetApi().FillSparseTensorCsr(this->p_, data_mem_info, values.values_shape, values.values_shape_len, values.data.p_data,
1520
+ inner_indices_data, inner_indices_num,
1521
+ outer_indices_data, outer_indices_num));
1522
+ }
1523
+
1524
+ template <typename T>
1525
+ void ValueImpl<T>::FillSparseTensorBlockSparse(const OrtMemoryInfo* data_mem_info,
1526
+ const OrtSparseValuesParam& values,
1527
+ const Shape& indices_shape,
1528
+ const int32_t* indices_data) {
1529
+ ThrowOnError(GetApi().FillSparseTensorBlockSparse(this->p_, data_mem_info, values.values_shape, values.values_shape_len, values.data.p_data,
1530
+ indices_shape.shape, indices_shape.shape_len,
1531
+ indices_data));
1532
+ }
1533
+
1534
+ #endif // !defined(DISABLE_SPARSE_TENSORS)
1535
+
1536
+ } // namespace detail
1537
+
1538
+ template <typename T>
1539
+ inline Value Value::CreateTensor(const OrtMemoryInfo* info, T* p_data, size_t p_data_element_count, const int64_t* shape, size_t shape_len) {
1540
+ return CreateTensor(info, p_data, p_data_element_count * sizeof(T), shape, shape_len, TypeToTensorType<T>::type);
1541
+ }
1542
+
1543
+ inline Value Value::CreateTensor(const OrtMemoryInfo* info, void* p_data, size_t p_data_byte_count, const int64_t* shape, size_t shape_len,
1544
+ ONNXTensorElementDataType type) {
1545
+ OrtValue* out;
1546
+ ThrowOnError(GetApi().CreateTensorWithDataAsOrtValue(info, p_data, p_data_byte_count, shape, shape_len, type, &out));
1547
+ return Value{out};
1548
+ }
1549
+
1550
+ template <typename T>
1551
+ inline Value Value::CreateTensor(OrtAllocator* allocator, const int64_t* shape, size_t shape_len) {
1552
+ return CreateTensor(allocator, shape, shape_len, TypeToTensorType<T>::type);
1553
+ }
1554
+
1555
+ inline Value Value::CreateTensor(OrtAllocator* allocator, const int64_t* shape, size_t shape_len, ONNXTensorElementDataType type) {
1556
+ OrtValue* out;
1557
+ ThrowOnError(GetApi().CreateTensorAsOrtValue(allocator, shape, shape_len, type, &out));
1558
+ return Value{out};
1559
+ }
1560
+
1561
+ #if !defined(DISABLE_SPARSE_TENSORS)
1562
+
1563
+ template <typename T>
1564
+ inline Value Value::CreateSparseTensor(const OrtMemoryInfo* info, T* p_data, const Shape& dense_shape,
1565
+ const Shape& values_shape) {
1566
+ return CreateSparseTensor(info, p_data, dense_shape, values_shape, TypeToTensorType<T>::type);
1567
+ }
1568
+
1569
+ inline Value Value::CreateSparseTensor(const OrtMemoryInfo* info, void* p_data, const Shape& dense_shape,
1570
+ const Shape& values_shape, ONNXTensorElementDataType type) {
1571
+ OrtValue* out;
1572
+ ThrowOnError(GetApi().CreateSparseTensorWithValuesAsOrtValue(info, p_data, dense_shape.shape, dense_shape.shape_len,
1573
+ values_shape.shape, values_shape.shape_len, type, &out));
1574
+ return Value{out};
1575
+ }
1576
+
1577
+ template <typename T>
1578
+ inline Value Value::CreateSparseTensor(OrtAllocator* allocator, const Shape& dense_shape) {
1579
+ return CreateSparseTensor(allocator, dense_shape, TypeToTensorType<T>::type);
1580
+ }
1581
+
1582
+ inline Value Value::CreateSparseTensor(OrtAllocator* allocator, const Shape& dense_shape,
1583
+ ONNXTensorElementDataType type) {
1584
+ OrtValue* out;
1585
+ ThrowOnError(GetApi().CreateSparseTensorAsOrtValue(allocator, dense_shape.shape, dense_shape.shape_len, type, &out));
1586
+ return Value{out};
1587
+ }
1588
+ #endif // !defined(DISABLE_SPARSE_TENSORS)
1589
+
1590
+ inline Value Value::CreateMap(const Value& keys, const Value& values) {
1591
+ OrtValue* out;
1592
+ const OrtValue* inputs[2] = {keys, values};
1593
+ ThrowOnError(GetApi().CreateValue(inputs, 2, ONNX_TYPE_MAP, &out));
1594
+ return Value{out};
1595
+ }
1596
+
1597
+ inline Value Value::CreateSequence(const std::vector<Value>& values) {
1598
+ OrtValue* out;
1599
+ std::vector<const OrtValue*> values_ort{values.data(), values.data() + values.size()};
1600
+ ThrowOnError(GetApi().CreateValue(values_ort.data(), values_ort.size(), ONNX_TYPE_SEQUENCE, &out));
1601
+ return Value{out};
1602
+ }
1603
+
1604
+ template <typename T>
1605
+ inline Value Value::CreateOpaque(const char* domain, const char* type_name, const T& data_container) {
1606
+ OrtValue* out;
1607
+ ThrowOnError(GetApi().CreateOpaqueValue(domain, type_name, &data_container, sizeof(T), &out));
1608
+ return Value{out};
1609
+ }
1610
+
1611
+ //
1612
+ // Custom OP Inlines
1613
+ //
1614
+ inline Logger::Logger(const OrtLogger* logger) : logger_(logger) {
1615
+ Ort::ThrowOnError(GetApi().Logger_GetLoggingSeverityLevel(this->logger_, &this->cached_severity_level_));
1616
+ }
1617
+
1618
+ inline OrtLoggingLevel Logger::GetLoggingSeverityLevel() const noexcept {
1619
+ return cached_severity_level_;
1620
+ }
1621
+
1622
+ inline Status Logger::LogMessage(OrtLoggingLevel log_severity_level, const ORTCHAR_T* file_path, int line_number,
1623
+ const char* func_name, const char* message) const noexcept {
1624
+ OrtStatus* status = GetApi().Logger_LogMessage(logger_, log_severity_level, message, file_path, line_number,
1625
+ func_name);
1626
+ return Status{status};
1627
+ }
1628
+
1629
+ // Disable warnings about the format string not being a literal (-Wformat-nonliteral and -Wformat-security)
1630
+ // for gcc and clang. The alternative is to use actual C-style variadic parameters and apply
1631
+ // __attribute__(format(printf...)), which does not work with variadic templates.
1632
+ #if defined(__GNUC__)
1633
+ #pragma GCC diagnostic push
1634
+ #pragma GCC diagnostic ignored "-Wformat-nonliteral"
1635
+ #pragma GCC diagnostic ignored "-Wformat-security"
1636
+ #elif defined(__clang__)
1637
+ #pragma clang diagnostic push
1638
+ #pragma clang diagnostic ignored "-Wformat-nonliteral"
1639
+ #pragma clang diagnostic ignored "-Wformat-security"
1640
+ #endif
1641
+ template <typename... Args>
1642
+ inline Status Logger::LogFormattedMessage(OrtLoggingLevel log_severity_level, const ORTCHAR_T* file_path,
1643
+ int line_number, const char* func_name, const char* format,
1644
+ Args&&... args) const noexcept {
1645
+ int msg_len = std::snprintf(nullptr, 0U, format, std::forward<Args>(args)...);
1646
+
1647
+ if (msg_len < 0) { // Formatting error
1648
+ return Status("Failed to log message due to formatting error", OrtErrorCode::ORT_FAIL);
1649
+ }
1650
+
1651
+ OrtStatus* status = nullptr;
1652
+ const size_t buffer_size = static_cast<size_t>(msg_len) + 1U;
1653
+
1654
+ constexpr size_t kStackBufferSize = 1024;
1655
+
1656
+ if (buffer_size < kStackBufferSize) {
1657
+ char buffer[kStackBufferSize];
1658
+ snprintf(buffer, kStackBufferSize, format, std::forward<Args>(args)...);
1659
+ status = GetApi().Logger_LogMessage(logger_, log_severity_level, buffer, file_path, line_number, func_name);
1660
+ } else {
1661
+ // std::make_unique is only supported starting at C++14.
1662
+ #if (__cplusplus >= 201402L) || (_MSC_VER >= 1900)
1663
+ auto buffer = std::make_unique<char[]>(buffer_size);
1664
+ #else
1665
+ std::unique_ptr<char[]> buffer(new char[buffer_size]);
1666
+ #endif
1667
+ std::snprintf(buffer.get(), buffer_size, format, std::forward<Args>(args)...);
1668
+ status = GetApi().Logger_LogMessage(logger_, log_severity_level, buffer.get(), file_path, line_number, func_name);
1669
+ }
1670
+
1671
+ return Status{status};
1672
+ }
1673
+ // Re-enable -Wformat-nonliteral and -Wformat-security
1674
+ #if defined(__GNUC__)
1675
+ #pragma GCC diagnostic pop
1676
+ #elif defined(__clang__)
1677
+ #pragma clang diagnostic pop
1678
+ #endif
1679
+
1680
+ inline KernelContext::KernelContext(OrtKernelContext* context) : ctx_(context) {
1681
+ }
1682
+
1683
+ inline size_t KernelContext::GetInputCount() const {
1684
+ size_t out = 0;
1685
+ Ort::ThrowOnError(GetApi().KernelContext_GetInputCount(ctx_, &out));
1686
+ return out;
1687
+ }
1688
+
1689
+ inline size_t KernelContext::GetOutputCount() const {
1690
+ size_t out = 0;
1691
+ Ort::ThrowOnError(GetApi().KernelContext_GetOutputCount(ctx_, &out));
1692
+ return out;
1693
+ }
1694
+
1695
+ inline ConstValue KernelContext::GetInput(size_t index) const {
1696
+ const OrtValue* out = nullptr;
1697
+ Ort::ThrowOnError(GetApi().KernelContext_GetInput(ctx_, index, &out));
1698
+ return ConstValue{out};
1699
+ }
1700
+
1701
+ inline UnownedValue KernelContext::GetOutput(size_t index, const int64_t* dim_values, size_t dim_count) const {
1702
+ OrtValue* out = nullptr;
1703
+ Ort::ThrowOnError(GetApi().KernelContext_GetOutput(ctx_, index, dim_values, dim_count, &out));
1704
+ return UnownedValue(out);
1705
+ }
1706
+
1707
+ inline UnownedValue KernelContext::GetOutput(size_t index, const std::vector<int64_t>& dims) const {
1708
+ OrtValue* out = nullptr;
1709
+ Ort::ThrowOnError(GetApi().KernelContext_GetOutput(ctx_, index, dims.data(), dims.size(), &out));
1710
+ return UnownedValue(out);
1711
+ }
1712
+
1713
+ inline void* KernelContext::GetGPUComputeStream() const {
1714
+ void* out = nullptr;
1715
+ Ort::ThrowOnError(GetApi().KernelContext_GetGPUComputeStream(ctx_, &out));
1716
+ return out;
1717
+ }
1718
+
1719
+ inline OrtAllocator* KernelContext::GetAllocator(const OrtMemoryInfo& memory_info) const {
1720
+ OrtAllocator* out = nullptr;
1721
+ Ort::ThrowOnError(GetApi().KernelContext_GetAllocator(ctx_, &memory_info, &out));
1722
+ return out;
1723
+ }
1724
+
1725
+ inline Logger KernelContext::GetLogger() const {
1726
+ const OrtLogger* out = nullptr;
1727
+ ThrowOnError(GetApi().KernelContext_GetLogger(this->ctx_, &out));
1728
+ return Logger{out};
1729
+ }
1730
+
1731
+ inline void KernelContext::ParallelFor(void (*fn)(void*, size_t), size_t total, size_t num_batch, void* usr_data) const {
1732
+ ThrowOnError(GetApi().KernelContext_ParallelFor(ctx_, fn, total, num_batch, usr_data));
1733
+ }
1734
+
1735
+ inline OpAttr::OpAttr(const char* name, const void* data, int len, OrtOpAttrType type) {
1736
+ Ort::ThrowOnError(GetApi().CreateOpAttr(name, data, len, type, &p_));
1737
+ }
1738
+
1739
+ namespace detail {
1740
+ template <typename T>
1741
+ inline KernelInfo KernelInfoImpl<T>::Copy() const {
1742
+ OrtKernelInfo* info_copy = nullptr;
1743
+ Ort::ThrowOnError(GetApi().CopyKernelInfo(this->p_, &info_copy));
1744
+ return KernelInfo{info_copy};
1745
+ }
1746
+
1747
+ template <typename T>
1748
+ inline size_t KernelInfoImpl<T>::GetInputCount() const {
1749
+ size_t out = 0;
1750
+ ThrowOnError(GetApi().KernelInfo_GetInputCount(this->p_, &out));
1751
+ return out;
1752
+ }
1753
+
1754
+ template <typename T>
1755
+ inline size_t KernelInfoImpl<T>::GetOutputCount() const {
1756
+ size_t out = 0;
1757
+ ThrowOnError(GetApi().KernelInfo_GetOutputCount(this->p_, &out));
1758
+ return out;
1759
+ }
1760
+
1761
+ template <typename T>
1762
+ inline std::string KernelInfoImpl<T>::GetInputName(size_t index) const {
1763
+ size_t size = 0;
1764
+
1765
+ // Feed nullptr for the data buffer to query the true size of the string value
1766
+ Ort::ThrowOnError(GetApi().KernelInfo_GetInputName(this->p_, index, nullptr, &size));
1767
+
1768
+ std::string out;
1769
+ out.resize(size);
1770
+ Ort::ThrowOnError(GetApi().KernelInfo_GetInputName(this->p_, index, &out[0], &size));
1771
+ out.resize(size - 1); // remove the terminating character '\0'
1772
+
1773
+ return out;
1774
+ }
1775
+
1776
+ template <typename T>
1777
+ inline std::string KernelInfoImpl<T>::GetOutputName(size_t index) const {
1778
+ size_t size = 0;
1779
+
1780
+ // Feed nullptr for the data buffer to query the true size of the string value
1781
+ Ort::ThrowOnError(GetApi().KernelInfo_GetOutputName(this->p_, index, nullptr, &size));
1782
+
1783
+ std::string out;
1784
+ out.resize(size);
1785
+ Ort::ThrowOnError(GetApi().KernelInfo_GetOutputName(this->p_, index, &out[0], &size));
1786
+ out.resize(size - 1); // remove the terminating character '\0'
1787
+
1788
+ return out;
1789
+ }
1790
+
1791
+ template <typename T>
1792
+ inline TypeInfo KernelInfoImpl<T>::GetInputTypeInfo(size_t index) const {
1793
+ OrtTypeInfo* out = nullptr;
1794
+ ThrowOnError(GetApi().KernelInfo_GetInputTypeInfo(this->p_, index, &out));
1795
+ return TypeInfo{out};
1796
+ }
1797
+
1798
+ template <typename T>
1799
+ inline TypeInfo KernelInfoImpl<T>::GetOutputTypeInfo(size_t index) const {
1800
+ OrtTypeInfo* out = nullptr;
1801
+ ThrowOnError(GetApi().KernelInfo_GetOutputTypeInfo(this->p_, index, &out));
1802
+ return TypeInfo{out};
1803
+ }
1804
+
1805
+ template <typename T>
1806
+ inline Value KernelInfoImpl<T>::GetTensorAttribute(const char* name, OrtAllocator* allocator) const {
1807
+ OrtValue* out = nullptr;
1808
+ ThrowOnError(GetApi().KernelInfoGetAttribute_tensor(this->p_, name, allocator, &out));
1809
+ return Value{out};
1810
+ }
1811
+
1812
+ template <typename T>
1813
+ inline ConstValue KernelInfoImpl<T>::GetTensorConstantInput(size_t index, int* is_constant) const {
1814
+ const OrtValue* out = nullptr;
1815
+ ThrowOnError(GetApi().KernelInfoGetConstantInput_tensor(this->p_, index, is_constant, &out));
1816
+ return ConstValue{out};
1817
+ }
1818
+
1819
+ template <typename T>
1820
+ inline std::string KernelInfoImpl<T>::GetNodeName() const {
1821
+ size_t size = 0;
1822
+
1823
+ // Feed nullptr for the data buffer to query the true size of the string value
1824
+ Ort::ThrowOnError(GetApi().KernelInfo_GetNodeName(this->p_, nullptr, &size));
1825
+
1826
+ std::string out;
1827
+ out.resize(size);
1828
+ Ort::ThrowOnError(GetApi().KernelInfo_GetNodeName(this->p_, &out[0], &size));
1829
+ out.resize(size - 1); // remove the terminating character '\0'
1830
+
1831
+ return out;
1832
+ }
1833
+
1834
+ template <typename T>
1835
+ inline Logger KernelInfoImpl<T>::GetLogger() const {
1836
+ const OrtLogger* out = nullptr;
1837
+ ThrowOnError(GetApi().KernelInfo_GetLogger(this->p_, &out));
1838
+ return Logger{out};
1839
+ }
1840
+
1841
+ inline void attr_utils::GetAttr(const OrtKernelInfo* p, const char* name, float& out) {
1842
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_float(p, name, &out));
1843
+ }
1844
+
1845
+ inline void attr_utils::GetAttr(const OrtKernelInfo* p, const char* name, int64_t& out) {
1846
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_int64(p, name, &out));
1847
+ }
1848
+
1849
+ inline void attr_utils::GetAttr(const OrtKernelInfo* p, const char* name, std::string& result) {
1850
+ size_t size = 0;
1851
+ // Feed nullptr for the data buffer to query the true size of the string attribute
1852
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_string(p, name, nullptr, &size));
1853
+
1854
+ std::string out;
1855
+ out.resize(size);
1856
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_string(p, name, &out[0], &size));
1857
+ out.resize(size - 1); // remove the terminating character '\0'
1858
+ out.swap(result);
1859
+ }
1860
+
1861
+ inline void attr_utils::GetAttrs(const OrtKernelInfo* p, const char* name, std::vector<float>& result) {
1862
+ size_t size = 0;
1863
+ // Feed nullptr for the data buffer to query the true size of the attribute
1864
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_float(p, name, nullptr, &size));
1865
+
1866
+ std::vector<float> out;
1867
+ out.resize(size);
1868
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_float(p, name, out.data(), &size));
1869
+ out.swap(result);
1870
+ }
1871
+
1872
+ inline void attr_utils::GetAttrs(const OrtKernelInfo* p, const char* name, std::vector<int64_t>& result) {
1873
+ size_t size = 0;
1874
+
1875
+ // Feed nullptr for the data buffer to query the true size of the attribute
1876
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_int64(p, name, nullptr, &size));
1877
+
1878
+ std::vector<int64_t> out;
1879
+ out.resize(size);
1880
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_int64(p, name, out.data(), &size));
1881
+ out.swap(result);
1882
+ }
1883
+ } // namespace detail
1884
+
1885
+ inline KernelInfo::KernelInfo(OrtKernelInfo* info) : detail::KernelInfoImpl<OrtKernelInfo>{info} {}
1886
+
1887
+ inline Op::Op(OrtOp* p) : Base<OrtOp>(p) {}
1888
+
1889
+ inline Op Op::Create(const OrtKernelInfo* info, const char* op_name, const char* domain, int version,
1890
+ const char** type_constraint_names,
1891
+ const ONNXTensorElementDataType* type_constraint_values,
1892
+ size_t type_constraint_count,
1893
+ const OpAttr* attr_values, size_t attr_count,
1894
+ size_t input_count, size_t output_count) {
1895
+ static_assert(sizeof(OpAttr) == sizeof(OrtOpAttr*),
1896
+ "OpAttr's is expected to be just an array of OrtOpAttr in memory so we can reinterpret safely");
1897
+ auto attr_input_values = reinterpret_cast<const OrtOpAttr* const*>(attr_values);
1898
+ OrtOp* op;
1899
+ Ort::ThrowOnError(GetApi().CreateOp(info, op_name, domain, version, type_constraint_names, type_constraint_values,
1900
+ static_cast<int>(type_constraint_count),
1901
+ attr_input_values,
1902
+ static_cast<int>(attr_count),
1903
+ static_cast<int>(input_count),
1904
+ static_cast<int>(output_count), &op));
1905
+ return Op{op};
1906
+ }
1907
+
1908
+ inline void Op::Invoke(const OrtKernelContext* context,
1909
+ const Value* input_values,
1910
+ size_t input_count,
1911
+ Value* output_values,
1912
+ size_t output_count) {
1913
+ static_assert(sizeof(Value) == sizeof(OrtValue*),
1914
+ "Value is really just an array of OrtValue* in memory, so we can reinterpret_cast safely");
1915
+ auto ort_input_values = reinterpret_cast<const OrtValue* const*>(input_values);
1916
+ auto ort_output_values = reinterpret_cast<OrtValue**>(output_values);
1917
+ Ort::ThrowOnError(GetApi().InvokeOp(context, p_, ort_input_values, static_cast<int>(input_count),
1918
+ ort_output_values, static_cast<int>(output_count)));
1919
+ }
1920
+
1921
+ inline void Op::Invoke(const OrtKernelContext* context,
1922
+ const OrtValue* const* input_values,
1923
+ size_t input_count,
1924
+ OrtValue* const* output_values,
1925
+ size_t output_count) {
1926
+ Ort::ThrowOnError(GetApi().InvokeOp(context, p_, input_values, static_cast<int>(input_count),
1927
+ output_values, static_cast<int>(output_count)));
1928
+ }
1929
+
1930
+ inline std::string GetVersionString() {
1931
+ return OrtGetApiBase()->GetVersionString();
1932
+ }
1933
+
1934
+ inline std::string GetBuildInfoString() {
1935
+ return GetApi().GetBuildInfoString();
1936
+ }
1937
+
1938
+ inline std::vector<std::string> GetAvailableProviders() {
1939
+ char** providers;
1940
+ int len;
1941
+
1942
+ auto release_fn = [&len](char** providers) {
1943
+ // This should always return nullptr.
1944
+ ThrowOnError(GetApi().ReleaseAvailableProviders(providers, len));
1945
+ };
1946
+
1947
+ ThrowOnError(GetApi().GetAvailableProviders(&providers, &len));
1948
+ std::unique_ptr<char*, decltype(release_fn)> guard(providers, release_fn);
1949
+ std::vector<std::string> available_providers;
1950
+ available_providers.reserve(static_cast<size_t>(len));
1951
+ for (int i = 0; i < len; ++i) {
1952
+ available_providers.emplace_back(providers[i]);
1953
+ }
1954
+ return available_providers;
1955
+ }
1956
+
1957
+ template <typename TOp, typename TKernel, bool WithStatus>
1958
+ void CustomOpBase<TOp, TKernel, WithStatus>::GetSessionConfigs(std::unordered_map<std::string, std::string>& out,
1959
+ ConstSessionOptions options) const {
1960
+ const TOp* derived = static_cast<const TOp*>(this);
1961
+ std::vector<std::string> keys = derived->GetSessionConfigKeys();
1962
+
1963
+ out.reserve(keys.size());
1964
+
1965
+ std::string config_entry_key = detail::MakeCustomOpConfigEntryKey(derived->GetName(), "");
1966
+ const size_t prefix_size = config_entry_key.length();
1967
+
1968
+ for (const auto& key : keys) {
1969
+ config_entry_key.resize(prefix_size);
1970
+ config_entry_key.append(key);
1971
+ out[key] = options.GetConfigEntryOrDefault(config_entry_key.c_str(), "");
1972
+ }
1973
+ }
1974
+
1975
+ inline ShapeInferContext::ShapeInferContext(const OrtApi* ort_api,
1976
+ OrtShapeInferContext* ctx) : ort_api_(ort_api), ctx_(ctx) {
1977
+ size_t input_count = 0;
1978
+ Ort::ThrowOnError(ort_api_->ShapeInferContext_GetInputCount(ctx_, &input_count));
1979
+ for (size_t ith_input = 0; ith_input < input_count; ++ith_input) {
1980
+ OrtTensorTypeAndShapeInfo* info{};
1981
+ Ort::ThrowOnError(ort_api_->ShapeInferContext_GetInputTypeShape(ctx, ith_input, &info));
1982
+ TensorTypeAndShapeInfo type_shape_info(info);
1983
+ auto integer_shape = type_shape_info.GetShape();
1984
+ std::vector<const char*> symbolic_shape(integer_shape.size(), {});
1985
+ if (!integer_shape.empty()) {
1986
+ type_shape_info.GetSymbolicDimensions(&symbolic_shape[0], integer_shape.size());
1987
+ }
1988
+ Shape shape;
1989
+ for (size_t ith = 0; ith < integer_shape.size(); ++ith) {
1990
+ if (symbolic_shape[ith] && std::string{symbolic_shape[ith]}.size() > 0) {
1991
+ shape.emplace_back(symbolic_shape[ith]);
1992
+ } else {
1993
+ shape.emplace_back(integer_shape[ith]);
1994
+ }
1995
+ }
1996
+ input_shapes_.push_back(std::move(shape));
1997
+ type_shape_info.release();
1998
+ }
1999
+ }
2000
+
2001
+ inline Status ShapeInferContext::SetOutputShape(size_t indice, const Shape& shape, ONNXTensorElementDataType type) {
2002
+ OrtTensorTypeAndShapeInfo* info = {};
2003
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->CreateTensorTypeAndShapeInfo(&info));
2004
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->SetTensorElementType(info, type));
2005
+
2006
+ using InfoPtr = std::unique_ptr<OrtTensorTypeAndShapeInfo, std::function<void(OrtTensorTypeAndShapeInfo*)>>;
2007
+
2008
+ InfoPtr info_ptr(info, [this](OrtTensorTypeAndShapeInfo* obj) {
2009
+ ort_api_->ReleaseTensorTypeAndShapeInfo(obj);
2010
+ });
2011
+
2012
+ std::vector<int64_t> integer_dims;
2013
+ std::vector<const char*> symbolic_dims;
2014
+
2015
+ for (const auto dim : shape) {
2016
+ if (dim.IsInt()) {
2017
+ integer_dims.push_back(dim.AsInt());
2018
+ symbolic_dims.push_back("");
2019
+ } else {
2020
+ if (!dim.AsSym() || std::string{dim.AsSym()}.empty()) {
2021
+ ORT_CXX_API_THROW("Symbolic dim must not be an empty string", ORT_INVALID_ARGUMENT);
2022
+ }
2023
+ integer_dims.push_back(SymbolicInteger::INVALID_INT_DIM);
2024
+ symbolic_dims.push_back(dim.AsSym());
2025
+ }
2026
+ }
2027
+
2028
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->SetDimensions(info, integer_dims.data(), integer_dims.size()));
2029
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->SetSymbolicDimensions(info, symbolic_dims.data(), symbolic_dims.size()));
2030
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->ShapeInferContext_SetOutputTypeShape(ctx_, indice, info));
2031
+ return Status{nullptr};
2032
+ }
2033
+
2034
+ inline int64_t ShapeInferContext::GetAttrInt(const char* attr_name) {
2035
+ const auto* attr = GetAttrHdl(attr_name);
2036
+ int64_t i = {};
2037
+ size_t out = {};
2038
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_INT, &i, sizeof(i), &out));
2039
+ return i;
2040
+ }
2041
+
2042
+ inline ShapeInferContext::Ints ShapeInferContext::GetAttrInts(const char* attr_name) {
2043
+ const auto* attr = GetAttrHdl(attr_name);
2044
+ int64_t i = {};
2045
+ size_t out = {};
2046
+ // first call to get the bytes needed
2047
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_INTS, &i, sizeof(i), &out);
2048
+ if (status) {
2049
+ size_t num_i = out / sizeof(int64_t);
2050
+ ShapeInferContext::Ints ints(num_i, 0);
2051
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_INTS, ints.data(), out, &out));
2052
+ return ints;
2053
+ } else {
2054
+ return {i};
2055
+ }
2056
+ }
2057
+
2058
+ inline float ShapeInferContext::GetAttrFloat(const char* attr_name) {
2059
+ const auto* attr = GetAttrHdl(attr_name);
2060
+ float f = {};
2061
+ size_t out = {};
2062
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_FLOAT, &f, sizeof(f), &out));
2063
+ return f;
2064
+ }
2065
+
2066
+ inline ShapeInferContext::Floats ShapeInferContext::GetAttrFloats(const char* attr_name) {
2067
+ const auto* attr = GetAttrHdl(attr_name);
2068
+ float f = {};
2069
+ size_t out = {};
2070
+ // first call to get the bytes needed
2071
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_FLOATS, &f, sizeof(f), &out);
2072
+ if (status) {
2073
+ size_t num_f = out / sizeof(float);
2074
+ ShapeInferContext::Floats floats(num_f, 0);
2075
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_FLOATS, floats.data(), out, &out));
2076
+ return floats;
2077
+ } else {
2078
+ return {f};
2079
+ }
2080
+ }
2081
+
2082
+ inline std::string ShapeInferContext::GetAttrString(const char* attr_name) {
2083
+ const auto* attr = GetAttrHdl(attr_name);
2084
+ char c = {};
2085
+ size_t out = {};
2086
+ // first call to get the bytes needed
2087
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRING, &c, sizeof(char), &out);
2088
+ if (status) {
2089
+ std::vector<char> chars(out, '\0');
2090
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRING, chars.data(), out, &out));
2091
+ return {chars.data()};
2092
+ } else {
2093
+ return {c};
2094
+ }
2095
+ }
2096
+
2097
+ inline ShapeInferContext::Strings ShapeInferContext::GetAttrStrings(const char* attr_name) {
2098
+ const auto* attr = GetAttrHdl(attr_name);
2099
+ char c = {};
2100
+ size_t out = {};
2101
+ // first call to get the bytes needed
2102
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRINGS, &c, sizeof(char), &out);
2103
+ if (status) {
2104
+ std::vector<char> chars(out, '\0');
2105
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRINGS, chars.data(), out, &out));
2106
+ ShapeInferContext::Strings strings;
2107
+ char* char_st = chars.data();
2108
+ char* char_ed = char_st + out;
2109
+ while (char_st < char_ed) {
2110
+ strings.emplace_back(char_st);
2111
+ while (*char_st != '\0') {
2112
+ char_st++;
2113
+ }
2114
+ char_st++;
2115
+ }
2116
+ return strings;
2117
+ } else {
2118
+ return {std::string{c}};
2119
+ }
2120
+ }
2121
+
2122
+ inline const OrtOpAttr* ShapeInferContext::GetAttrHdl(const char* attr_name) const {
2123
+ const OrtOpAttr* attr_hdl = {};
2124
+ Ort::ThrowOnError(ort_api_->ShapeInferContext_GetAttribute(ctx_, attr_name, &attr_hdl));
2125
+ return attr_hdl;
2126
+ }
2127
+
2128
+ } // namespace Ort
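
The inline definitions above complete the exception-throwing C++ wrappers over the C API (tensor creation, model metadata, logging, custom-op and shape-inference helpers). A minimal usage sketch of a few helpers defined in this file follows; it is illustrative only and not part of the shipped headers, and the tensor data and shape are made up for the example:

#include <array>
#include <iostream>
#include "onnxruntime_cxx_api.h"  // brings in onnxruntime_cxx_inline.h

int main() {
  // Initializes the default logger used by the API.
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "demo");

  // Version/build helpers defined in this file.
  std::cout << "onnxruntime " << Ort::GetVersionString() << "\n";

  // Execution providers compiled into this build (see GetAvailableProviders above).
  for (const auto& provider : Ort::GetAvailableProviders()) {
    std::cout << "provider: " << provider << "\n";
  }

  // Wrap caller-owned memory in an OrtValue without copying; CreateTensor<float>
  // above forwards to CreateTensorWithDataAsOrtValue.
  std::array<float, 6> data{1, 2, 3, 4, 5, 6};
  std::array<int64_t, 2> shape{2, 3};
  auto mem_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  Ort::Value tensor = Ort::Value::CreateTensor<float>(mem_info, data.data(), data.size(),
                                                      shape.data(), shape.size());
  std::cout << "element count: "
            << tensor.GetTensorTypeAndShapeInfo().GetElementCount() << "\n";
  return 0;
}
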
1.19.0/onnxruntime.xcframework/Headers/onnxruntime_float16.h ADDED
@@ -0,0 +1,540 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ #pragma once
5
+
6
+ #include <stdint.h>
7
+ #include <cmath>
8
+ #include <cstring>
9
+ #include <limits>
10
+
11
+ namespace onnxruntime_float16 {
12
+
13
+ namespace detail {
14
+
15
+ enum class endian {
16
+ #if defined(_WIN32)
17
+ little = 0,
18
+ big = 1,
19
+ native = little,
20
+ #elif defined(__GNUC__) || defined(__clang__)
21
+ little = __ORDER_LITTLE_ENDIAN__,
22
+ big = __ORDER_BIG_ENDIAN__,
23
+ native = __BYTE_ORDER__,
24
+ #else
25
+ #error onnxruntime_float16::detail::endian is not implemented in this environment.
26
+ #endif
27
+ };
28
+
29
+ static_assert(
30
+ endian::native == endian::little || endian::native == endian::big,
31
+ "Only little-endian or big-endian native byte orders are supported.");
32
+
33
+ } // namespace detail
34
+
35
+ /// <summary>
36
+ /// Shared implementation between public and internal classes. CRTP pattern.
37
+ /// </summary>
38
+ template <class Derived>
39
+ struct Float16Impl {
40
+ protected:
41
+ /// <summary>
42
+ /// Converts from float to uint16_t float16 representation
43
+ /// </summary>
44
+ /// <param name="v"></param>
45
+ /// <returns></returns>
46
+ constexpr static uint16_t ToUint16Impl(float v) noexcept;
47
+
48
+ /// <summary>
49
+ /// Converts float16 to float
50
+ /// </summary>
51
+ /// <returns>float representation of float16 value</returns>
52
+ float ToFloatImpl() const noexcept;
53
+
54
+ /// <summary>
55
+ /// Creates an instance that represents absolute value.
56
+ /// </summary>
57
+ /// <returns>Absolute value</returns>
58
+ uint16_t AbsImpl() const noexcept {
59
+ return static_cast<uint16_t>(val & ~kSignMask);
60
+ }
61
+
62
+ /// <summary>
63
+ /// Creates a new instance with the sign flipped.
64
+ /// </summary>
65
+ /// <returns>Flipped sign instance</returns>
66
+ uint16_t NegateImpl() const noexcept {
67
+ return IsNaN() ? val : static_cast<uint16_t>(val ^ kSignMask);
68
+ }
69
+
70
+ public:
71
+ // uint16_t special values
72
+ static constexpr uint16_t kSignMask = 0x8000U;
73
+ static constexpr uint16_t kBiasedExponentMask = 0x7C00U;
74
+ static constexpr uint16_t kPositiveInfinityBits = 0x7C00U;
75
+ static constexpr uint16_t kNegativeInfinityBits = 0xFC00U;
76
+ static constexpr uint16_t kPositiveQNaNBits = 0x7E00U;
77
+ static constexpr uint16_t kNegativeQNaNBits = 0xFE00U;
78
+ static constexpr uint16_t kEpsilonBits = 0x4170U;
79
+ static constexpr uint16_t kMinValueBits = 0xFBFFU; // Minimum normal number
80
+ static constexpr uint16_t kMaxValueBits = 0x7BFFU; // Largest normal number
81
+ static constexpr uint16_t kOneBits = 0x3C00U;
82
+ static constexpr uint16_t kMinusOneBits = 0xBC00U;
83
+
84
+ uint16_t val{0};
85
+
86
+ Float16Impl() = default;
87
+
88
+ /// <summary>
89
+ /// Checks if the value is negative
90
+ /// </summary>
91
+ /// <returns>true if negative</returns>
92
+ bool IsNegative() const noexcept {
93
+ return static_cast<int16_t>(val) < 0;
94
+ }
95
+
96
+ /// <summary>
97
+ /// Tests if the value is NaN
98
+ /// </summary>
99
+ /// <returns>true if NaN</returns>
100
+ bool IsNaN() const noexcept {
101
+ return AbsImpl() > kPositiveInfinityBits;
102
+ }
103
+
104
+ /// <summary>
105
+ /// Tests if the value is finite
106
+ /// </summary>
107
+ /// <returns>true if finite</returns>
108
+ bool IsFinite() const noexcept {
109
+ return AbsImpl() < kPositiveInfinityBits;
110
+ }
111
+
112
+ /// <summary>
113
+ /// Tests if the value represents positive infinity.
114
+ /// </summary>
115
+ /// <returns>true if positive infinity</returns>
116
+ bool IsPositiveInfinity() const noexcept {
117
+ return val == kPositiveInfinityBits;
118
+ }
119
+
120
+ /// <summary>
121
+ /// Tests if the value represents negative infinity
122
+ /// </summary>
123
+ /// <returns>true if negative infinity</returns>
124
+ bool IsNegativeInfinity() const noexcept {
125
+ return val == kNegativeInfinityBits;
126
+ }
127
+
128
+ /// <summary>
129
+ /// Tests if the value is either positive or negative infinity.
130
+ /// </summary>
131
+ /// <returns>True if absolute value is infinity</returns>
132
+ bool IsInfinity() const noexcept {
133
+ return AbsImpl() == kPositiveInfinityBits;
134
+ }
135
+
136
+ /// <summary>
137
+ /// Tests if the value is NaN or zero. Useful for comparisons.
138
+ /// </summary>
139
+ /// <returns>True if NaN or zero.</returns>
140
+ bool IsNaNOrZero() const noexcept {
141
+ auto abs = AbsImpl();
142
+ return (abs == 0 || abs > kPositiveInfinityBits);
143
+ }
144
+
145
+ /// <summary>
146
+ /// Tests if the value is normal (not zero, subnormal, infinite, or NaN).
147
+ /// </summary>
148
+ /// <returns>True if so</returns>
149
+ bool IsNormal() const noexcept {
150
+ auto abs = AbsImpl();
151
+ return (abs < kPositiveInfinityBits) // is finite
152
+ && (abs != 0) // is not zero
153
+ && ((abs & kBiasedExponentMask) != 0); // is not subnormal (has a non-zero exponent)
154
+ }
155
+
156
+ /// <summary>
157
+ /// Tests if the value is subnormal (denormal).
158
+ /// </summary>
159
+ /// <returns>True if so</returns>
160
+ bool IsSubnormal() const noexcept {
161
+ auto abs = AbsImpl();
162
+ return (abs < kPositiveInfinityBits) // is finite
163
+ && (abs != 0) // is not zero
164
+ && ((abs & kBiasedExponentMask) == 0); // is subnormal (has a zero exponent)
165
+ }
166
+
167
+ /// <summary>
168
+ /// Creates an instance that represents absolute value.
169
+ /// </summary>
170
+ /// <returns>Absolute value</returns>
171
+ Derived Abs() const noexcept { return Derived::FromBits(AbsImpl()); }
172
+
173
+ /// <summary>
174
+ /// Creates a new instance with the sign flipped.
175
+ /// </summary>
176
+ /// <returns>Flipped sign instance</returns>
177
+ Derived Negate() const noexcept { return Derived::FromBits(NegateImpl()); }
178
+
179
+ /// <summary>
180
+ /// IEEE defines that positive and negative zero are equal, this gives us a quick equality check
181
+ /// for two values by or'ing the private bits together and stripping the sign. They are both zero,
182
+ /// and therefore equivalent, if the resulting value is still zero.
183
+ /// </summary>
184
+ /// <param name="lhs">first value</param>
185
+ /// <param name="rhs">second value</param>
186
+ /// <returns>True if both arguments represent zero</returns>
187
+ static bool AreZero(const Float16Impl& lhs, const Float16Impl& rhs) noexcept {
188
+ return static_cast<uint16_t>((lhs.val | rhs.val) & ~kSignMask) == 0;
189
+ }
190
+
191
+ bool operator==(const Float16Impl& rhs) const noexcept {
192
+ if (IsNaN() || rhs.IsNaN()) {
193
+ // IEEE defines that NaN is not equal to anything, including itself.
194
+ return false;
195
+ }
196
+ return val == rhs.val;
197
+ }
198
+
199
+ bool operator!=(const Float16Impl& rhs) const noexcept { return !(*this == rhs); }
200
+
201
+ bool operator<(const Float16Impl& rhs) const noexcept {
202
+ if (IsNaN() || rhs.IsNaN()) {
203
+ // IEEE defines that NaN is unordered with respect to everything, including itself.
204
+ return false;
205
+ }
206
+
207
+ const bool left_is_negative = IsNegative();
208
+ if (left_is_negative != rhs.IsNegative()) {
209
+ // When the signs of left and right differ, we know that left is less than right if it is
210
+ // the negative value. The exception to this is if both values are zero, in which case IEEE
211
+ // says they should be equal, even if the signs differ.
212
+ return left_is_negative && !AreZero(*this, rhs);
213
+ }
214
+ return (val != rhs.val) && ((val < rhs.val) ^ left_is_negative);
215
+ }
216
+ };
217
+
218
+ // The following Float16_t conversions are based on the code from
219
+ // Eigen library.
220
+
221
+ // The conversion routines are Copyright (c) Fabian Giesen, 2016.
222
+ // The original license follows:
223
+ //
224
+ // Copyright (c) Fabian Giesen, 2016
225
+ // All rights reserved.
226
+ // Redistribution and use in source and binary forms, with or without
227
+ // modification, are permitted.
228
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
229
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
230
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
231
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
232
+ // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
233
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
234
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
235
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
236
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
237
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
238
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
239
+
240
+ namespace detail {
241
+ union float32_bits {
242
+ unsigned int u;
243
+ float f;
244
+ };
245
+ } // namespace detail
246
+
247
+ template <class Derived>
248
+ inline constexpr uint16_t Float16Impl<Derived>::ToUint16Impl(float v) noexcept {
249
+ detail::float32_bits f{};
250
+ f.f = v;
251
+
252
+ constexpr detail::float32_bits f32infty = {255 << 23};
253
+ constexpr detail::float32_bits f16max = {(127 + 16) << 23};
254
+ constexpr detail::float32_bits denorm_magic = {((127 - 15) + (23 - 10) + 1) << 23};
255
+ constexpr unsigned int sign_mask = 0x80000000u;
256
+ uint16_t val = static_cast<uint16_t>(0x0u);
257
+
258
+ unsigned int sign = f.u & sign_mask;
259
+ f.u ^= sign;
260
+
261
+ // NOTE all the integer compares in this function can be safely
262
+ // compiled into signed compares since all operands are below
263
+ // 0x80000000. Important if you want fast straight SSE2 code
264
+ // (since there's no unsigned PCMPGTD).
265
+
266
+ if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set)
267
+ val = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf
268
+ } else { // (De)normalized number or zero
269
+ if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero
270
+ // use a magic value to align our 10 mantissa bits at the bottom of
271
+ // the float. as long as FP addition is round-to-nearest-even this
272
+ // just works.
273
+ f.f += denorm_magic.f;
274
+
275
+ // and one integer subtract of the bias later, we have our final float!
276
+ val = static_cast<uint16_t>(f.u - denorm_magic.u);
277
+ } else {
278
+ unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd
279
+
280
+ // update exponent, rounding bias part 1
281
+ // Equivalent to `f.u += ((unsigned int)(15 - 127) << 23) + 0xfff`, but
282
+ // without arithmetic overflow.
283
+ f.u += 0xc8000fffU;
284
+ // rounding bias part 2
285
+ f.u += mant_odd;
286
+ // take the bits!
287
+ val = static_cast<uint16_t>(f.u >> 13);
288
+ }
289
+ }
290
+
291
+ val |= static_cast<uint16_t>(sign >> 16);
292
+ return val;
293
+ }
294
+
295
+ template <class Derived>
296
+ inline float Float16Impl<Derived>::ToFloatImpl() const noexcept {
297
+ constexpr detail::float32_bits magic = {113 << 23};
298
+ constexpr unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
299
+ detail::float32_bits o{};
300
+
301
+ o.u = (val & 0x7fff) << 13; // exponent/mantissa bits
302
+ unsigned int exp = shifted_exp & o.u; // just the exponent
303
+ o.u += (127 - 15) << 23; // exponent adjust
304
+
305
+ // handle exponent special cases
306
+ if (exp == shifted_exp) { // Inf/NaN?
307
+ o.u += (128 - 16) << 23; // extra exp adjust
308
+ } else if (exp == 0) { // Zero/Denormal?
309
+ o.u += 1 << 23; // extra exp adjust
310
+ o.f -= magic.f; // re-normalize
311
+ }
312
+
313
+ // Attempt to workaround the Internal Compiler Error on ARM64
314
+ // for bitwise | operator, including std::bitset
315
+ #if (defined _MSC_VER) && (defined _M_ARM || defined _M_ARM64 || defined _M_ARM64EC)
316
+ if (IsNegative()) {
317
+ return -o.f;
318
+ }
319
+ #else
320
+ // original code:
321
+ o.u |= (val & 0x8000U) << 16U; // sign bit
322
+ #endif
323
+ return o.f;
324
+ }
325
+
326
+ /// Shared implementation between public and internal classes. CRTP pattern.
327
+ template <class Derived>
328
+ struct BFloat16Impl {
329
+ protected:
330
+ /// <summary>
331
+ /// Converts from float to uint16_t float16 representation
332
+ /// </summary>
333
+ /// <param name="v"></param>
334
+ /// <returns></returns>
335
+ static uint16_t ToUint16Impl(float v) noexcept;
336
+
337
+ /// <summary>
338
+ /// Converts bfloat16 to float
339
+ /// </summary>
340
+ /// <returns>float representation of bfloat16 value</returns>
341
+ float ToFloatImpl() const noexcept;
342
+
343
+ /// <summary>
344
+ /// Creates an instance that represents absolute value.
345
+ /// </summary>
346
+ /// <returns>Absolute value</returns>
347
+ uint16_t AbsImpl() const noexcept {
348
+ return static_cast<uint16_t>(val & ~kSignMask);
349
+ }
350
+
351
+ /// <summary>
352
+ /// Creates a new instance with the sign flipped.
353
+ /// </summary>
354
+ /// <returns>Flipped sign instance</returns>
355
+ uint16_t NegateImpl() const noexcept {
356
+ return IsNaN() ? val : static_cast<uint16_t>(val ^ kSignMask);
357
+ }
358
+
359
+ public:
360
+ // uint16_t special values
361
+ static constexpr uint16_t kSignMask = 0x8000U;
362
+ static constexpr uint16_t kBiasedExponentMask = 0x7F80U;
363
+ static constexpr uint16_t kPositiveInfinityBits = 0x7F80U;
364
+ static constexpr uint16_t kNegativeInfinityBits = 0xFF80U;
365
+ static constexpr uint16_t kPositiveQNaNBits = 0x7FC1U;
366
+ static constexpr uint16_t kNegativeQNaNBits = 0xFFC1U;
367
+ static constexpr uint16_t kSignaling_NaNBits = 0x7F80U;
368
+ static constexpr uint16_t kEpsilonBits = 0x0080U;
369
+ static constexpr uint16_t kMinValueBits = 0xFF7FU;
370
+ static constexpr uint16_t kMaxValueBits = 0x7F7FU;
371
+ static constexpr uint16_t kRoundToNearest = 0x7FFFU;
372
+ static constexpr uint16_t kOneBits = 0x3F80U;
373
+ static constexpr uint16_t kMinusOneBits = 0xBF80U;
374
+
375
+ uint16_t val{0};
376
+
377
+ BFloat16Impl() = default;
378
+
379
+ /// <summary>
380
+ /// Checks if the value is negative
381
+ /// </summary>
382
+ /// <returns>true if negative</returns>
383
+ bool IsNegative() const noexcept {
384
+ return static_cast<int16_t>(val) < 0;
385
+ }
386
+
387
+ /// <summary>
388
+ /// Tests if the value is NaN
389
+ /// </summary>
390
+ /// <returns>true if NaN</returns>
391
+ bool IsNaN() const noexcept {
392
+ return AbsImpl() > kPositiveInfinityBits;
393
+ }
394
+
395
+ /// <summary>
396
+ /// Tests if the value is finite
397
+ /// </summary>
398
+ /// <returns>true if finite</returns>
399
+ bool IsFinite() const noexcept {
400
+ return AbsImpl() < kPositiveInfinityBits;
401
+ }
402
+
403
+ /// <summary>
404
+ /// Tests if the value represents positive infinity.
405
+ /// </summary>
406
+ /// <returns>true if positive infinity</returns>
407
+ bool IsPositiveInfinity() const noexcept {
408
+ return val == kPositiveInfinityBits;
409
+ }
410
+
411
+ /// <summary>
412
+ /// Tests if the value represents negative infinity
413
+ /// </summary>
414
+ /// <returns>true if negative infinity</returns>
415
+ bool IsNegativeInfinity() const noexcept {
416
+ return val == kNegativeInfinityBits;
417
+ }
418
+
419
+ /// <summary>
420
+ /// Tests if the value is either positive or negative infinity.
421
+ /// </summary>
422
+ /// <returns>True if absolute value is infinity</returns>
423
+ bool IsInfinity() const noexcept {
424
+ return AbsImpl() == kPositiveInfinityBits;
425
+ }
426
+
427
+ /// <summary>
428
+ /// Tests if the value is NaN or zero. Useful for comparisons.
429
+ /// </summary>
430
+ /// <returns>True if NaN or zero.</returns>
431
+ bool IsNaNOrZero() const noexcept {
432
+ auto abs = AbsImpl();
433
+ return (abs == 0 || abs > kPositiveInfinityBits);
434
+ }
435
+
436
+ /// <summary>
437
+ /// Tests if the value is normal (not zero, subnormal, infinite, or NaN).
438
+ /// </summary>
439
+ /// <returns>True if so</returns>
440
+ bool IsNormal() const noexcept {
441
+ auto abs = AbsImpl();
442
+ return (abs < kPositiveInfinityBits) // is finite
443
+ && (abs != 0) // is not zero
444
+ && ((abs & kBiasedExponentMask) != 0); // is not subnormal (has a non-zero exponent)
445
+ }
446
+
447
+ /// <summary>
448
+ /// Tests if the value is subnormal (denormal).
449
+ /// </summary>
450
+ /// <returns>True if so</returns>
451
+ bool IsSubnormal() const noexcept {
452
+ auto abs = AbsImpl();
453
+ return (abs < kPositiveInfinityBits) // is finite
454
+ && (abs != 0) // is not zero
455
+ && ((abs & kBiasedExponentMask) == 0); // is subnormal (has a zero exponent)
456
+ }
457
+
458
+ /// <summary>
459
+ /// Creates an instance that represents absolute value.
460
+ /// </summary>
461
+ /// <returns>Absolute value</returns>
462
+ Derived Abs() const noexcept { return Derived::FromBits(AbsImpl()); }
463
+
464
+ /// <summary>
465
+ /// Creates a new instance with the sign flipped.
466
+ /// </summary>
467
+ /// <returns>Flipped sign instance</returns>
468
+ Derived Negate() const noexcept { return Derived::FromBits(NegateImpl()); }
469
+
470
+ /// <summary>
471
+ /// IEEE defines that positive and negative zero are equal, this gives us a quick equality check
472
+ /// for two values by or'ing the private bits together and stripping the sign. They are both zero,
473
+ /// and therefore equivalent, if the resulting value is still zero.
474
+ /// </summary>
475
+ /// <param name="lhs">first value</param>
476
+ /// <param name="rhs">second value</param>
477
+ /// <returns>True if both arguments represent zero</returns>
478
+ static bool AreZero(const BFloat16Impl& lhs, const BFloat16Impl& rhs) noexcept {
479
+ // IEEE defines that positive and negative zero are equal, this gives us a quick equality check
480
+ // for two values by or'ing the private bits together and stripping the sign. They are both zero,
481
+ // and therefore equivalent, if the resulting value is still zero.
482
+ return static_cast<uint16_t>((lhs.val | rhs.val) & ~kSignMask) == 0;
483
+ }
484
+ };
485
+
486
+ template <class Derived>
487
+ inline uint16_t BFloat16Impl<Derived>::ToUint16Impl(float v) noexcept {
488
+ uint16_t result;
489
+ if (std::isnan(v)) {
490
+ result = kPositiveQNaNBits;
491
+ } else {
492
+ auto get_msb_half = [](float fl) {
493
+ uint16_t result;
494
+ #ifdef __cpp_if_constexpr
495
+ if constexpr (detail::endian::native == detail::endian::little) {
496
+ #else
497
+ if (detail::endian::native == detail::endian::little) {
498
+ #endif
499
+ std::memcpy(&result, reinterpret_cast<char*>(&fl) + sizeof(uint16_t), sizeof(uint16_t));
500
+ } else {
501
+ std::memcpy(&result, &fl, sizeof(uint16_t));
502
+ }
503
+ return result;
504
+ };
505
+
506
+ uint16_t upper_bits = get_msb_half(v);
507
+ union {
508
+ uint32_t U32;
509
+ float F32;
510
+ };
511
+ F32 = v;
512
+ U32 += (upper_bits & 1) + kRoundToNearest;
513
+ result = get_msb_half(F32);
514
+ }
515
+ return result;
516
+ }
517
+
518
+ template <class Derived>
519
+ inline float BFloat16Impl<Derived>::ToFloatImpl() const noexcept {
520
+ if (IsNaN()) {
521
+ return std::numeric_limits<float>::quiet_NaN();
522
+ }
523
+ float result;
524
+ char* const first = reinterpret_cast<char*>(&result);
525
+ char* const second = first + sizeof(uint16_t);
526
+ #ifdef __cpp_if_constexpr
527
+ if constexpr (detail::endian::native == detail::endian::little) {
528
+ #else
529
+ if (detail::endian::native == detail::endian::little) {
530
+ #endif
531
+ std::memset(first, 0, sizeof(uint16_t));
532
+ std::memcpy(second, &val, sizeof(uint16_t));
533
+ } else {
534
+ std::memcpy(first, &val, sizeof(uint16_t));
535
+ std::memset(second, 0, sizeof(uint16_t));
536
+ }
537
+ return result;
538
+ }
539
+
540
+ } // namespace onnxruntime_float16
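Editorial note: a minimal usage sketch for the bfloat16 helpers above. It assumes the public Ort::BFloat16_t wrapper declared in onnxruntime_cxx_api.h (which derives from BFloat16Impl) exposes a float constructor, ToFloat(), FromBits() and the inherited classification helpers; it is illustrative only and not part of the shipped headers.

#include <cassert>
#include <cmath>
#include "onnxruntime_cxx_api.h"

void BFloat16Sketch() {
  // Round-trip a float; ToUint16Impl rounds to nearest-even on the upper 16 bits,
  // so bfloat16 keeps roughly 2-3 significant decimal digits.
  Ort::BFloat16_t x(1.5708f);
  assert(std::fabs(x.ToFloat() - 1.5708f) < 1e-2f);

  // Classification helpers inherited from BFloat16Impl.
  assert(x.IsFinite() && x.IsNormal() && !x.IsNaN());

  // Positive and negative zero compare equal, as AreZero() documents.
  Ort::BFloat16_t pos_zero = Ort::BFloat16_t::FromBits(0x0000);
  Ort::BFloat16_t neg_zero = pos_zero.Negate();
  assert(Ort::BFloat16_t::AreZero(pos_zero, neg_zero));
}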
1.19.0/onnxruntime.xcframework/Headers/onnxruntime_lite_custom_op.h ADDED
@@ -0,0 +1,1119 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ // Summary
5
+ // The header has APIs to save custom op authors the trouble of defining schemas,
6
+ // which will be inferred from a function's signature, as long as its argument list uses types supported here.
7
+ // Input could be:
8
+ // 1. Tensor of onnx data types.
9
+ // 2. Span of onnx data types.
10
+ // 3. Scalar of onnx data types.
11
+ // An input could be optional if indicated as std::optional<...>.
12
+ // For an output, it must be a tensor of onnx data types.
13
+ // Further, the header also has utilities for registering a simple custom struct, in which resources could be kept, as a custom op.
14
+ // For concrete examples, please search keyword "LiteCustomOpTest" under "<cloned_src_dir>/onnxruntime/test/".
15
+ // Note - all APIs in this header are ABI.
16
+
17
+ #pragma once
18
+ #include "onnxruntime_cxx_api.h"
19
+ #include <optional>
20
+ #include <numeric>
21
+ #include <functional>
22
+ #include <unordered_set>
23
+
24
+ namespace Ort {
25
+ namespace Custom {
26
+
27
+ class ArgBase {
28
+ public:
29
+ ArgBase(OrtKernelContext* ctx,
30
+ size_t indice,
31
+ bool is_input) : ctx_(ctx), indice_(indice), is_input_(is_input) {}
32
+ virtual ~ArgBase() {};
33
+
34
+ protected:
35
+ struct KernelContext ctx_;
36
+ size_t indice_;
37
+ bool is_input_;
38
+ };
39
+
40
+ using ArgPtr = std::unique_ptr<Custom::ArgBase>;
41
+ using ArgPtrs = std::vector<ArgPtr>;
42
+
43
+ class TensorBase : public ArgBase {
44
+ public:
45
+ TensorBase(OrtKernelContext* ctx,
46
+ size_t indice,
47
+ bool is_input) : ArgBase(ctx, indice, is_input) {}
48
+
49
+ operator bool() const {
50
+ return shape_.has_value();
51
+ }
52
+
53
+ const std::vector<int64_t>& Shape() const {
54
+ if (!shape_.has_value()) {
55
+ ORT_CXX_API_THROW("tensor shape is not yet initialized", OrtErrorCode::ORT_RUNTIME_EXCEPTION);
56
+ }
57
+ return shape_.value();
58
+ }
59
+
60
+ ONNXTensorElementDataType Type() const {
61
+ return type_;
62
+ }
63
+
64
+ int64_t NumberOfElement() const {
65
+ if (shape_.has_value()) {
66
+ return std::accumulate(shape_->begin(), shape_->end(), 1LL, std::multiplies<int64_t>());
67
+ } else {
68
+ return 0;
69
+ }
70
+ }
71
+
72
+ std::string Shape2Str() const {
73
+ if (shape_.has_value()) {
74
+ std::string shape_str;
75
+ for (const auto& dim : *shape_) {
76
+ shape_str.append(std::to_string(dim));
77
+ shape_str.append(", ");
78
+ }
79
+ return shape_str;
80
+ } else {
81
+ return "empty";
82
+ }
83
+ }
84
+
85
+ bool IsCpuTensor() const {
86
+ return strcmp("Cpu", mem_type_) == 0;
87
+ }
88
+
89
+ virtual const void* DataRaw() const = 0;
90
+ virtual size_t SizeInBytes() const = 0;
91
+
92
+ protected:
93
+ std::optional<std::vector<int64_t>> shape_;
94
+ ONNXTensorElementDataType type_ = ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED;
95
+ const char* mem_type_ = "Cpu";
96
+ };
97
+
98
+ template <typename T>
99
+ struct Span {
100
+ const T* data_ = {};
101
+ size_t size_ = {};
102
+ void Assign(const T* data, size_t size) {
103
+ data_ = data;
104
+ size_ = size;
105
+ }
106
+ size_t size() const { return size_; }
107
+ T operator[](size_t indice) const {
108
+ return data_[indice];
109
+ }
110
+ const T* data() const { return data_; }
111
+ };
112
+
113
+ template <typename T>
114
+ class Tensor : public TensorBase {
115
+ public:
116
+ using TT = typename std::remove_reference<T>::type;
117
+ Tensor(OrtKernelContext* ctx, size_t indice, bool is_input) : TensorBase(ctx, indice, is_input) {
118
+ if (is_input_) {
119
+ if (indice >= ctx_.GetInputCount()) {
120
+ ORT_CXX_API_THROW("invalid indice for Ort::Custom::Tensor", OrtErrorCode::ORT_INVALID_ARGUMENT);
121
+ }
122
+ const_value_ = ctx_.GetInput(indice);
123
+ auto type_shape_info = const_value_.GetTensorTypeAndShapeInfo();
124
+ shape_ = type_shape_info.GetShape();
125
+ }
126
+ }
127
+ const TT* Data() const {
128
+ return reinterpret_cast<const TT*>(const_value_.GetTensorRawData());
129
+ }
130
+ TT* Allocate(const std::vector<int64_t>& shape) {
131
+ shape_ = shape;
132
+ if (!data_) {
133
+ shape_ = shape;
134
+ data_ = ctx_.GetOutput(indice_, shape).template GetTensorMutableData<TT>();
135
+ }
136
+ return data_;
137
+ }
138
+ static TT GetT() { return (TT)0; }
139
+ const Span<T>& AsSpan() {
140
+ if (!shape_.has_value() || shape_->size() != 1) {
141
+ ORT_CXX_API_THROW("invalid shape while trying to get a span out of Ort::Custom::Tensor",
142
+ OrtErrorCode::ORT_RUNTIME_EXCEPTION);
143
+ }
144
+ span_.Assign(Data(), static_cast<size_t>((*shape_)[0]));
145
+ return span_;
146
+ }
147
+ const T& AsScalar() {
148
+ if (!shape_.has_value() || shape_->size() != 1 || (*shape_)[0] != 1) {
149
+ ORT_CXX_API_THROW("invalid shape while trying to get a scalar from Ort::Custom::Tensor",
150
+ OrtErrorCode::ORT_RUNTIME_EXCEPTION);
151
+ }
152
+ return *Data();
153
+ }
154
+ const void* DataRaw() const override {
155
+ return reinterpret_cast<const void*>(Data());
156
+ }
157
+
158
+ size_t SizeInBytes() const override {
159
+ return sizeof(TT) * static_cast<size_t>(NumberOfElement());
160
+ }
161
+
162
+ private:
163
+ ConstValue const_value_; // for input
164
+ TT* data_{}; // for output
165
+ Span<T> span_;
166
+ };
167
+
168
+ template <>
169
+ class Tensor<std::string> : public TensorBase {
170
+ public:
171
+ using strings = std::vector<std::string>;
172
+
173
+ Tensor(OrtKernelContext* ctx, size_t indice, bool is_input) : TensorBase(ctx, indice, is_input) {
174
+ if (is_input_) {
175
+ if (indice >= ctx_.GetInputCount()) {
176
+ ORT_CXX_API_THROW("invalid indice for Ort::Custom::Tensor", OrtErrorCode::ORT_INVALID_ARGUMENT);
177
+ }
178
+ auto const_value = ctx_.GetInput(indice);
179
+ auto type_shape_info = const_value.GetTensorTypeAndShapeInfo();
180
+ shape_ = type_shape_info.GetShape();
181
+ auto num_chars = const_value.GetStringTensorDataLength();
182
+ // note - there will be a copy ...
183
+ auto num_strings = static_cast<size_t>(NumberOfElement());
184
+ if (num_strings) {
185
+ std::vector<char> chars(num_chars + 1, '\0');
186
+ std::vector<size_t> offsets(num_strings);
187
+ const_value.GetStringTensorContent(static_cast<void*>(chars.data()), num_chars, offsets.data(), offsets.size());
188
+ auto upper_bound = num_strings - 1;
189
+ input_strings_.resize(num_strings);
190
+ for (size_t i = upper_bound;; --i) {
191
+ if (i < upper_bound) {
192
+ chars[offsets[i + 1]] = '\0';
193
+ }
194
+ input_strings_[i] = chars.data() + offsets[i];
195
+ if (0 == i) {
196
+ break;
197
+ }
198
+ }
199
+ }
200
+ }
201
+ }
202
+ const strings& Data() const {
203
+ return input_strings_;
204
+ }
205
+ const void* DataRaw() const override {
206
+ if (input_strings_.size() != 1) {
207
+ ORT_CXX_API_THROW("DataRaw() only applies to string scalar", ORT_RUNTIME_EXCEPTION);
208
+ }
209
+ return reinterpret_cast<const void*>(input_strings_[0].c_str());
210
+ }
211
+ size_t SizeInBytes() const override {
212
+ if (input_strings_.size() != 1) {
213
+ ORT_CXX_API_THROW("SizeInBytes() only applies to string scalar", ORT_RUNTIME_EXCEPTION);
214
+ }
215
+ return input_strings_[0].size();
216
+ }
217
+ void SetStringOutput(const strings& ss, const std::vector<int64_t>& dims) {
218
+ shape_ = dims;
219
+ std::vector<const char*> raw;
220
+ for (const auto& s : ss) {
221
+ raw.push_back(s.data());
222
+ }
223
+ auto output = ctx_.GetOutput(indice_, dims.data(), dims.size());
224
+ // note - there will be a copy ...
225
+ output.FillStringTensor(raw.data(), raw.size());
226
+ }
227
+ const Span<std::string>& AsSpan() {
228
+ ORT_CXX_API_THROW("span for TensorT of string not implemented", OrtErrorCode::ORT_RUNTIME_EXCEPTION);
229
+ }
230
+ const std::string& AsScalar() {
231
+ if (input_strings_.size() != 1) {
232
+ ORT_CXX_API_THROW("invalid shape while trying to get a scalar string from Ort::Custom::Tensor",
233
+ OrtErrorCode::ORT_RUNTIME_EXCEPTION);
234
+ }
235
+ return input_strings_[0];
236
+ }
237
+
238
+ private:
239
+ std::vector<std::string> input_strings_; // for input
240
+ };
241
+
242
+ template <>
243
+ class Tensor<std::string_view> : public TensorBase {
244
+ public:
245
+ using strings = std::vector<std::string>;
246
+ using string_views = std::vector<std::string_view>;
247
+
248
+ Tensor(OrtKernelContext* ctx, size_t indice, bool is_input) : TensorBase(ctx, indice, is_input) {
249
+ if (is_input_) {
250
+ if (indice >= ctx_.GetInputCount()) {
251
+ ORT_CXX_API_THROW("invalid indice for Ort::Custom::Tensor", OrtErrorCode::ORT_INVALID_ARGUMENT);
252
+ }
253
+ auto const_value = ctx_.GetInput(indice);
254
+ auto type_shape_info = const_value.GetTensorTypeAndShapeInfo();
255
+ shape_ = type_shape_info.GetShape();
256
+ auto num_chars = const_value.GetStringTensorDataLength();
257
+ chars_.resize(num_chars + 1, '\0');
258
+ auto num_strings = static_cast<size_t>(NumberOfElement());
259
+ if (num_strings) {
260
+ std::vector<size_t> offsets(num_strings);
261
+ const_value.GetStringTensorContent(static_cast<void*>(chars_.data()), num_chars, offsets.data(), offsets.size());
262
+ offsets.push_back(num_chars);
263
+ for (size_t i = 0; i < num_strings; ++i) {
264
+ input_string_views_.emplace_back(chars_.data() + offsets[i], offsets[i + 1] - offsets[i]);
265
+ }
266
+ }
267
+ }
268
+ }
269
+ const string_views& Data() const {
270
+ return input_string_views_;
271
+ }
272
+ const void* DataRaw() const override {
273
+ if (input_string_views_.size() != 1) {
274
+ ORT_CXX_API_THROW("DataRaw() only applies to string scalar", ORT_RUNTIME_EXCEPTION);
275
+ }
276
+ return reinterpret_cast<const void*>(input_string_views_[0].data());
277
+ }
278
+ size_t SizeInBytes() const override {
279
+ if (input_string_views_.size() != 1) {
280
+ ORT_CXX_API_THROW("SizeInBytes() only applies to string scalar", ORT_RUNTIME_EXCEPTION);
281
+ }
282
+ return input_string_views_[0].size();
283
+ }
284
+ void SetStringOutput(const strings& ss, const std::vector<int64_t>& dims) {
285
+ shape_ = dims;
286
+ std::vector<const char*> raw;
287
+ for (const auto& s : ss) {
288
+ raw.push_back(s.data());
289
+ }
290
+ auto output = ctx_.GetOutput(indice_, dims.data(), dims.size());
291
+ // note - there will be a copy ...
292
+ output.FillStringTensor(raw.data(), raw.size());
293
+ }
294
+ const Span<std::string_view>& AsSpan() {
295
+ ORT_CXX_API_THROW("span for TensorT of string view not implemented", OrtErrorCode::ORT_RUNTIME_EXCEPTION);
296
+ }
297
+ std::string_view AsScalar() {
298
+ if (input_string_views_.size() != 1) {
299
+ ORT_CXX_API_THROW("invalid shape while trying to get a scalar string view from Ort::Custom::Tensor",
300
+ OrtErrorCode::ORT_RUNTIME_EXCEPTION);
301
+ }
302
+ return input_string_views_[0];
303
+ }
304
+
305
+ private:
306
+ std::vector<char> chars_; // for input
307
+ std::vector<std::string_view> input_string_views_; // for input
308
+ };
309
+
310
+ using TensorPtr = std::unique_ptr<Custom::TensorBase>;
311
+ using TensorPtrs = std::vector<TensorPtr>;
312
+
313
+ struct TensorArray : public ArgBase {
314
+ TensorArray(OrtKernelContext* ctx,
315
+ size_t start_indice,
316
+ bool is_input) : ArgBase(ctx,
317
+ start_indice,
318
+ is_input) {
319
+ if (is_input) {
320
+ auto input_count = ctx_.GetInputCount();
321
+ for (size_t ith_input = start_indice; ith_input < input_count; ++ith_input) {
322
+ auto const_value = ctx_.GetInput(start_indice);
323
+ auto type_shape_info = const_value.GetTensorTypeAndShapeInfo();
324
+ auto type = type_shape_info.GetElementType();
325
+ TensorPtr tensor;
326
+ switch (type) {
327
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL:
328
+ tensor = std::make_unique<Custom::Tensor<bool>>(ctx, ith_input, true);
329
+ break;
330
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
331
+ tensor = std::make_unique<Custom::Tensor<float>>(ctx, ith_input, true);
332
+ break;
333
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE:
334
+ tensor = std::make_unique<Custom::Tensor<double>>(ctx, ith_input, true);
335
+ break;
336
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
337
+ tensor = std::make_unique<Custom::Tensor<uint8_t>>(ctx, ith_input, true);
338
+ break;
339
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8:
340
+ tensor = std::make_unique<Custom::Tensor<int8_t>>(ctx, ith_input, true);
341
+ break;
342
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16:
343
+ tensor = std::make_unique<Custom::Tensor<uint16_t>>(ctx, ith_input, true);
344
+ break;
345
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16:
346
+ tensor = std::make_unique<Custom::Tensor<int16_t>>(ctx, ith_input, true);
347
+ break;
348
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32:
349
+ tensor = std::make_unique<Custom::Tensor<uint32_t>>(ctx, ith_input, true);
350
+ break;
351
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
352
+ tensor = std::make_unique<Custom::Tensor<int32_t>>(ctx, ith_input, true);
353
+ break;
354
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64:
355
+ tensor = std::make_unique<Custom::Tensor<uint64_t>>(ctx, ith_input, true);
356
+ break;
357
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:
358
+ tensor = std::make_unique<Custom::Tensor<int64_t>>(ctx, ith_input, true);
359
+ break;
360
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING:
361
+ tensor = std::make_unique<Custom::Tensor<std::string>>(ctx, ith_input, true);
362
+ break;
363
+ default:
364
+ ORT_CXX_API_THROW("unknown input type", ORT_RUNTIME_EXCEPTION);
365
+ break;
366
+ }
367
+ tensors_.emplace_back(tensor.release());
368
+ } // for
369
+ }
370
+ }
371
+ template <typename T>
372
+ T* AllocateOutput(size_t ith_output, const std::vector<int64_t>& shape) {
373
+ // ith_output is the indice of output relative to the tensor array
374
+ // indice_ + ith_output is the indice relative to context
375
+ auto tensor = std::make_unique<Tensor<T>>(ctx_.GetOrtKernelContext(), indice_ + ith_output, false);
376
+ auto raw_output = tensor.get()->Allocate(shape);
377
+ tensors_.emplace_back(tensor.release());
378
+ return raw_output;
379
+ }
380
+ Tensor<std::string>& AllocateStringTensor(size_t ith_output) {
381
+ // ith_output is the indice of output relative to the tensor array
382
+ // indice_ + ith_output is the indice relative to context
383
+ auto tensor = std::make_unique<Tensor<std::string>>(ctx_.GetOrtKernelContext(), indice_ + ith_output, false);
384
+ Tensor<std::string>& output = *tensor;
385
+ tensors_.emplace_back(tensor.release());
386
+ return output;
387
+ }
388
+ size_t Size() const {
389
+ return tensors_.size();
390
+ }
391
+ const TensorPtr& operator[](size_t ith_input) const {
392
+ // ith_input is the indice of the tensor relative to the tensor array
393
+ return tensors_.at(ith_input);
394
+ }
395
+
396
+ private:
397
+ TensorPtrs tensors_;
398
+ };
399
+
400
+ using Variadic = TensorArray;
401
+
402
+ /*
403
+ Note:
404
+ OrtLiteCustomOp inherits from OrtCustomOp to bridge between a custom func/struct and ort core.
405
+ The lifetime of an OrtLiteCustomOp instance is managed by customer code, not ort, so:
406
+ 1. DO NOT cast OrtLiteCustomOp to OrtCustomOp and release since there is no virtual destructor in the hierarchy.
407
+ 2. OrtLiteCustomFunc and OrtLiteCustomStruct, as two sub-structs, can be released in the form of OrtLiteCustomOp since all members are kept in the OrtLiteCustomOp,
408
+ hence memory could still be recycled properly.
409
+ Further, OrtCustomOp is a C struct bearing no v-table, so derived structs are designed to have zero virtual functions to maintain cast safety.
410
+ */
411
+ struct OrtLiteCustomOp : public OrtCustomOp {
412
+ using ConstOptionalFloatTensor = std::optional<const Custom::Tensor<float>&>;
413
+ using OptionalFloatTensor = std::optional<Custom::Tensor<float>>;
414
+
415
+ // CreateTuple
416
+ template <size_t ith_input, size_t ith_output, typename... Ts>
417
+ static typename std::enable_if<sizeof...(Ts) == 0, std::tuple<>>::type
418
+ CreateTuple(OrtKernelContext*, ArgPtrs&, size_t, size_t, const std::string&) {
419
+ return std::make_tuple();
420
+ }
421
+
422
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
423
+ static typename std::enable_if<std::is_same<T, OrtKernelContext*>::value, std::tuple<T, Ts...>>::type
424
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
425
+ std::tuple<T> current = std::tuple<OrtKernelContext*>{context};
426
+ auto next = CreateTuple<ith_input, ith_output, Ts...>(context, args, num_input, num_output, ep);
427
+ return std::tuple_cat(current, next);
428
+ }
429
+
430
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
431
+ static typename std::enable_if<std::is_same<T, OrtKernelContext&>::value, std::tuple<T, Ts...>>::type
432
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
433
+ std::tuple<T> current = std::tuple<OrtKernelContext&>{*context};
434
+ auto next = CreateTuple<ith_input, ith_output, Ts...>(context, args, num_input, num_output, ep);
435
+ return std::tuple_cat(current, next);
436
+ }
437
+
438
+ #ifdef ORT_CUDA_CTX
439
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
440
+ static typename std::enable_if<std::is_same<T, const CudaContext&>::value, std::tuple<T, Ts...>>::type
441
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
442
+ thread_local CudaContext cuda_context;
443
+ cuda_context.Init(*context);
444
+ std::tuple<T> current = std::tuple<const CudaContext&>{cuda_context};
445
+ auto next = CreateTuple<ith_input, ith_output, Ts...>(context, args, num_input, num_output, ep);
446
+ return std::tuple_cat(current, next);
447
+ }
448
+ #endif
449
+
450
+ #ifdef ORT_ROCM_CTX
451
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
452
+ static typename std::enable_if<std::is_same<T, const RocmContext&>::value, std::tuple<T, Ts...>>::type
453
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
454
+ thread_local RocmContext rocm_context;
455
+ rocm_context.Init(*context);
456
+ std::tuple<T> current = std::tuple<const RocmContext&>{rocm_context};
457
+ auto next = CreateTuple<ith_input, ith_output, Ts...>(context, args, num_input, num_output, ep);
458
+ return std::tuple_cat(current, next);
459
+ }
460
+ #endif
461
+
462
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
463
+ static typename std::enable_if<std::is_same<T, const TensorArray*>::value, std::tuple<T, Ts...>>::type
464
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
465
+ args.push_back(std::make_unique<TensorArray>(context, ith_input, true));
466
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(args.back().get())};
467
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep);
468
+ return std::tuple_cat(current, next);
469
+ }
470
+
471
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
472
+ static typename std::enable_if<std::is_same<T, const TensorArray&>::value, std::tuple<T, Ts...>>::type
473
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
474
+ args.push_back(std::make_unique<TensorArray>(context, ith_input, true));
475
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(*args.back().get())};
476
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep);
477
+ return std::tuple_cat(current, next);
478
+ }
479
+
480
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
481
+ static typename std::enable_if<std::is_same<T, TensorArray*>::value, std::tuple<T, Ts...>>::type
482
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
483
+ args.push_back(std::make_unique<TensorArray>(context, ith_output, false));
484
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(args.back().get())};
485
+ auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep);
486
+ return std::tuple_cat(current, next);
487
+ }
488
+
489
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts>
490
+ static typename std::enable_if<std::is_same<T, TensorArray&>::value, std::tuple<T, Ts...>>::type
491
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) {
492
+ args.push_back(std::make_unique<TensorArray>(context, ith_output, false));
493
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(*args.back().get())};
494
+ auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep);
495
+ return std::tuple_cat(current, next);
496
+ }
497
+
498
+ #define CREATE_TUPLE_INPUT(data_type) \
499
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
500
+ static typename std::enable_if<std::is_same<T, const Custom::Tensor<data_type>*>::value, std::tuple<T, Ts...>>::type \
501
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
502
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
503
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(args.back().get())}; \
504
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
505
+ return std::tuple_cat(current, next); \
506
+ } \
507
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
508
+ static typename std::enable_if<std::is_same<T, const Custom::Tensor<data_type>&>::value, std::tuple<T, Ts...>>::type \
509
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
510
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
511
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(*args.back().get())}; \
512
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
513
+ return std::tuple_cat(current, next); \
514
+ } \
515
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
516
+ static typename std::enable_if<std::is_same<T, std::optional<const Custom::Tensor<data_type>*>>::value, std::tuple<T, Ts...>>::type \
517
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
518
+ if (ith_input < num_input) { \
519
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
520
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())}; \
521
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
522
+ return std::tuple_cat(current, next); \
523
+ } else { \
524
+ std::tuple<T> current = std::tuple<T>{}; \
525
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
526
+ return std::tuple_cat(current, next); \
527
+ } \
528
+ } \
529
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
530
+ static typename std::enable_if<std::is_same<T, const Custom::Span<data_type>*>::value, std::tuple<T, Ts...>>::type \
531
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
532
+ if ("CPUExecutionProvider" != ep) { \
533
+ ORT_CXX_API_THROW("span input could only be applied to CPU EP", OrtErrorCode::ORT_RUNTIME_EXCEPTION); \
534
+ } \
535
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
536
+ std::tuple<T> current = std::tuple<T>{&reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())->AsSpan()}; \
537
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
538
+ return std::tuple_cat(current, next); \
539
+ } \
540
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
541
+ static typename std::enable_if<std::is_same<T, const Custom::Span<data_type>&>::value, std::tuple<T, Ts...>>::type \
542
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
543
+ if ("CPUExecutionProvider" != ep) { \
544
+ ORT_CXX_API_THROW("span input could only be applied to CPU EP", OrtErrorCode::ORT_RUNTIME_EXCEPTION); \
545
+ } \
546
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
547
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())->AsSpan()}; \
548
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
549
+ return std::tuple_cat(current, next); \
550
+ } \
551
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
552
+ static typename std::enable_if<std::is_same<T, std::optional<const Custom::Span<data_type>*>>::value, std::tuple<T, Ts...>>::type \
553
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
554
+ if (ith_input < num_input) { \
555
+ if ("CPUExecutionProvider" != ep) { \
556
+ ORT_CXX_API_THROW("span input could only be applied to CPU EP", OrtErrorCode::ORT_RUNTIME_EXCEPTION); \
557
+ } \
558
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
559
+ std::tuple<T> current = std::tuple<T>{&reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())->AsSpan()}; \
560
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
561
+ return std::tuple_cat(current, next); \
562
+ } else { \
563
+ std::tuple<T> current = std::tuple<T>{}; \
564
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
565
+ return std::tuple_cat(current, next); \
566
+ } \
567
+ } \
568
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
569
+ static typename std::enable_if<std::is_same<T, data_type>::value, std::tuple<T, Ts...>>::type \
570
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
571
+ if ("CPUExecutionProvider" != ep) { \
572
+ ORT_CXX_API_THROW("scalar input could only be applied to CPU EP", OrtErrorCode::ORT_RUNTIME_EXCEPTION); \
573
+ } \
574
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
575
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())->AsScalar()}; \
576
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
577
+ return std::tuple_cat(current, next); \
578
+ } \
579
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
580
+ static typename std::enable_if<std::is_same<T, std::optional<data_type>>::value, std::tuple<T, Ts...>>::type \
581
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
582
+ if (ith_input < num_input) { \
583
+ if ("CPUExecutionProvider" != ep) { \
584
+ ORT_CXX_API_THROW("scalar input could only be applied to CPU EP", OrtErrorCode::ORT_RUNTIME_EXCEPTION); \
585
+ } \
586
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_input, true)); \
587
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())->AsScalar()}; \
588
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
589
+ return std::tuple_cat(current, next); \
590
+ } else { \
591
+ std::tuple<T> current = std::tuple<T>{}; \
592
+ auto next = CreateTuple<ith_input + 1, ith_output, Ts...>(context, args, num_input, num_output, ep); \
593
+ return std::tuple_cat(current, next); \
594
+ } \
595
+ }
596
+ #define CREATE_TUPLE_OUTPUT(data_type) \
597
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
598
+ static typename std::enable_if<std::is_same<T, Custom::Tensor<data_type>*>::value, std::tuple<T, Ts...>>::type \
599
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
600
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_output, false)); \
601
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(args.back().get())}; \
602
+ auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
603
+ return std::tuple_cat(current, next); \
604
+ } \
605
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
606
+ static typename std::enable_if<std::is_same<T, Custom::Tensor<data_type>&>::value, std::tuple<T, Ts...>>::type \
607
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
608
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_output, false)); \
609
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<T>(*args.back().get())}; \
610
+ auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
611
+ return std::tuple_cat(current, next); \
612
+ } \
613
+ template <size_t ith_input, size_t ith_output, typename T, typename... Ts> \
614
+ static typename std::enable_if<std::is_same<T, std::optional<Custom::Tensor<data_type>*>>::value, std::tuple<T, Ts...>>::type \
615
+ CreateTuple(OrtKernelContext* context, ArgPtrs& args, size_t num_input, size_t num_output, const std::string& ep) { \
616
+ if (ith_output < num_output) { \
617
+ args.push_back(std::make_unique<Custom::Tensor<data_type>>(context, ith_output, false)); \
618
+ std::tuple<T> current = std::tuple<T>{reinterpret_cast<Custom::Tensor<data_type>*>(args.back().get())}; \
619
+ auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
620
+ return std::tuple_cat(current, next); \
621
+ } else { \
622
+ std::tuple<T> current = std::tuple<T>{}; \
623
+ auto next = CreateTuple<ith_input, ith_output + 1, Ts...>(context, args, num_input, num_output, ep); \
624
+ return std::tuple_cat(current, next); \
625
+ } \
626
+ }
627
+ #define CREATE_TUPLE(data_type) \
628
+ CREATE_TUPLE_INPUT(data_type) \
629
+ CREATE_TUPLE_OUTPUT(data_type)
630
+
631
+ CREATE_TUPLE(bool)
632
+ CREATE_TUPLE(float)
633
+ CREATE_TUPLE(Ort::Float16_t)
634
+ CREATE_TUPLE(Ort::BFloat16_t)
635
+ CREATE_TUPLE(double)
636
+ CREATE_TUPLE(int8_t)
637
+ CREATE_TUPLE(int16_t)
638
+ CREATE_TUPLE(int32_t)
639
+ CREATE_TUPLE(int64_t)
640
+ CREATE_TUPLE(uint8_t)
641
+ CREATE_TUPLE(uint16_t)
642
+ CREATE_TUPLE(uint32_t)
643
+ CREATE_TUPLE(uint64_t)
644
+ CREATE_TUPLE(std::string)
645
+ CREATE_TUPLE_INPUT(std::string_view)
646
+ CREATE_TUPLE(Ort::Float8E4M3FN_t)
647
+ CREATE_TUPLE(Ort::Float8E4M3FNUZ_t)
648
+ CREATE_TUPLE(Ort::Float8E5M2_t)
649
+ CREATE_TUPLE(Ort::Float8E5M2FNUZ_t)
650
+
651
+ // ParseArgs ...
652
+ template <typename... Ts>
653
+ static typename std::enable_if<0 == sizeof...(Ts)>::type
654
+ ParseArgs(std::vector<ONNXTensorElementDataType>&, std::vector<ONNXTensorElementDataType>&) {
655
+ }
656
+
657
+ template <typename T, typename... Ts>
658
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, OrtKernelContext*>::value>::type
659
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
660
+ ParseArgs<Ts...>(input_types, output_types);
661
+ }
662
+
663
+ template <typename T, typename... Ts>
664
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, OrtKernelContext&>::value>::type
665
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
666
+ ParseArgs<Ts...>(input_types, output_types);
667
+ }
668
+
669
+ #ifdef ORT_CUDA_CTX
670
+ template <typename T, typename... Ts>
671
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, const CudaContext&>::value>::type
672
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
673
+ ParseArgs<Ts...>(input_types, output_types);
674
+ }
675
+ #endif
676
+
677
+ #ifdef ORT_ROCM_CTX
678
+ template <typename T, typename... Ts>
679
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, const RocmContext&>::value>::type
680
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
681
+ ParseArgs<Ts...>(input_types, output_types);
682
+ }
683
+ #endif
684
+
685
+ template <typename T, typename... Ts>
686
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, const TensorArray&>::value>::type
687
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
688
+ input_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED);
689
+ ParseArgs<Ts...>(input_types, output_types);
690
+ }
691
+
692
+ template <typename T, typename... Ts>
693
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, const TensorArray*>::value>::type
694
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
695
+ input_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED);
696
+ ParseArgs<Ts...>(input_types, output_types);
697
+ }
698
+
699
+ template <typename T, typename... Ts>
700
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, TensorArray&>::value>::type
701
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
702
+ output_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED);
703
+ ParseArgs<Ts...>(input_types, output_types);
704
+ }
705
+
706
+ template <typename T, typename... Ts>
707
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, TensorArray*>::value>::type
708
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) {
709
+ output_types.push_back(ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED);
710
+ ParseArgs<Ts...>(input_types, output_types);
711
+ }
712
+
713
+ #define PARSE_INPUT_BASE(pack_type, onnx_type) \
714
+ template <typename T, typename... Ts> \
715
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, pack_type>::value>::type \
716
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
717
+ input_types.push_back(onnx_type); \
718
+ ParseArgs<Ts...>(input_types, output_types); \
719
+ } \
720
+ template <typename T, typename... Ts> \
721
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, const std::optional<pack_type>>::value>::type \
722
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
723
+ input_types.push_back(onnx_type); \
724
+ ParseArgs<Ts...>(input_types, output_types); \
725
+ } \
726
+ template <typename T, typename... Ts> \
727
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, std::optional<pack_type>>::value>::type \
728
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
729
+ input_types.push_back(onnx_type); \
730
+ ParseArgs<Ts...>(input_types, output_types); \
731
+ }
732
+
733
+ #define PARSE_INPUT(data_type, onnx_type) \
734
+ PARSE_INPUT_BASE(const Custom::Tensor<data_type>*, onnx_type) \
735
+ PARSE_INPUT_BASE(const Custom::Tensor<data_type>&, onnx_type) \
736
+ PARSE_INPUT_BASE(const Custom::Span<data_type>*, onnx_type) \
737
+ PARSE_INPUT_BASE(const Custom::Span<data_type>&, onnx_type) \
738
+ PARSE_INPUT_BASE(data_type, onnx_type)
739
+
740
+ #define PARSE_OUTPUT(data_type, onnx_type) \
741
+ template <typename T, typename... Ts> \
742
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, Custom::Tensor<data_type>*>::value>::type \
743
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
744
+ output_types.push_back(onnx_type); \
745
+ ParseArgs<Ts...>(input_types, output_types); \
746
+ } \
747
+ template <typename T, typename... Ts> \
748
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, Custom::Tensor<data_type>&>::value>::type \
749
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
750
+ output_types.push_back(onnx_type); \
751
+ ParseArgs<Ts...>(input_types, output_types); \
752
+ } \
753
+ template <typename T, typename... Ts> \
754
+ static typename std::enable_if<0 <= sizeof...(Ts) && std::is_same<T, std::optional<Custom::Tensor<data_type>*>>::value>::type \
755
+ ParseArgs(std::vector<ONNXTensorElementDataType>& input_types, std::vector<ONNXTensorElementDataType>& output_types) { \
756
+ output_types.push_back(onnx_type); \
757
+ ParseArgs<Ts...>(input_types, output_types); \
758
+ }
759
+
760
+ #define PARSE_ARGS(data_type, onnx_type) \
761
+ PARSE_INPUT(data_type, onnx_type) \
762
+ PARSE_OUTPUT(data_type, onnx_type)
763
+
764
+ PARSE_ARGS(bool, ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL)
765
+ PARSE_ARGS(float, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT)
766
+ PARSE_ARGS(Ort::Float16_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16)
767
+ PARSE_ARGS(Ort::BFloat16_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16)
768
+ PARSE_ARGS(double, ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE)
769
+ PARSE_ARGS(int8_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8)
770
+ PARSE_ARGS(int16_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16)
771
+ PARSE_ARGS(int32_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32)
772
+ PARSE_ARGS(int64_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64)
773
+ PARSE_ARGS(uint8_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8)
774
+ PARSE_ARGS(uint16_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16)
775
+ PARSE_ARGS(uint32_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32)
776
+ PARSE_ARGS(uint64_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64)
777
+ PARSE_ARGS(std::string, ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING)
778
+ PARSE_ARGS(std::string_view, ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING) // todo - remove string_view output
779
+ PARSE_ARGS(Ort::Float8E4M3FN_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN)
780
+ PARSE_ARGS(Ort::Float8E4M3FNUZ_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ)
781
+ PARSE_ARGS(Ort::Float8E5M2_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2)
782
+ PARSE_ARGS(Ort::Float8E5M2FNUZ_t, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ)
783
+
784
+ OrtLiteCustomOp(const char* op_name,
785
+ const char* execution_provider,
786
+ ShapeInferFn shape_infer_fn,
787
+ int start_ver = 1,
788
+ int end_ver = MAX_CUSTOM_OP_END_VER) : op_name_(op_name),
789
+ execution_provider_(execution_provider),
790
+ shape_infer_fn_(shape_infer_fn),
791
+ start_ver_(start_ver),
792
+ end_ver_(end_ver) {
793
+ OrtCustomOp::version = ORT_API_VERSION;
794
+
795
+ OrtCustomOp::GetName = [](const OrtCustomOp* op) { return static_cast<const OrtLiteCustomOp*>(op)->op_name_.c_str(); };
796
+ OrtCustomOp::GetExecutionProviderType = [](const OrtCustomOp* op) { return ((OrtLiteCustomOp*)op)->execution_provider_.c_str(); };
797
+ OrtCustomOp::GetInputMemoryType = [](const OrtCustomOp*, size_t) { return OrtMemTypeDefault; };
798
+
799
+ OrtCustomOp::GetInputTypeCount = [](const OrtCustomOp* op) {
800
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
801
+ return self->input_types_.size();
802
+ };
803
+
804
+ OrtCustomOp::GetInputType = [](const OrtCustomOp* op, size_t indice) {
805
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
806
+ return self->input_types_[indice];
807
+ };
808
+
809
+ OrtCustomOp::GetOutputTypeCount = [](const OrtCustomOp* op) {
810
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
811
+ return self->output_types_.size();
812
+ };
813
+
814
+ OrtCustomOp::GetOutputType = [](const OrtCustomOp* op, size_t indice) {
815
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
816
+ return self->output_types_[indice];
817
+ };
818
+
819
+ OrtCustomOp::GetInputCharacteristic = [](const OrtCustomOp* op, size_t indice) {
820
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
821
+ return self->input_types_[indice] == ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED ? INPUT_OUTPUT_VARIADIC : INPUT_OUTPUT_OPTIONAL;
822
+ };
823
+
824
+ OrtCustomOp::GetOutputCharacteristic = [](const OrtCustomOp* op, size_t indice) {
825
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
826
+ return self->output_types_[indice] == ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED ? INPUT_OUTPUT_VARIADIC : INPUT_OUTPUT_OPTIONAL;
827
+ };
828
+
829
+ OrtCustomOp::GetVariadicInputMinArity = [](const OrtCustomOp*) {
830
+ return 1;
831
+ };
832
+
833
+ OrtCustomOp::GetVariadicInputHomogeneity = [](const OrtCustomOp*) {
834
+ return 0;
835
+ };
836
+
837
+ OrtCustomOp::GetVariadicOutputMinArity = [](const OrtCustomOp*) {
838
+ return 1;
839
+ };
840
+
841
+ OrtCustomOp::GetVariadicOutputHomogeneity = [](const OrtCustomOp*) {
842
+ return 0;
843
+ };
844
+
845
+ OrtCustomOp::GetVariadicInputMinArity = [](const OrtCustomOp*) { return 0; };
846
+ OrtCustomOp::GetVariadicInputHomogeneity = [](const OrtCustomOp*) { return 0; };
847
+ OrtCustomOp::GetVariadicOutputMinArity = [](const OrtCustomOp*) { return 0; };
848
+ OrtCustomOp::GetVariadicOutputHomogeneity = [](const OrtCustomOp*) { return 0; };
849
+
850
+ OrtCustomOp::CreateKernelV2 = {};
851
+ OrtCustomOp::KernelComputeV2 = {};
852
+ OrtCustomOp::KernelCompute = {};
853
+
854
+ OrtCustomOp::InferOutputShapeFn = {};
855
+
856
+ OrtCustomOp::GetStartVersion = [](const OrtCustomOp* op) {
857
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
858
+ return self->start_ver_;
859
+ };
860
+
861
+ OrtCustomOp::GetEndVersion = [](const OrtCustomOp* op) {
862
+ auto self = reinterpret_cast<const OrtLiteCustomOp*>(op);
863
+ return self->end_ver_;
864
+ };
865
+
866
+ OrtCustomOp::GetMayInplace = {};
867
+ OrtCustomOp::ReleaseMayInplace = {};
868
+ OrtCustomOp::GetAliasMap = {};
869
+ OrtCustomOp::ReleaseAliasMap = {};
870
+ }
871
+
872
+ const std::string op_name_;
873
+ const std::string execution_provider_;
874
+
875
+ std::vector<ONNXTensorElementDataType> input_types_;
876
+ std::vector<ONNXTensorElementDataType> output_types_;
877
+
878
+ ShapeInferFn shape_infer_fn_ = {};
879
+
880
+ int start_ver_ = 1;
881
+ int end_ver_ = MAX_CUSTOM_OP_END_VER;
882
+
883
+ void* compute_fn_ = {};
884
+ void* compute_fn_return_status_ = {};
885
+ };
886
+
887
+ //////////////////////////// OrtLiteCustomFunc ////////////////////////////////
888
+ // The struct is to implement function-as-op.
889
+ // E.g. a function might be defined as:
890
+ // void Filter(const Ort::Custom::Tensor<float>& floats_in, Ort::Custom::Tensor<float>& floats_out) { ... }
891
+ // It could be registered this way:
892
+ // Ort::CustomOpDomain v2_domain{"v2"};
893
+ // std::unique_ptr<OrtLiteCustomOp> fil_op_ptr{Ort::Custom::CreateLiteCustomOp("Filter", "CPUExecutionProvider", Filter)};
894
+ // v2_domain.Add(fil_op_ptr.get());
895
+ // session_options.Add(v2_domain);
896
+ // For the complete example, please search keyword "LiteCustomOpTest" under "<cloned_src_dir>/onnxruntime/test/".
897
+ template <typename... Args>
898
+ struct OrtLiteCustomFunc : public OrtLiteCustomOp {
899
+ using ComputeFn = void (*)(Args...);
900
+ using ComputeFnReturnStatus = Status (*)(Args...);
901
+ using MyType = OrtLiteCustomFunc<Args...>;
902
+
903
+ struct Kernel {
904
+ size_t num_input_{};
905
+ size_t num_output_{};
906
+ ComputeFn compute_fn_{};
907
+ ComputeFnReturnStatus compute_fn_return_status_{};
908
+ std::string ep_{};
909
+ };
910
+
911
+ OrtLiteCustomFunc(const char* op_name,
912
+ const char* execution_provider,
913
+ ComputeFn compute_fn,
914
+ ShapeInferFn shape_infer_fn = {},
915
+ int start_ver = 1,
916
+ int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, shape_infer_fn, start_ver, end_ver) {
917
+ compute_fn_ = reinterpret_cast<void*>(compute_fn);
918
+ ParseArgs<Args...>(input_types_, output_types_);
919
+
920
+ OrtCustomOp::KernelCompute = [](void* op_kernel, OrtKernelContext* context) {
921
+ auto kernel = reinterpret_cast<Kernel*>(op_kernel);
922
+ std::vector<ArgPtr> args;
923
+ auto t = CreateTuple<0, 0, Args...>(context, args, kernel->num_input_, kernel->num_output_, kernel->ep_);
924
+ std::apply([kernel](Args const&... t_args) { kernel->compute_fn_(t_args...); }, t);
925
+ };
926
+
927
+ OrtCustomOp::CreateKernel = [](const OrtCustomOp* this_, const OrtApi* ort_api, const OrtKernelInfo* info) {
928
+ auto kernel = std::make_unique<Kernel>();
929
+ auto me = static_cast<const MyType*>(this_);
930
+ kernel->compute_fn_ = reinterpret_cast<ComputeFn>(me->compute_fn_);
931
+ Ort::ThrowOnError(ort_api->KernelInfo_GetInputCount(info, &kernel->num_input_));
932
+ Ort::ThrowOnError(ort_api->KernelInfo_GetOutputCount(info, &kernel->num_output_));
933
+ auto self = static_cast<const OrtLiteCustomFunc*>(this_);
934
+ kernel->ep_ = self->execution_provider_;
935
+ return reinterpret_cast<void*>(kernel.release());
936
+ };
937
+
938
+ OrtCustomOp::KernelDestroy = [](void* op_kernel) {
939
+ delete reinterpret_cast<Kernel*>(op_kernel);
940
+ };
941
+
942
+ if (shape_infer_fn_) {
943
+ OrtCustomOp::InferOutputShapeFn = [](const OrtCustomOp* op, OrtShapeInferContext* ort_ctx) -> OrtStatusPtr {
944
+ auto shape_info_fn = static_cast<const MyType*>(op)->shape_infer_fn_;
945
+ ShapeInferContext ctx(&GetApi(), ort_ctx);
946
+ return shape_info_fn(ctx);
947
+ };
948
+ }
949
+ }
950
+
951
+ OrtLiteCustomFunc(const char* op_name,
952
+ const char* execution_provider,
953
+ ComputeFnReturnStatus compute_fn_return_status,
954
+ ShapeInferFn shape_infer_fn = {},
955
+ int start_ver = 1,
956
+ int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, shape_infer_fn, start_ver, end_ver) {
957
+ compute_fn_return_status_ = reinterpret_cast<void*>(compute_fn_return_status);
958
+ ParseArgs<Args...>(input_types_, output_types_);
959
+
960
+ OrtCustomOp::KernelComputeV2 = [](void* op_kernel, OrtKernelContext* context) -> OrtStatusPtr {
961
+ auto kernel = reinterpret_cast<Kernel*>(op_kernel);
962
+ std::vector<ArgPtr> args;
963
+ auto t = CreateTuple<0, 0, Args...>(context, args, kernel->num_input_, kernel->num_output_, kernel->ep_);
964
+ return std::apply([kernel](Args const&... t_args) { Status status = kernel->compute_fn_return_status_(t_args...); return status.release(); }, t);
965
+ };
966
+
967
+ OrtCustomOp::CreateKernel = [](const OrtCustomOp* this_, const OrtApi* ort_api, const OrtKernelInfo* info) {
968
+ auto kernel = std::make_unique<Kernel>();
969
+ auto me = static_cast<const MyType*>(this_);
970
+ kernel->compute_fn_return_status_ = reinterpret_cast<ComputeFnReturnStatus>(me->compute_fn_return_status_);
971
+ Ort::ThrowOnError(ort_api->KernelInfo_GetInputCount(info, &kernel->num_input_));
972
+ Ort::ThrowOnError(ort_api->KernelInfo_GetOutputCount(info, &kernel->num_output_));
973
+ auto self = static_cast<const OrtLiteCustomFunc*>(this_);
974
+ kernel->ep_ = self->execution_provider_;
975
+ return reinterpret_cast<void*>(kernel.release());
976
+ };
977
+
978
+ OrtCustomOp::KernelDestroy = [](void* op_kernel) {
979
+ delete reinterpret_cast<Kernel*>(op_kernel);
980
+ };
981
+
982
+ if (shape_infer_fn_) {
983
+ OrtCustomOp::InferOutputShapeFn = [](const OrtCustomOp* op, OrtShapeInferContext* ort_ctx) -> OrtStatusPtr {
984
+ auto shape_info_fn = static_cast<const MyType*>(op)->shape_infer_fn_;
985
+ ShapeInferContext ctx(&GetApi(), ort_ctx);
986
+ return shape_info_fn(ctx);
987
+ };
988
+ }
989
+ }
990
+ }; // struct OrtLiteCustomFunc
991
+
992
+ /////////////////////////// OrtLiteCustomStruct ///////////////////////////
993
+ // The struct is to implement struct-as-op.
994
+ // E.g. a struct might be defined as:
995
+ // struct Merge {
996
+ // Merge(const OrtApi* ort_api, const OrtKernelInfo* info) {...}
997
+ // void Compute(const Ort::Custom::Tensor<std::string_view>& strings_in,
998
+ // std::string_view string_in,
999
+ // Ort::Custom::Tensor<std::string>* strings_out) {...}
1000
+ // bool reverse_ = false;
1001
+ // };
1002
+ // It could be registered this way:
1003
+ // Ort::CustomOpDomain v2_domain{"v2"};
1004
+ // std::unique_ptr<OrtLiteCustomOp> mrg_op_ptr{Ort::Custom::CreateLiteCustomOp<Merge>("Merge", "CPUExecutionProvider")};
1005
+ // v2_domain.Add(mrg_op_ptr.get());
1006
+ // session_options.Add(v2_domain);
1007
+ // For the complete example, please search keyword "LiteCustomOpTest" under "<cloned_src_dir>/onnxruntime/test/".
1008
+ template <typename CustomOp>
1009
+ struct OrtLiteCustomStruct : public OrtLiteCustomOp {
1010
+ template <typename... Args>
1011
+ using CustomComputeFn = void (CustomOp::*)(Args...);
1012
+
1013
+ template <typename... Args>
1014
+ using CustomComputeFnReturnStatus = Status (CustomOp::*)(Args...);
1015
+
1016
+ using MyType = OrtLiteCustomStruct<CustomOp>;
1017
+
1018
+ struct Kernel {
1019
+ size_t num_input_{};
1020
+ size_t num_output_{};
1021
+ std::unique_ptr<CustomOp> custom_op_;
1022
+ std::string ep_{};
1023
+ };
1024
+
1025
+ OrtLiteCustomStruct(const char* op_name,
1026
+ const char* execution_provider,
1027
+ int start_ver = 1,
1028
+ int end_ver = MAX_CUSTOM_OP_END_VER) : OrtLiteCustomOp(op_name, execution_provider, {}, start_ver, end_ver) {
1029
+ SetCompute(&CustomOp::Compute);
1030
+
1031
+ OrtCustomOp::CreateKernel = [](const OrtCustomOp* this_, const OrtApi* ort_api, const OrtKernelInfo* info) {
1032
+ auto kernel = std::make_unique<Kernel>();
1033
+ Ort::ThrowOnError(ort_api->KernelInfo_GetInputCount(info, &kernel->num_input_));
1034
+ Ort::ThrowOnError(ort_api->KernelInfo_GetOutputCount(info, &kernel->num_output_));
1035
+ kernel->custom_op_ = std::make_unique<CustomOp>(ort_api, info);
1036
+ auto self = static_cast<const OrtLiteCustomStruct*>(this_);
1037
+ kernel->ep_ = self->execution_provider_;
1038
+ return reinterpret_cast<void*>(kernel.release());
1039
+ };
1040
+
1041
+ OrtCustomOp::KernelDestroy = [](void* op_kernel) {
1042
+ delete reinterpret_cast<Kernel*>(op_kernel);
1043
+ };
1044
+
1045
+ SetShapeInfer<CustomOp>(0);
1046
+ }
1047
+
1048
+ template <typename... Args>
1049
+ void SetCompute(CustomComputeFn<Args...>) {
1050
+ ParseArgs<Args...>(input_types_, output_types_);
1051
+ OrtCustomOp::KernelCompute = [](void* op_kernel, OrtKernelContext* context) {
1052
+ auto kernel = reinterpret_cast<Kernel*>(op_kernel);
1053
+ ArgPtrs args;
1054
+ auto t = CreateTuple<0, 0, Args...>(context, args, kernel->num_input_, kernel->num_output_, kernel->ep_);
1055
+ std::apply([kernel](Args const&... t_args) { kernel->custom_op_->Compute(t_args...); }, t);
1056
+ };
1057
+ }
1058
+
1059
+ template <typename... Args>
1060
+ void SetCompute(CustomComputeFnReturnStatus<Args...>) {
1061
+ ParseArgs<Args...>(input_types_, output_types_);
1062
+ OrtCustomOp::KernelComputeV2 = [](void* op_kernel, OrtKernelContext* context) -> OrtStatusPtr {
1063
+ auto kernel = reinterpret_cast<Kernel*>(op_kernel);
1064
+ ArgPtrs args;
1065
+ auto t = CreateTuple<0, 0, Args...>(context, args, kernel->num_input_, kernel->num_output_, kernel->ep_);
1066
+ return std::apply([kernel](Args const&... t_args) { Status status = kernel->custom_op_->Compute(t_args...); return status.release(); }, t);
1067
+ };
1068
+ }
1069
+
1070
+ template <typename C>
1071
+ decltype(&C::InferOutputShape) SetShapeInfer(decltype(&C::InferOutputShape)) {
1072
+ OrtCustomOp::InferOutputShapeFn = [](const OrtCustomOp*, OrtShapeInferContext* ort_ctx) -> OrtStatusPtr {
1073
+ ShapeInferContext ctx(&GetApi(), ort_ctx);
1074
+ return C::InferOutputShape(ctx);
1075
+ };
1076
+ return {};
1077
+ }
1078
+
1079
+ template <typename C>
1080
+ void SetShapeInfer(...) {
1081
+ OrtCustomOp::InferOutputShapeFn = {};
1082
+ }
1083
+ }; // struct OrtLiteCustomStruct
1084
+
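// --- Editorial sketch (not part of this header) ---------------------------------
// A hedged struct-as-op illustration, simpler than the Merge example above: the
// attribute handling is omitted and the scale value is hard-coded for brevity.
//
//   struct Scale {
//     Scale(const OrtApi*, const OrtKernelInfo*) {}  // per-kernel state could be read from info here
//     void Compute(const Ort::Custom::Tensor<float>& in,
//                  Ort::Custom::Tensor<float>& out) {
//       const float* x = in.Data();
//       float* y = out.Allocate(in.Shape());  // output takes the same shape as the input
//       for (int64_t i = 0, n = in.NumberOfElement(); i < n; ++i) y[i] = scale_ * x[i];
//     }
//     float scale_ = 2.f;
//   };
//
//   Registered via CreateLiteCustomOp<Scale>("Scale", "CPUExecutionProvider"), defined
//   in the section right below; the schema is inferred from Compute's signature.
// ---------------------------------------------------------------------------------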
1085
+ /////////////////////////// CreateLiteCustomOp ////////////////////////////
1086
+
1087
+ template <typename... Args>
1088
+ OrtLiteCustomOp* CreateLiteCustomOp(const char* op_name,
1089
+ const char* execution_provider,
1090
+ void (*custom_compute_fn)(Args...),
1091
+ Status (*shape_infer_fn)(ShapeInferContext&) = {},
1092
+ int start_ver = 1,
1093
+ int end_ver = MAX_CUSTOM_OP_END_VER) {
1094
+ using LiteOp = OrtLiteCustomFunc<Args...>;
1095
+ return std::make_unique<LiteOp>(op_name, execution_provider, custom_compute_fn, shape_infer_fn, start_ver, end_ver).release();
1096
+ }
1097
+
1098
+ template <typename... Args>
1099
+ OrtLiteCustomOp* CreateLiteCustomOp(const char* op_name,
1100
+ const char* execution_provider,
1101
+ Status (*custom_compute_fn_v2)(Args...),
1102
+ Status (*shape_infer_fn)(ShapeInferContext&) = {},
1103
+ int start_ver = 1,
1104
+ int end_ver = MAX_CUSTOM_OP_END_VER) {
1105
+ using LiteOp = OrtLiteCustomFunc<Args...>;
1106
+ return std::make_unique<LiteOp>(op_name, execution_provider, custom_compute_fn_v2, shape_infer_fn, start_ver, end_ver).release();
1107
+ }
1108
+
1109
+ template <typename CustomOp>
1110
+ OrtLiteCustomOp* CreateLiteCustomOp(const char* op_name,
1111
+ const char* execution_provider,
1112
+ int start_ver = 1,
1113
+ int end_ver = MAX_CUSTOM_OP_END_VER) {
1114
+ using LiteOp = OrtLiteCustomStruct<CustomOp>;
1115
+ return std::make_unique<LiteOp>(op_name, execution_provider, start_ver, end_ver).release();
1116
+ }
1117
+
1118
+ } // namespace Custom
1119
+ } // namespace Ort
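Usage sketch (not part of this commit): the CreateLiteCustomOp overloads above pair with Ort::CustomOpDomain and Ort::SessionOptions from onnxruntime_cxx_api.h to register a function-based custom op. The MyAdd kernel, the "my.domain" name, and the Ort::Custom::Tensor helpers (assumed here to come from onnxruntime_lite_custom_op.h) are illustrative assumptions, not code shipped in this package.

// Sketch only: a hypothetical element-wise add kernel and its registration.
#include <memory>
#include "onnxruntime_cxx_api.h"
#include "onnxruntime_lite_custom_op.h"  // assumed to provide Ort::Custom::Tensor

// The argument types are what ParseArgs/CreateTuple above use to bind kernel
// inputs and outputs to the OrtKernelContext.
static void MyAdd(const Ort::Custom::Tensor<float>& x,
                  const Ort::Custom::Tensor<float>& y,
                  Ort::Custom::Tensor<float>& z) {
  const float* xd = x.Data();
  const float* yd = y.Data();
  float* zd = z.Allocate(x.Shape());  // allocate the output with x's shape
  for (int64_t i = 0; i < x.NumberOfElement(); ++i) {
    zd[i] = xd[i] + yd[i];
  }
}

// The returned OrtLiteCustomOp* is owned by the caller; it and the domain
// must outlive every session created with these options.
static void AddMyAddOp(Ort::SessionOptions& session_options,
                       Ort::CustomOpDomain& domain,
                       std::unique_ptr<Ort::Custom::OrtLiteCustomOp>& op_holder) {
  op_holder.reset(Ort::Custom::CreateLiteCustomOp("MyAdd", "CPUExecutionProvider", MyAdd));
  domain.Add(op_holder.get());
  session_options.Add(domain);
}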
1.19.0/onnxruntime.xcframework/Headers/onnxruntime_run_options_config_keys.h ADDED
@@ -0,0 +1,51 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+
+ #pragma once
+
+ /*
+  * This file defines RunOptions Config Keys and the format of the Config Values.
+  *
+  * The naming convention for a RunOptions Config Key is
+  *     "[Area][.[SubArea1].[SubArea2]...].[Keyname]"
+  * such as "ep.cuda.use_arena".
+  * The Config Key cannot be empty.
+  * The maximum length of the Config Key is 128.
+  *
+  * The string format of a RunOptions Config Value is defined individually for each Config.
+  * The maximum length of the Config Value is 1024.
+  */
+
+ // Key for enabling shrinkage of user-listed device memory arenas.
+ // Expects a list of semicolon-separated key-value pairs, each pair separated by a colon, in the following format:
+ //     "device_0:device_id_0;device_1:device_id_1"
+ // No whitespace is allowed in the provided list string.
+ // Currently, the only supported devices are "cpu" and "gpu" (case sensitive).
+ // If "cpu" is included in the list, the DisableCpuMemArena() API must not be called, i.e. the arena for cpu should be enabled.
+ // Example usage: "cpu:0;gpu:0" (or) "gpu:0"
+ // By default, the value for this key is empty, i.e. no memory arenas are shrunk.
+ static const char* const kOrtRunOptionsConfigEnableMemoryArenaShrinkage = "memory.enable_memory_arena_shrinkage";
+
+ // Set to '1' to not synchronize execution providers with the CPU at the end of a session run.
+ // By default it is set to '0'.
+ // Taking the CUDA EP as an example, this omits triggering cudaStreamSynchronize on the compute stream.
+ static const char* const kOrtRunOptionsConfigDisableSynchronizeExecutionProviders = "disable_synchronize_execution_providers";
+
+ // Set the HTP performance mode for the QNN HTP backend before session run.
+ // Options for the HTP performance mode: "burst", "balanced", "default", "high_performance",
+ // "high_power_saver", "low_balanced", "extreme_power_saver", "low_power_saver", "power_saver",
+ // "sustained_high_performance". Defaults to "default".
+ static const char* const kOrtRunOptionsConfigQnnPerfMode = "qnn.htp_perf_mode";
+
+ // Set the HTP performance mode for the QNN HTP backend after session run.
+ static const char* const kOrtRunOptionsConfigQnnPerfModePostRun = "qnn.htp_perf_mode_post_run";
+
+ // Set the RPC control latency for the QNN HTP backend.
+ static const char* const kOrtRunOptionsConfigQnnRpcControlLatency = "qnn.rpc_control_latency";
+
+ // Set the graph annotation id for the CUDA EP. Use with enable_cuda_graph=true.
+ // The value should be an integer. If the value is not set, the default value is 0 and
+ // the ORT session only captures one cuda graph before another capture is requested.
+ // If the value is set to -1, cuda graph capture/replay is disabled in that run.
+ // Users are not expected to set the value to 0 as it is reserved for internal use.
+ static const char* const kOrtRunOptionsConfigCudaGraphAnnotation = "gpu_graph_id";
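Usage sketch (not part of this header): run-level keys such as the arena-shrinkage key above are attached to a run through Ort::RunOptions::AddConfigEntry before calling Session::Run. The session handle, tensor names, and counts below are placeholders.

#include "onnxruntime_cxx_api.h"
#include "onnxruntime_run_options_config_keys.h"

// 'session', the name arrays, and the input values are assumed to exist elsewhere.
void RunWithArenaShrinkage(Ort::Session& session,
                           const char* const* input_names, const Ort::Value* inputs, size_t input_count,
                           const char* const* output_names, size_t output_count) {
  Ort::RunOptions run_options;
  // Shrink the CPU arena once this run finishes (requires the CPU arena to be enabled).
  run_options.AddConfigEntry(kOrtRunOptionsConfigEnableMemoryArenaShrinkage, "cpu:0");
  auto outputs = session.Run(run_options, input_names, inputs, input_count,
                             output_names, output_count);
  (void)outputs;
}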
1.19.0/onnxruntime.xcframework/Headers/onnxruntime_session_options_config_keys.h ADDED
@@ -0,0 +1,281 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+
+ #pragma once
+
+ /*
+  * This file defines SessionOptions Config Keys and the format of the Config Values.
+  *
+  * The naming convention for a SessionOptions Config Key is
+  *     "[Area][.[SubArea1].[SubArea2]...].[Keyname]"
+  * such as "ep.cuda.use_arena".
+  * The Config Key cannot be empty.
+  * The maximum length of the Config Key is 128.
+  *
+  * The string format of a SessionOptions Config Value is defined individually for each Config.
+  * The maximum length of the Config Value is 1024.
+  */
+
+ // Key for disabling prepacking.
+ // If the config value is set to "1" then prepacking is disabled; otherwise prepacking is enabled (default).
+ static const char* const kOrtSessionOptionsConfigDisablePrepacking = "session.disable_prepacking";
+
+ // A value of "1" means allocators registered in the env will be used. "0" means the allocators created in the session
+ // will be used. Use this to override the usage of env allocators on a per-session level.
+ static const char* const kOrtSessionOptionsConfigUseEnvAllocators = "session.use_env_allocators";
+
+ // Set to 'ORT' (case sensitive) to load an ORT format model.
+ // If unset, the model type will default to ONNX unless inferred from the filename ('.ort' == ORT format) or the bytes to be ORT.
+ static const char* const kOrtSessionOptionsConfigLoadModelFormat = "session.load_model_format";
+
+ // Set to 'ORT' (case sensitive) to save the optimized model in ORT format when SessionOptions.optimized_model_path is set.
+ // If unset, the format will default to ONNX unless optimized_model_filepath ends in '.ort'.
+ static const char* const kOrtSessionOptionsConfigSaveModelFormat = "session.save_model_format";
+
+ // If the value is "1", flush-to-zero and denormal-as-zero are applied. The default is "0".
+ // When multiple sessions are created, the main thread doesn't override changes from succeeding session options,
+ // but threads in session thread pools follow option changes.
+ // When ORT runs with OpenMP, the same rule is applied, i.e. the first session option to flush-to-zero and
+ // denormal-as-zero is only applied to the global OpenMP thread pool, which doesn't support per-session thread pools.
+ // Note that an alternative that avoids this runtime option is to train and export a model without denormals;
+ // that is recommended because turning this option on may hurt model accuracy.
+ static const char* const kOrtSessionOptionsConfigSetDenormalAsZero = "session.set_denormal_as_zero";
+
+ // Controls whether to run a quantized model in QDQ (QuantizeLinear/DequantizeLinear) format.
+ // "0": enable. ORT does fusion logic for the QDQ format.
+ // "1": disable. ORT doesn't do fusion logic for the QDQ format.
+ // Its default value is "0" unless the DirectML execution provider is registered, in which case it defaults to "1".
+ static const char* const kOrtSessionOptionsDisableQuantQDQ = "session.disable_quant_qdq";
+
+ // Controls whether to enable the Double QDQ remover and Identical Children Consolidation.
+ // "0": not disabled. ORT removes the middle two nodes from Q->(DQ->Q)->DQ pairs.
+ // "1": disabled. ORT doesn't remove the middle two nodes from Q->(DQ->Q)->DQ pairs.
+ // Its default value is "0".
+ static const char* const kOrtSessionOptionsDisableDoubleQDQRemover = "session.disable_double_qdq_remover";
+
+ // If set to "1", enables the removal of QuantizeLinear/DequantizeLinear node pairs once all QDQ handling has been
+ // completed. e.g. if after all QDQ handling has completed we have -> FloatOp -> Q -> DQ -> FloatOp ->, the
+ // Q -> DQ could potentially be removed. This will provide a performance benefit by avoiding going from float to
+ // 8-bit and back to float, but could impact accuracy. The impact on accuracy will be model specific and depend on
+ // other factors like whether the model was created using Quantization Aware Training or Post Training Quantization.
+ // As such, it's best to test to determine if enabling this works well for your scenario.
+ // The default value is "0".
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsEnableQuantQDQCleanup = "session.enable_quant_qdq_cleanup";
+
+ // Enable or disable gelu approximation in graph optimization. "0": disable; "1": enable. The default is "0".
+ // GeluApproximation has side effects which may change the inference results. It is disabled by default due to this.
+ static const char* const kOrtSessionOptionsEnableGeluApproximation = "optimization.enable_gelu_approximation";
+
+ // This setting controls whether to enable AheadOfTime function inlining.
+ // AOT function inlining examines the graph and attempts to inline as many locally defined functions in the model
+ // as possible with the help of enabled execution providers.
+ // This can reduce the number of function calls and improve performance because it is done before
+ // Level1 optimizers and constant folding. However, under some circumstances, when the EPs are not available,
+ // one can disable AOT inlining, produce an optimized model, and postpone AOT until run time.
+ // "0": enable; "1": disable.
+ // Its default value is "0".
+ static const char* const kOrtSessionOptionsDisableAheadOfTimeFunctionInlining = "session.disable_aot_function_inlining";
+
+ #ifdef ENABLE_TRAINING
+ // Specifies the path of a file containing a list of memory optimization configurations.
+ // The value should be a string indicating the file path of the config file.
+ // The content of the config file is a JSON struct like this:
+ //   [
+ //     "Gelu+Cast+:1:0",
+ //     "Dropout+:1:1"
+ //   ]
+ // Taking the example of "Gelu+Cast+:1:0":
+ // > "Gelu+Cast+" is the subgraph string; a valid "subgraph string" should be one subgraph representation
+ //   output by ORT graph transformations.
+ // > "1" is the "optimization strategy"; valid values: 0 - disabled, 1 - recompute.
+ // > "0" is the "number of subgraphs to apply", which controls how many subgraphs the optimization is applied to,
+ //   to avoid "oversaving" the memory.
+ static const char* const kOrtSessionOptionsMemoryOptimizerApplyConfig = "optimization.memory_optimizer_config";
+
+ // Specifies the config for detecting subgraphs for memory footprint reduction.
+ // The value should be a string containing ints separated by commas. The default value is "0:0".
+ static const char* const kOrtSessionOptionsMemoryOptimizerProbeConfig = "optimization.enable_memory_probe_recompute_config";
+ #endif
+
+ // This setting, if set, should contain a comma-separated list of optimizer names that should be disabled.
+ // Optimizers may take time to execute and affect model loading time. If you feel that a specific optimizer
+ // does not provide runtime benefits, but affects your model loading time, you may disable it using this config
+ // entry. This option is not enabled in ORT_MINIMAL_BUILD builds.
+ // A list of optimizers is available in onnxruntime/core/optimizer/graph_transformer_utils.cc
+ //
+ // Default is an empty string, which means no optimizers are disabled.
+ static const char* const kOrtSessionOptionsDisableSpecifiedOptimizers = "optimization.disable_specified_optimizers";
+
+ // Enable or disable using device allocators for allocating initialized tensor memory. "1": enable; "0": disable. The default is "0".
+ // Using device allocators means the memory allocation is made using malloc/new.
+ static const char* const kOrtSessionOptionsUseDeviceAllocatorForInitializers = "session.use_device_allocator_for_initializers";
+
+ // Configure whether to allow the inter_op/intra_op threads to spin a number of times before blocking.
+ // "0": a thread will block if it finds no job to run.
+ // "1": default; a thread will spin a number of times before blocking.
+ static const char* const kOrtSessionOptionsConfigAllowInterOpSpinning = "session.inter_op.allow_spinning";
+ static const char* const kOrtSessionOptionsConfigAllowIntraOpSpinning = "session.intra_op.allow_spinning";
+
+ // Key for using model bytes directly for the ORT format.
+ // If a session is created using an input byte array that contains the ORT format model data,
+ // by default we will copy the model bytes at the time of session creation to ensure the model bytes
+ // buffer is valid.
+ // Setting this option to "1" will disable copying the model bytes and use the model bytes directly. The caller
+ // has to guarantee that the model bytes are valid until the ORT session using the model bytes is destroyed.
+ static const char* const kOrtSessionOptionsConfigUseORTModelBytesDirectly = "session.use_ort_model_bytes_directly";
+
+ /// <summary>
+ /// Key for using the ORT format model flatbuffer bytes directly for initializers.
+ /// This avoids copying the bytes and reduces peak memory usage during model loading and initialization.
+ /// Requires `session.use_ort_model_bytes_directly` to be true.
+ /// If set, the flatbuffer bytes provided when creating the InferenceSession MUST remain valid for the entire
+ /// duration of the InferenceSession.
+ /// </summary>
+ static const char* const kOrtSessionOptionsConfigUseORTModelBytesForInitializers =
+     "session.use_ort_model_bytes_for_initializers";
+
+ // This should only be specified when exporting an ORT format model for use on a different platform.
+ // If the ORT format model will be used on ARM platforms set to "1". For other platforms set to "0".
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsQDQIsInt8Allowed = "session.qdqisint8allowed";
+
+ // x64 SSE4.1/AVX2/AVX512 (with no VNNI) has an overflow problem with quantized matrix multiplication with U8S8.
+ // To avoid this we need to use the slower U8U8 matrix multiplication instead. This option, if
+ // turned on, uses the slower U8U8 matrix multiplications. Only effective on AVX2 or AVX512
+ // platforms.
+ static const char* const kOrtSessionOptionsAvx2PrecisionMode = "session.x64quantprecision";
+
+ // Specifies how minimal build graph optimizations are handled in a full build.
+ // These optimizations are at the extended level or higher.
+ // Possible values and their effects are:
+ // "save": Save runtime optimizations when saving an ORT format model.
+ // "apply": Only apply optimizations available in a minimal build.
+ // ""/<unspecified>: Apply optimizations available in a full build.
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsConfigMinimalBuildOptimizations =
+     "optimization.minimal_build_optimizations";
+
+ // Note: The options specific to an EP should be specified prior to appending that EP to the session options object in
+ // order for them to take effect.
+
+ // Specifies a list of stop op types. Nodes of a type in the stop op types and nodes downstream from them will not be
+ // run by the NNAPI EP.
+ // The value should be a ","-delimited list of op types. For example, "Add,Sub".
+ // If not specified, the default set of stop ops is used. To specify an empty stop op types list and disable stop op
+ // exclusion, set the value to "".
+ static const char* const kOrtSessionOptionsConfigNnapiEpPartitioningStopOps = "ep.nnapi.partitioning_stop_ops";
+
+ // Enables dynamic block-sizing for multithreading.
+ // With a positive value, the thread pool will split a task of N iterations into blocks of size starting from:
+ //     N / (num_of_threads * dynamic_block_base)
+ // As execution progresses, the size will decrease according to the diminishing residual of N,
+ // meaning the task will be distributed at smaller granularity for better parallelism.
+ // For some models, it helps to reduce the variance of E2E inference latency and boost performance.
+ // The feature will not function by default; specify any positive integer, e.g. "4", to enable it.
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsConfigDynamicBlockBase = "session.dynamic_block_base";
+
+ // This option allows decreasing CPU usage between infrequent
+ // requests and forces any spinning TP threads to stop immediately when the last of the
+ // concurrent Run() calls returns.
+ // Spinning is restarted on the next Run() call.
+ // Applies only to internal thread pools.
+ static const char* const kOrtSessionOptionsConfigForceSpinningStop = "session.force_spinning_stop";
+
+ // "1": all inconsistencies encountered during shape and type inference
+ //      will result in failures.
+ // "0": in some cases warnings will be logged but processing will continue. The default.
+ // May be useful to expose bugs in models.
+ static const char* const kOrtSessionOptionsConfigStrictShapeTypeInference = "session.strict_shape_type_inference";
+
+ // "1": every model using a more recent opset than the latest released one will fail.
+ // "0": the model may or may not work if onnxruntime cannot find an implementation; this option
+ //      is used for development purposes.
+ static const char* const kOrtSessionOptionsConfigStrictAllowReleasedOpsetsOnly = "session.allow_released_opsets_only";
+
+ // The file that saves the configuration for partitioning nodes among logic streams.
+ static const char* const kNodePartitionConfigFile = "session.node_partition_config_file";
+
+ // This option allows setting affinities for intra-op threads.
+ // The affinity string follows the format:
+ //     logical_processor_id,logical_processor_id;logical_processor_id,logical_processor_id
+ // A semicolon separates configurations of different threads, while a comma separates the processors the i-th thread is expected to attach to.
+ // e.g. 1,2,3;4,5
+ // specifies affinities for two threads, with the 1st thread attached to the 1st, 2nd, and 3rd processors, and the 2nd thread to the 4th and 5th.
+ // To ease the configuration, an "interval" is also allowed:
+ // e.g. 1-8;8-16;17-24
+ // specifies that the 1st thread runs on the first eight processors, the 2nd thread runs on the next eight processors, and so forth.
+ // Note:
+ // 1. Once set, the number of thread affinities must equal intra_op_num_threads - 1, since ort does not set affinity on the main thread, which
+ //    is started and managed by the calling app;
+ // 2. On Windows, ort will infer the group id from a logical processor id; for example, assuming there are two groups, each with 64 logical processors,
+ //    an id of 64 will be inferred as the last processor of the 1st group, while 65 will be interpreted as the 1st processor of the second group.
+ //    Hence 64-65 is an invalid configuration, because a Windows thread cannot be attached to processors across a group boundary.
+ static const char* const kOrtSessionOptionsConfigIntraOpThreadAffinities = "session.intra_op_thread_affinities";
+
+ // This option will dump out the model to assist debugging any issues with layout transformation,
+ // and is primarily intended for developer usage. It is only relevant if an execution provider that requests
+ // NHWC layout is enabled, such as NNAPI, XNNPACK or QNN.
+ //
+ // Default is off. Set to "1" to enable.
+ //
+ // If modified by layout transformation, the model will be dumped after these steps:
+ //   1) insertion of the layout transformation Transpose nodes
+ //   2) after those are optimized using the transpose optimizer
+ //   3) after the L1 transformers are applied to the updated graph
+ // The model will be saved to filename post_layout_transform_step_<step_number>.onnx.
+ static const char* const kDebugLayoutTransformation = "session.debug_layout_transformation";
+
+ // Graph nodes that are not supported by the execution providers (EPs) explicitly added to the session are
+ // assigned (i.e., "fallback") to the CPU EP by default.
+ //
+ // This option allows the user to disable the fallback of unsupported graph nodes to the CPU EP.
+ // If this option is set to "1", session creation will fail if the execution providers other than the CPU EP cannot
+ // fully support all of the nodes in the graph.
+ //
+ // It is invalid to set this option and explicitly add the CPU EP to the session. In this case, session creation
+ // will also fail with an error.
+ //
+ // Option values:
+ // - "0": CPU EP fallback is not disabled. [DEFAULT]
+ // - "1": CPU EP fallback is disabled.
+ static const char* const kOrtSessionOptionsDisableCPUEPFallback = "session.disable_cpu_ep_fallback";
+
+ // Use this config when serializing a large model after optimization to specify an external initializers file.
+ static const char* const kOrtSessionOptionsOptimizedModelExternalInitializersFileName =
+     "session.optimized_model_external_initializers_file_name";
+
+ // Use this config to control the minimum size an initializer must have to be externalized during serialization.
+ static const char* const kOrtSessionOptionsOptimizedModelExternalInitializersMinSizeInBytes =
+     "session.optimized_model_external_initializers_min_size_in_bytes";
+
+ // Enable the EP context feature to dump the partitioned graph, which includes the EP context, into an Onnx file.
+ // The dumped Onnx model with EP context can be used for future inference to avoid the EP graph partitioning/compile overhead.
+ // "0": disable. (default)
+ // "1": enable.
+ static const char* const kOrtSessionOptionEpContextEnable = "ep.context_enable";
+
+ // Specify the file path for the Onnx model which has EP context.
+ // Defaults to original_file_name_ctx.onnx if not specified.
+ static const char* const kOrtSessionOptionEpContextFilePath = "ep.context_file_path";
+
+ // Flag to specify whether to dump the EP context into the Onnx model.
+ // "0": dump the EP context into a separate file, keeping the file name in the Onnx model.
+ // "1": dump the EP context into the Onnx model. (default)
+ static const char* const kOrtSessionOptionEpContextEmbedMode = "ep.context_embed_mode";
+
+ // Specify the EPContext node name prefix to make it unique,
+ // in case the user needs to merge/connect multiple EPContext nodes in one model.
+ static const char* const kOrtSessionOptionEpContextNodeNamePrefix = "ep.context_node_name_prefix";
+
+ // Gemm fastmath mode provides fp32 gemm acceleration with bfloat16-based matmul.
+ // Option values:
+ // - "0": Gemm FastMath mode is not enabled. [DEFAULT]
+ // - "1": Gemm FastMath mode is enabled.
+ static const char* const kOrtSessionOptionsMlasGemmFastMathArm64Bfloat16 = "mlas.enable_gemm_fastmath_arm64_bfloat16";
+
+ // When converting DQ + MatMul -> MatMulNBits, the accuracy level of the MatMulNBits is controlled by this option.
+ // Refer to the MatMulNBits op schema for more details.
+ // If not provided, the default is 4.
+ static const char* const kOrtSessionOptionsQDQMatMulNBitsAccuracyLevel = "session.qdq_matmulnbits_accuracy_level";
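Usage sketch (not part of this header): session-level keys are set with Ort::SessionOptions::AddConfigEntry before the session is created. The model path, thread count, and chosen values below are illustrative assumptions.

#include "onnxruntime_cxx_api.h"
#include "onnxruntime_session_options_config_keys.h"

// Builds a session that fails loudly on unsupported nodes and pins intra-op
// worker threads; values are examples, not recommendations.
Ort::Session CreateTunedSession(Ort::Env& env, const ORTCHAR_T* model_path) {
  Ort::SessionOptions session_options;
  // Fail session creation instead of silently falling back to the CPU EP.
  session_options.AddConfigEntry(kOrtSessionOptionsDisableCPUEPFallback, "1");
  // Two affinity entries for intra_op_num_threads - 1 = 2 worker threads,
  // matching the rule documented for session.intra_op_thread_affinities.
  session_options.SetIntraOpNumThreads(3);
  session_options.AddConfigEntry(kOrtSessionOptionsConfigIntraOpThreadAffinities, "1,2;3,4");
  return Ort::Session(env, model_path, session_options);
}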
1.19.0/onnxruntime.xcframework/Info.plist ADDED
@@ -0,0 +1,59 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+ <plist version="1.0">
+ <dict>
+   <key>AvailableLibraries</key>
+   <array>
+     <dict>
+       <key>BinaryPath</key>
+       <string>onnxruntime.a</string>
+       <key>LibraryIdentifier</key>
+       <string>ios-arm64</string>
+       <key>LibraryPath</key>
+       <string>onnxruntime.a</string>
+       <key>SupportedArchitectures</key>
+       <array>
+         <string>arm64</string>
+       </array>
+       <key>SupportedPlatform</key>
+       <string>ios</string>
+     </dict>
+     <dict>
+       <key>BinaryPath</key>
+       <string>onnxruntime.a</string>
+       <key>LibraryIdentifier</key>
+       <string>ios-arm64_x86_64-simulator</string>
+       <key>LibraryPath</key>
+       <string>onnxruntime.a</string>
+       <key>SupportedArchitectures</key>
+       <array>
+         <string>arm64</string>
+         <string>x86_64</string>
+       </array>
+       <key>SupportedPlatform</key>
+       <string>ios</string>
+       <key>SupportedPlatformVariant</key>
+       <string>simulator</string>
+     </dict>
+     <dict>
+       <key>BinaryPath</key>
+       <string>onnxruntime.a</string>
+       <key>LibraryIdentifier</key>
+       <string>macos-arm64_x86_64</string>
+       <key>LibraryPath</key>
+       <string>onnxruntime.a</string>
+       <key>SupportedArchitectures</key>
+       <array>
+         <string>arm64</string>
+         <string>x86_64</string>
+       </array>
+       <key>SupportedPlatform</key>
+       <string>macos</string>
+     </dict>
+   </array>
+   <key>CFBundlePackageType</key>
+   <string>XFWK</string>
+   <key>XCFrameworkFormatVersion</key>
+   <string>1.0</string>
+ </dict>
+ </plist>
1.19.0/onnxruntime.xcframework/ios-arm64/libonnxruntime.a ADDED
@@ -0,0 +1 @@
+ onnxruntime.a
1.19.0/onnxruntime.xcframework/ios-arm64/onnxruntime.a ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdd25c2bd667422cd65a887b01a7fa22f2ed2c8b8e77f3b18e1afdb8e51e4e05
+ size 70629456
1.19.0/onnxruntime.xcframework/ios-arm64_x86_64-simulator/libonnxruntime.a ADDED
@@ -0,0 +1 @@
+ onnxruntime.a
1.19.0/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.a ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:000467707858217768a2812645d52023d8fa7b48d406314f5efbe5224c1379d7
+ size 144404664
1.19.0/onnxruntime.xcframework/macos-arm64_x86_64/libonnxruntime.a ADDED
@@ -0,0 +1 @@
+ onnxruntime.a
1.19.0/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.a ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5fd73fc147fae44684fea398bd315b453ad96b096e7b4c6ad6364b4e4986561
+ size 137292416