csukuangfj committed
Commit 8cac9ed · 1 Parent(s): 03531af

Add onnxruntime.xcframework 1.18.1

1.18.1/onnxruntime.xcframework/Headers/coreml_provider_factory.h ADDED
@@ -0,0 +1,48 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+ #pragma once
+
+ #include "onnxruntime_c_api.h"
+
+ // COREMLFlags are boolean options we want to set for the CoreML EP
+ // This enum is defined as bit flags and cannot have negative values
+ // To generate a uint32_t coreml_flags for use with OrtSessionOptionsAppendExecutionProvider_CoreML below:
+ // uint32_t coreml_flags = 0;
+ // coreml_flags |= COREML_FLAG_USE_CPU_ONLY;
+ enum COREMLFlags {
+ COREML_FLAG_USE_NONE = 0x000,
+
+ // Using CPU only in the CoreML EP may decrease performance, but it provides
+ // reference output values without precision loss, which is useful for validation
+ COREML_FLAG_USE_CPU_ONLY = 0x001,
+
+ // Enable CoreML EP on subgraphs
+ COREML_FLAG_ENABLE_ON_SUBGRAPH = 0x002,
+
+ // By default the CoreML execution provider is enabled for all compatible Apple devices
+ // Enabling this option restricts the CoreML EP to Apple devices with an ANE (Apple Neural Engine)
+ // Note that enabling this option does not guarantee that the entire model will be executed using the ANE only
+ COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004,
+
+ // Only allow CoreML EP to take nodes with inputs with static shapes. By default it will also allow inputs with
+ // dynamic shapes. However, the performance may be negatively impacted if inputs have dynamic shapes.
+ COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008,
+
+ // Create an MLProgram. By default it will create a NeuralNetwork model. Requires Core ML 5 or later.
+ COREML_FLAG_CREATE_MLPROGRAM = 0x010,
+
+ // Keep COREML_FLAG_LAST at the end of the enum definition
+ // and assign the last COREMLFlag to it
+ COREML_FLAG_LAST = COREML_FLAG_CREATE_MLPROGRAM,
+ };
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ ORT_EXPORT ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_CoreML,
+ _In_ OrtSessionOptions* options, uint32_t coreml_flags);
+
+ #ifdef __cplusplus
+ }
+ #endif
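
For orientation (not part of the commit), a minimal sketch of how the flag bits above combine into the coreml_flags argument of the entry point declared in this header; "options" is an assumed, already-created OrtSessionOptions* and error handling is abbreviated:

// Sketch: enable the CoreML EP on existing session options.
uint32_t coreml_flags = 0;
coreml_flags |= COREML_FLAG_CREATE_MLPROGRAM;               // prefer MLProgram (Core ML 5+)
coreml_flags |= COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES; // skip nodes with dynamic-shape inputs
OrtStatus* status = OrtSessionOptionsAppendExecutionProvider_CoreML(options, coreml_flags);
if (status != nullptr) {
  // Inspect/release the status via the OrtApi accessors in onnxruntime_c_api.h.
}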
1.18.1/onnxruntime.xcframework/Headers/cpu_provider_factory.h ADDED
@@ -0,0 +1,19 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+
+ #include "onnxruntime_c_api.h"
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ /**
+ * \param use_arena zero: false. non-zero: true.
+ */
+ ORT_EXPORT
+ ORT_API_STATUS(OrtSessionOptionsAppendExecutionProvider_CPU, _In_ OrtSessionOptions* options, int use_arena)
+ ORT_ALL_ARGS_NONNULL;
+
+ #ifdef __cplusplus
+ }
+ #endif
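
Similarly, a minimal sketch (not part of the commit) of enabling the CPU EP with the arena allocator via the declaration above; "options" is again an assumed pre-created OrtSessionOptions*:

// Sketch: append the CPU execution provider; use_arena is zero/non-zero per the doc comment.
OrtStatus* status = OrtSessionOptionsAppendExecutionProvider_CPU(options, 1 /* use_arena */);
if (status != nullptr) {
  // Handle the error as appropriate.
}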
1.18.1/onnxruntime.xcframework/Headers/onnxruntime_c_api.h ADDED
The diff for this file is too large to render. See raw diff
 
1.18.1/onnxruntime.xcframework/Headers/onnxruntime_cxx_api.h ADDED
The diff for this file is too large to render. See raw diff
 
1.18.1/onnxruntime.xcframework/Headers/onnxruntime_cxx_inline.h ADDED
@@ -0,0 +1,2125 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ // Do not include this file directly. Please include "onnxruntime_cxx_api.h" instead.
5
+ // If interested in trying out features of the new experimental C++ API, include "experimental_onnxruntime_cxx_api.h" instead.
6
+ //
7
+ // These are the inline implementations of the C++ header APIs. They're in this separate file as to not clutter
8
+ // the main C++ file with implementation details.
9
+
10
+ #include <algorithm>
11
+ #include <functional>
12
+ #include <iterator>
13
+ #include <type_traits>
14
+
15
+ // Convert OrtStatus to Ort::Status and return
16
+ // instead of throwing
17
+ #define ORT_CXX_RETURN_ON_API_FAIL(expression) \
18
+ { \
19
+ auto ort_status = (expression); \
20
+ if (ort_status) { \
21
+ return Ort::Status(ort_status); \
22
+ } \
23
+ }
24
+
25
+ #ifdef __cpp_if_constexpr
26
+ #define ORT_CXX_IF_CONSTEXPR if constexpr
27
+ #else
28
+ #define ORT_CXX_IF_CONSTEXPR if
29
+ #endif
30
+
31
+ namespace Ort {
32
+
33
+ namespace detail {
34
+ inline void ThrowStatus(const Status& st) {
35
+ std::string error_message = st.GetErrorMessage();
36
+ OrtErrorCode error_code = st.GetErrorCode();
37
+ ORT_CXX_API_THROW(std::move(error_message), error_code);
38
+ }
39
+ } // namespace detail
40
+
41
+ inline void ThrowOnError(OrtStatus* ort_status) {
42
+ if (ort_status) {
43
+ Ort::Status st(ort_status);
44
+ detail::ThrowStatus(st);
45
+ }
46
+ }
47
+
48
+ inline void ThrowOnError(const Status& st) {
49
+ if (st) {
50
+ detail::ThrowStatus(st);
51
+ }
52
+ }
53
+
54
+ inline Status::Status(OrtStatus* status) noexcept : Base<OrtStatus>{status} {
55
+ }
56
+
57
+ inline Status::Status(const std::exception& e) noexcept {
58
+ p_ = GetApi().CreateStatus(ORT_FAIL, e.what());
59
+ }
60
+
61
+ inline Status::Status(const Exception& e) noexcept {
62
+ p_ = GetApi().CreateStatus(e.GetOrtErrorCode(), e.what());
63
+ }
64
+
65
+ inline Status::Status(const char* message, OrtErrorCode code) noexcept {
66
+ p_ = GetApi().CreateStatus(code, message);
67
+ }
68
+
69
+ inline std::string Status::GetErrorMessage() const {
70
+ std::string message(GetApi().GetErrorMessage(p_));
71
+ return message;
72
+ }
73
+
74
+ inline OrtErrorCode Status::GetErrorCode() const {
75
+ return GetApi().GetErrorCode(p_);
76
+ }
77
+
78
+ inline bool Status::IsOK() const noexcept {
79
+ return (p_ == nullptr);
80
+ }
81
+
82
+ // This template converts a C++ type into it's ONNXTensorElementDataType
83
+ template <typename T>
84
+ struct TypeToTensorType;
85
+ template <>
86
+ struct TypeToTensorType<float> {
87
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
88
+ };
89
+ template <>
90
+ struct TypeToTensorType<Float16_t> {
91
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16;
92
+ };
93
+ template <>
94
+ struct TypeToTensorType<BFloat16_t> {
95
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16;
96
+ };
97
+ template <>
98
+ struct TypeToTensorType<double> {
99
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE;
100
+ };
101
+ template <>
102
+ struct TypeToTensorType<int8_t> {
103
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8;
104
+ };
105
+ template <>
106
+ struct TypeToTensorType<int16_t> {
107
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16;
108
+ };
109
+ template <>
110
+ struct TypeToTensorType<int32_t> {
111
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32;
112
+ };
113
+ template <>
114
+ struct TypeToTensorType<int64_t> {
115
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
116
+ };
117
+ template <>
118
+ struct TypeToTensorType<uint8_t> {
119
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
120
+ };
121
+ template <>
122
+ struct TypeToTensorType<uint16_t> {
123
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16;
124
+ };
125
+ template <>
126
+ struct TypeToTensorType<uint32_t> {
127
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32;
128
+ };
129
+ template <>
130
+ struct TypeToTensorType<uint64_t> {
131
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64;
132
+ };
133
+ template <>
134
+ struct TypeToTensorType<bool> {
135
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL;
136
+ };
137
+
138
+ template <>
139
+ struct TypeToTensorType<Float8E4M3FN_t> {
140
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FN;
141
+ };
142
+ template <>
143
+ struct TypeToTensorType<Float8E4M3FNUZ_t> {
144
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E4M3FNUZ;
145
+ };
146
+ template <>
147
+ struct TypeToTensorType<Float8E5M2_t> {
148
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2;
149
+ };
150
+ template <>
151
+ struct TypeToTensorType<Float8E5M2FNUZ_t> {
152
+ static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT8E5M2FNUZ;
153
+ };
154
+
155
+ inline bool BFloat16_t::operator==(const BFloat16_t& rhs) const noexcept {
156
+ if (IsNaN() || rhs.IsNaN()) {
157
+ // IEEE defines that NaN is not equal to anything, including itself.
158
+ return false;
159
+ }
160
+ return val == rhs.val;
161
+ }
162
+
163
+ inline bool BFloat16_t::operator<(const BFloat16_t& rhs) const noexcept {
164
+ if (IsNaN() || rhs.IsNaN()) {
165
+ // IEEE defines that NaN is unordered with respect to everything, including itself.
166
+ return false;
167
+ }
168
+
169
+ const bool left_is_negative = IsNegative();
170
+ if (left_is_negative != rhs.IsNegative()) {
171
+ // When the signs of left and right differ, we know that left is less than right if it is
172
+ // the negative value. The exception to this is if both values are zero, in which case IEEE
173
+ // says they should be equal, even if the signs differ.
174
+ return left_is_negative && !AreZero(*this, rhs);
175
+ }
176
+ return (val != rhs.val) && ((val < rhs.val) ^ left_is_negative);
177
+ }
178
+
179
+ inline MemoryAllocation::MemoryAllocation(OrtAllocator* allocator, void* p, size_t size)
180
+ : allocator_(allocator), p_(p), size_(size) {
181
+ }
182
+
183
+ inline MemoryAllocation::~MemoryAllocation() {
184
+ if (p_ != nullptr) {
185
+ // We do not throw out of destructor
186
+ auto ret = GetApi().AllocatorFree(allocator_, p_);
187
+ static_cast<void>(ret);
188
+ }
189
+ }
190
+
191
+ inline MemoryAllocation::MemoryAllocation(MemoryAllocation&& o) noexcept : allocator_(nullptr), p_(nullptr), size_(0) {
192
+ *this = std::move(o);
193
+ }
194
+
195
+ inline MemoryAllocation& MemoryAllocation::operator=(MemoryAllocation&& o) noexcept {
196
+ OrtAllocator* alloc = nullptr;
197
+ void* p = nullptr;
198
+ size_t sz = 0;
199
+
200
+ // Swap out this
201
+ std::swap(alloc, allocator_);
202
+ std::swap(p, p_);
203
+ std::swap(sz, size_);
204
+
205
+ // Swap with incoming
206
+ std::swap(allocator_, o.allocator_);
207
+ std::swap(p_, o.p_);
208
+ std::swap(size_, o.size_);
209
+
210
+ // Destroy this instance if needed
211
+ MemoryAllocation this_alloc(alloc, p, sz);
212
+ return *this;
213
+ }
214
+
215
+ namespace detail {
216
+
217
+ template <typename T>
218
+ inline void* AllocatorImpl<T>::Alloc(size_t size) {
219
+ void* out;
220
+ ThrowOnError(GetApi().AllocatorAlloc(this->p_, size, &out));
221
+ return out;
222
+ }
223
+
224
+ template <typename T>
225
+ inline MemoryAllocation AllocatorImpl<T>::GetAllocation(size_t size) {
226
+ void* out;
227
+ ThrowOnError(GetApi().AllocatorAlloc(this->p_, size, &out));
228
+ MemoryAllocation result(this->p_, out, size);
229
+ return result;
230
+ }
231
+
232
+ template <typename T>
233
+ inline void AllocatorImpl<T>::Free(void* p) {
234
+ ThrowOnError(GetApi().AllocatorFree(this->p_, p));
235
+ }
236
+
237
+ template <typename T>
238
+ inline ConstMemoryInfo AllocatorImpl<T>::GetInfo() const {
239
+ const OrtMemoryInfo* out;
240
+ ThrowOnError(GetApi().AllocatorGetInfo(this->p_, &out));
241
+ return ConstMemoryInfo{out};
242
+ }
243
+
244
+ } // namespace detail
245
+
246
+ inline AllocatorWithDefaultOptions::AllocatorWithDefaultOptions() {
247
+ ThrowOnError(GetApi().GetAllocatorWithDefaultOptions(&this->p_));
248
+ }
249
+
250
+ inline Allocator::Allocator(const Session& sess, const OrtMemoryInfo* mem_info) {
251
+ ThrowOnError(GetApi().CreateAllocator(sess, mem_info, &this->p_));
252
+ }
253
+
254
+ namespace detail {
255
+
256
+ template <typename T>
257
+ inline std::string MemoryInfoImpl<T>::GetAllocatorName() const {
258
+ const char* name = nullptr;
259
+ ThrowOnError(GetApi().MemoryInfoGetName(this->p_, &name));
260
+ return std::string(name);
261
+ }
262
+
263
+ template <typename T>
264
+ inline OrtAllocatorType MemoryInfoImpl<T>::GetAllocatorType() const {
265
+ OrtAllocatorType type;
266
+ ThrowOnError(GetApi().MemoryInfoGetType(this->p_, &type));
267
+ return type;
268
+ }
269
+
270
+ template <typename T>
271
+ inline int MemoryInfoImpl<T>::GetDeviceId() const {
272
+ int id = 0;
273
+ ThrowOnError(GetApi().MemoryInfoGetId(this->p_, &id));
274
+ return id;
275
+ }
276
+
277
+ template <typename T>
278
+ inline OrtMemoryInfoDeviceType MemoryInfoImpl<T>::GetDeviceType() const {
279
+ OrtMemoryInfoDeviceType type;
280
+ GetApi().MemoryInfoGetDeviceType(this->p_, &type);
281
+ return type;
282
+ }
283
+
284
+ template <typename T>
285
+ inline OrtMemType MemoryInfoImpl<T>::GetMemoryType() const {
286
+ OrtMemType type;
287
+ ThrowOnError(GetApi().MemoryInfoGetMemType(this->p_, &type));
288
+ return type;
289
+ }
290
+
291
+ template <typename T>
292
+ template <typename U>
293
+ inline bool MemoryInfoImpl<T>::operator==(const MemoryInfoImpl<U>& o) const {
294
+ int comp_result = 0;
295
+ ThrowOnError(Ort::GetApi().CompareMemoryInfo(this->p_, o, &comp_result));
296
+ return comp_result == 0;
297
+ }
298
+
299
+ } // namespace detail
300
+
301
+ inline MemoryInfo MemoryInfo::CreateCpu(OrtAllocatorType type, OrtMemType mem_type) {
302
+ OrtMemoryInfo* p;
303
+ ThrowOnError(GetApi().CreateCpuMemoryInfo(type, mem_type, &p));
304
+ return MemoryInfo(p);
305
+ }
306
+
307
+ inline MemoryInfo::MemoryInfo(const char* name, OrtAllocatorType type, int id, OrtMemType mem_type) {
308
+ ThrowOnError(GetApi().CreateMemoryInfo(name, type, id, mem_type, &this->p_));
309
+ }
310
+
311
+ namespace detail {
312
+ template <typename T>
313
+ inline std::vector<std::string> ConstIoBindingImpl<T>::GetOutputNames() const {
314
+ AllocatorWithDefaultOptions allocator;
315
+ return binding_utils::GetOutputNamesHelper(this->p_, allocator);
316
+ }
317
+
318
+ template <typename T>
319
+ inline std::vector<std::string> ConstIoBindingImpl<T>::GetOutputNames(OrtAllocator* allocator) const {
320
+ return binding_utils::GetOutputNamesHelper(this->p_, allocator);
321
+ }
322
+
323
+ template <typename T>
324
+ inline std::vector<Value> ConstIoBindingImpl<T>::GetOutputValues() const {
325
+ AllocatorWithDefaultOptions allocator;
326
+ return binding_utils::GetOutputValuesHelper(this->p_, allocator);
327
+ }
328
+
329
+ template <typename T>
330
+ inline std::vector<Value> ConstIoBindingImpl<T>::GetOutputValues(OrtAllocator* allocator) const {
331
+ return binding_utils::GetOutputValuesHelper(this->p_, allocator);
332
+ }
333
+
334
+ template <typename T>
335
+ inline void IoBindingImpl<T>::BindInput(const char* name, const Value& value) {
336
+ ThrowOnError(GetApi().BindInput(this->p_, name, value));
337
+ }
338
+
339
+ template <typename T>
340
+ inline void IoBindingImpl<T>::BindOutput(const char* name, const Value& value) {
341
+ ThrowOnError(GetApi().BindOutput(this->p_, name, value));
342
+ }
343
+
344
+ template <typename T>
345
+ inline void IoBindingImpl<T>::BindOutput(const char* name, const OrtMemoryInfo* mem_info) {
346
+ ThrowOnError(GetApi().BindOutputToDevice(this->p_, name, mem_info));
347
+ }
348
+
349
+ template <typename T>
350
+ inline void IoBindingImpl<T>::ClearBoundInputs() {
351
+ GetApi().ClearBoundInputs(this->p_);
352
+ }
353
+
354
+ template <typename T>
355
+ inline void IoBindingImpl<T>::ClearBoundOutputs() {
356
+ GetApi().ClearBoundOutputs(this->p_);
357
+ }
358
+
359
+ template <typename T>
360
+ inline void IoBindingImpl<T>::SynchronizeInputs() {
361
+ ThrowOnError(GetApi().SynchronizeBoundInputs(this->p_));
362
+ }
363
+
364
+ template <typename T>
365
+ inline void IoBindingImpl<T>::SynchronizeOutputs() {
366
+ ThrowOnError(GetApi().SynchronizeBoundOutputs(this->p_));
367
+ }
368
+
369
+ namespace binding_utils {
370
+ inline std::vector<std::string> GetOutputNamesHelper(const OrtIoBinding* binding, OrtAllocator* allocator) {
371
+ std::vector<std::string> result;
372
+ auto free_fn = detail::AllocatedFree(allocator);
373
+ using Ptr = std::unique_ptr<void, decltype(free_fn)>;
374
+
375
+ char* buffer = nullptr;
376
+ size_t* lengths = nullptr;
377
+ size_t count = 0;
378
+ ThrowOnError(GetApi().GetBoundOutputNames(binding, allocator, &buffer, &lengths, &count));
379
+
380
+ if (count == 0) {
381
+ return result;
382
+ }
383
+
384
+ Ptr buffer_g(buffer, free_fn);
385
+ Ptr lengths_g(lengths, free_fn);
386
+
387
+ result.reserve(count);
388
+ for (size_t i = 0; i < count; ++i) {
389
+ auto sz = *lengths;
390
+ result.emplace_back(buffer, sz);
391
+ buffer += sz;
392
+ ++lengths;
393
+ }
394
+ return result;
395
+ }
396
+
397
+ inline std::vector<Value> GetOutputValuesHelper(const OrtIoBinding* binding, OrtAllocator* allocator) {
398
+ std::vector<Value> result;
399
+ size_t owned = 0;
400
+ size_t output_count = 0;
401
+ // Lambda to release the buffer when no longer needed and
402
+ // make sure that we destroy all instances on exception
403
+ auto free_fn = [&owned, &output_count, allocator](OrtValue** buffer) {
404
+ if (buffer) {
405
+ while (owned < output_count) {
406
+ auto* p = buffer + owned++;
407
+ GetApi().ReleaseValue(*p);
408
+ }
409
+ allocator->Free(allocator, buffer);
410
+ }
411
+ };
412
+ using Ptr = std::unique_ptr<OrtValue*, decltype(free_fn)>;
413
+
414
+ OrtValue** output_buffer = nullptr;
415
+ ThrowOnError(GetApi().GetBoundOutputValues(binding, allocator, &output_buffer, &output_count));
416
+ if (output_count == 0) {
417
+ return result;
418
+ }
419
+
420
+ Ptr buffer_g(output_buffer, free_fn);
421
+
422
+ result.reserve(output_count);
423
+ for (size_t i = 0; i < output_count; ++i) {
424
+ result.emplace_back(output_buffer[i]);
425
+ ++owned;
426
+ }
427
+ return result;
428
+ }
429
+
430
+ } // namespace binding_utils
431
+ } // namespace detail
432
+
433
+ inline IoBinding::IoBinding(Session& session) {
434
+ ThrowOnError(GetApi().CreateIoBinding(session, &this->p_));
435
+ }
436
+
437
+ inline ArenaCfg::ArenaCfg(size_t max_mem, int arena_extend_strategy, int initial_chunk_size_bytes, int max_dead_bytes_per_chunk) {
438
+ ThrowOnError(GetApi().CreateArenaCfg(max_mem, arena_extend_strategy, initial_chunk_size_bytes, max_dead_bytes_per_chunk, &p_));
439
+ }
440
+
441
+ inline ThreadingOptions::ThreadingOptions() {
442
+ ThrowOnError(GetApi().CreateThreadingOptions(&p_));
443
+ }
444
+
445
+ inline ThreadingOptions& ThreadingOptions::SetGlobalIntraOpNumThreads(int intra_op_num_threads) {
446
+ ThrowOnError(GetApi().SetGlobalIntraOpNumThreads(p_, intra_op_num_threads));
447
+ return *this;
448
+ }
449
+
450
+ inline ThreadingOptions& ThreadingOptions::SetGlobalInterOpNumThreads(int inter_op_num_threads) {
451
+ ThrowOnError(GetApi().SetGlobalInterOpNumThreads(p_, inter_op_num_threads));
452
+ return *this;
453
+ }
454
+
455
+ inline ThreadingOptions& ThreadingOptions::SetGlobalSpinControl(int allow_spinning) {
456
+ ThrowOnError(GetApi().SetGlobalSpinControl(p_, allow_spinning));
457
+ return *this;
458
+ }
459
+
460
+ inline ThreadingOptions& ThreadingOptions::SetGlobalDenormalAsZero() {
461
+ ThrowOnError(GetApi().SetGlobalDenormalAsZero(p_));
462
+ return *this;
463
+ }
464
+
465
+ inline ThreadingOptions& ThreadingOptions::SetGlobalCustomCreateThreadFn(OrtCustomCreateThreadFn ort_custom_create_thread_fn) {
466
+ ThrowOnError(GetApi().SetGlobalCustomCreateThreadFn(p_, ort_custom_create_thread_fn));
467
+ return *this;
468
+ }
469
+
470
+ inline ThreadingOptions& ThreadingOptions::SetGlobalCustomThreadCreationOptions(void* ort_custom_thread_creation_options) {
471
+ ThrowOnError(GetApi().SetGlobalCustomThreadCreationOptions(p_, ort_custom_thread_creation_options));
472
+ return *this;
473
+ }
474
+
475
+ inline ThreadingOptions& ThreadingOptions::SetGlobalCustomJoinThreadFn(OrtCustomJoinThreadFn ort_custom_join_thread_fn) {
476
+ ThrowOnError(GetApi().SetGlobalCustomJoinThreadFn(p_, ort_custom_join_thread_fn));
477
+ return *this;
478
+ }
479
+
480
+ inline Env::Env(OrtLoggingLevel logging_level, _In_ const char* logid) {
481
+ ThrowOnError(GetApi().CreateEnv(logging_level, logid, &p_));
482
+ if (strcmp(logid, "onnxruntime-node") == 0) {
483
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
484
+ } else {
485
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
486
+ }
487
+ }
488
+
489
+ inline Env::Env(OrtLoggingLevel logging_level, const char* logid, OrtLoggingFunction logging_function, void* logger_param) {
490
+ ThrowOnError(GetApi().CreateEnvWithCustomLogger(logging_function, logger_param, logging_level, logid, &p_));
491
+ if (strcmp(logid, "onnxruntime-node") == 0) {
492
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
493
+ } else {
494
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
495
+ }
496
+ }
497
+
498
+ inline Env::Env(const OrtThreadingOptions* tp_options, OrtLoggingLevel logging_level, _In_ const char* logid) {
499
+ ThrowOnError(GetApi().CreateEnvWithGlobalThreadPools(logging_level, logid, tp_options, &p_));
500
+ if (strcmp(logid, "onnxruntime-node") == 0) {
501
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
502
+ } else {
503
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
504
+ }
505
+ }
506
+
507
+ inline Env::Env(const OrtThreadingOptions* tp_options, OrtLoggingFunction logging_function, void* logger_param,
508
+ OrtLoggingLevel logging_level, _In_ const char* logid) {
509
+ ThrowOnError(GetApi().CreateEnvWithCustomLoggerAndGlobalThreadPools(logging_function, logger_param, logging_level, logid, tp_options, &p_));
510
+ if (strcmp(logid, "onnxruntime-node") == 0) {
511
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_NODEJS));
512
+ } else {
513
+ ThrowOnError(GetApi().SetLanguageProjection(p_, OrtLanguageProjection::ORT_PROJECTION_CPLUSPLUS));
514
+ }
515
+ }
516
+
517
+ inline Env& Env::EnableTelemetryEvents() {
518
+ ThrowOnError(GetApi().EnableTelemetryEvents(p_));
519
+ return *this;
520
+ }
521
+
522
+ inline Env& Env::DisableTelemetryEvents() {
523
+ ThrowOnError(GetApi().DisableTelemetryEvents(p_));
524
+ return *this;
525
+ }
526
+
527
+ inline Env& Env::UpdateEnvWithCustomLogLevel(OrtLoggingLevel log_severity_level) {
528
+ ThrowOnError(GetApi().UpdateEnvWithCustomLogLevel(p_, log_severity_level));
529
+ return *this;
530
+ }
531
+
532
+ inline Env& Env::CreateAndRegisterAllocator(const OrtMemoryInfo* mem_info, const OrtArenaCfg* arena_cfg) {
533
+ ThrowOnError(GetApi().CreateAndRegisterAllocator(p_, mem_info, arena_cfg));
534
+ return *this;
535
+ }
536
+
537
+ inline Env& Env::CreateAndRegisterAllocatorV2(const std::string& provider_type, const OrtMemoryInfo* mem_info, const std::unordered_map<std::string, std::string>& options, const OrtArenaCfg* arena_cfg) {
538
+ std::vector<const char*> keys, values;
539
+ auto num_entries = options.size();
540
+ if (num_entries > 0) {
541
+ keys.reserve(num_entries);
542
+ values.reserve(num_entries);
543
+ for (const auto& entry : options) {
544
+ keys.push_back(entry.first.c_str());
545
+ values.push_back(entry.second.c_str());
546
+ }
547
+ }
548
+ ThrowOnError(GetApi().CreateAndRegisterAllocatorV2(p_, provider_type.c_str(), mem_info, arena_cfg, keys.data(), values.data(), num_entries));
549
+ return *this;
550
+ }
551
+
552
+ inline CustomOpDomain::CustomOpDomain(const char* domain) {
553
+ ThrowOnError(GetApi().CreateCustomOpDomain(domain, &p_));
554
+ }
555
+
556
+ inline void CustomOpDomain::Add(const OrtCustomOp* op) {
557
+ ThrowOnError(GetApi().CustomOpDomain_Add(p_, op));
558
+ }
559
+
560
+ inline RunOptions::RunOptions() {
561
+ ThrowOnError(GetApi().CreateRunOptions(&p_));
562
+ }
563
+
564
+ inline RunOptions& RunOptions::SetRunLogVerbosityLevel(int level) {
565
+ ThrowOnError(GetApi().RunOptionsSetRunLogVerbosityLevel(p_, level));
566
+ return *this;
567
+ }
568
+
569
+ inline RunOptions& RunOptions::SetRunLogSeverityLevel(int level) {
570
+ ThrowOnError(GetApi().RunOptionsSetRunLogSeverityLevel(p_, level));
571
+ return *this;
572
+ }
573
+
574
+ inline int RunOptions::GetRunLogVerbosityLevel() const {
575
+ int out;
576
+ ThrowOnError(GetApi().RunOptionsGetRunLogVerbosityLevel(p_, &out));
577
+ return out;
578
+ }
579
+
580
+ inline int RunOptions::GetRunLogSeverityLevel() const {
581
+ int out;
582
+ ThrowOnError(GetApi().RunOptionsGetRunLogSeverityLevel(p_, &out));
583
+ return out;
584
+ }
585
+
586
+ inline RunOptions& RunOptions::SetRunTag(const char* run_tag) {
587
+ ThrowOnError(GetApi().RunOptionsSetRunTag(p_, run_tag));
588
+ return *this;
589
+ }
590
+
591
+ inline const char* RunOptions::GetRunTag() const {
592
+ const char* out;
593
+ ThrowOnError(GetApi().RunOptionsGetRunTag(p_, &out));
594
+ return out;
595
+ }
596
+
597
+ inline RunOptions& RunOptions::AddConfigEntry(const char* config_key, const char* config_value) {
598
+ ThrowOnError(GetApi().AddRunConfigEntry(p_, config_key, config_value));
599
+ return *this;
600
+ }
601
+
602
+ inline RunOptions& RunOptions::SetTerminate() {
603
+ ThrowOnError(GetApi().RunOptionsSetTerminate(p_));
604
+ return *this;
605
+ }
606
+
607
+ inline RunOptions& RunOptions::UnsetTerminate() {
608
+ ThrowOnError(GetApi().RunOptionsUnsetTerminate(p_));
609
+ return *this;
610
+ }
611
+
612
+ namespace detail {
613
+
614
+ template <typename T>
615
+ inline Ort::SessionOptions ConstSessionOptionsImpl<T>::Clone() const {
616
+ OrtSessionOptions* out;
617
+ ThrowOnError(GetApi().CloneSessionOptions(this->p_, &out));
618
+ return SessionOptions{out};
619
+ }
620
+
621
+ template <typename T>
622
+ inline std::string ConstSessionOptionsImpl<T>::GetConfigEntry(const char* config_key) const {
623
+ size_t size = 0;
624
+ // Feed nullptr for the data buffer to query the true size of the string value
625
+ Ort::ThrowOnError(GetApi().GetSessionConfigEntry(this->p_, config_key, nullptr, &size));
626
+
627
+ std::string out;
628
+ out.resize(size);
629
+ Ort::ThrowOnError(GetApi().GetSessionConfigEntry(this->p_, config_key, &out[0], &size));
630
+ out.resize(size - 1); // remove the terminating character '\0'
631
+
632
+ return out;
633
+ }
634
+
635
+ template <typename T>
636
+ inline bool ConstSessionOptionsImpl<T>::HasConfigEntry(const char* config_key) const {
637
+ int out = 0;
638
+ Ort::ThrowOnError(GetApi().HasSessionConfigEntry(this->p_, config_key, &out));
639
+ return static_cast<bool>(out);
640
+ }
641
+
642
+ template <typename T>
643
+ inline std::string ConstSessionOptionsImpl<T>::GetConfigEntryOrDefault(const char* config_key, const std::string& def) {
644
+ if (!this->HasConfigEntry(config_key)) {
645
+ return def;
646
+ }
647
+
648
+ return this->GetConfigEntry(config_key);
649
+ }
650
+
651
+ template <typename T>
652
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetIntraOpNumThreads(int intra_op_num_threads) {
653
+ ThrowOnError(GetApi().SetIntraOpNumThreads(this->p_, intra_op_num_threads));
654
+ return *this;
655
+ }
656
+
657
+ template <typename T>
658
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetInterOpNumThreads(int inter_op_num_threads) {
659
+ ThrowOnError(GetApi().SetInterOpNumThreads(this->p_, inter_op_num_threads));
660
+ return *this;
661
+ }
662
+
663
+ template <typename T>
664
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetGraphOptimizationLevel(GraphOptimizationLevel graph_optimization_level) {
665
+ ThrowOnError(GetApi().SetSessionGraphOptimizationLevel(this->p_, graph_optimization_level));
666
+ return *this;
667
+ }
668
+
669
+ template <typename T>
670
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetDeterministicCompute(bool value) {
671
+ ThrowOnError(GetApi().SetDeterministicCompute(this->p_, value));
672
+ return *this;
673
+ }
674
+
675
+ template <typename T>
676
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetOptimizedModelFilePath(const ORTCHAR_T* optimized_model_filepath) {
677
+ ThrowOnError(GetApi().SetOptimizedModelFilePath(this->p_, optimized_model_filepath));
678
+ return *this;
679
+ }
680
+
681
+ template <typename T>
682
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableProfiling(const ORTCHAR_T* profile_file_prefix) {
683
+ ThrowOnError(GetApi().EnableProfiling(this->p_, profile_file_prefix));
684
+ return *this;
685
+ }
686
+
687
+ template <typename T>
688
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisableProfiling() {
689
+ ThrowOnError(GetApi().DisableProfiling(this->p_));
690
+ return *this;
691
+ }
692
+
693
+ template <typename T>
694
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableOrtCustomOps() {
695
+ ThrowOnError(GetApi().EnableOrtCustomOps(this->p_));
696
+ return *this;
697
+ }
698
+
699
+ template <typename T>
700
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableMemPattern() {
701
+ ThrowOnError(GetApi().EnableMemPattern(this->p_));
702
+ return *this;
703
+ }
704
+
705
+ template <typename T>
706
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisableMemPattern() {
707
+ ThrowOnError(GetApi().DisableMemPattern(this->p_));
708
+ return *this;
709
+ }
710
+
711
+ template <typename T>
712
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::EnableCpuMemArena() {
713
+ ThrowOnError(GetApi().EnableCpuMemArena(this->p_));
714
+ return *this;
715
+ }
716
+
717
+ template <typename T>
718
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisableCpuMemArena() {
719
+ ThrowOnError(GetApi().DisableCpuMemArena(this->p_));
720
+ return *this;
721
+ }
722
+
723
+ template <typename T>
724
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetExecutionMode(ExecutionMode execution_mode) {
725
+ ThrowOnError(GetApi().SetSessionExecutionMode(this->p_, execution_mode));
726
+ return *this;
727
+ }
728
+
729
+ template <typename T>
730
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetLogId(const char* logid) {
731
+ ThrowOnError(GetApi().SetSessionLogId(this->p_, logid));
732
+ return *this;
733
+ }
734
+
735
+ template <typename T>
736
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetLogSeverityLevel(int level) {
737
+ ThrowOnError(GetApi().SetSessionLogSeverityLevel(this->p_, level));
738
+ return *this;
739
+ }
740
+
741
+ template <typename T>
742
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::Add(OrtCustomOpDomain* custom_op_domain) {
743
+ ThrowOnError(GetApi().AddCustomOpDomain(this->p_, custom_op_domain));
744
+ return *this;
745
+ }
746
+
747
+ template <typename T>
748
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddConfigEntry(const char* config_key, const char* config_value) {
749
+ ThrowOnError(GetApi().AddSessionConfigEntry(this->p_, config_key, config_value));
750
+ return *this;
751
+ }
752
+
753
+ template <typename T>
754
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddInitializer(const char* name, const OrtValue* ort_val) {
755
+ ThrowOnError(GetApi().AddInitializer(this->p_, name, ort_val));
756
+ return *this;
757
+ }
758
+
759
+ template <typename T>
760
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::DisablePerSessionThreads() {
761
+ ThrowOnError(GetApi().DisablePerSessionThreads(this->p_));
762
+ return *this;
763
+ }
764
+
765
+ template <typename T>
766
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddExternalInitializers(const std::vector<std::string>& names,
767
+ const std::vector<Value>& ort_values) {
768
+ const size_t inputs_num = names.size();
769
+ if (inputs_num != ort_values.size()) {
770
+ ORT_CXX_API_THROW("Expecting names and ort_values to have the same length", ORT_INVALID_ARGUMENT);
771
+ }
772
+ std::vector<const char*> names_ptr;
773
+ std::vector<const OrtValue*> ort_values_ptrs;
774
+ names_ptr.reserve(inputs_num);
775
+ ort_values_ptrs.reserve(inputs_num);
776
+ for (size_t i = 0; i < inputs_num; ++i) {
777
+ names_ptr.push_back(names[i].c_str());
778
+ ort_values_ptrs.push_back(ort_values[i]);
779
+ }
780
+ ThrowOnError(GetApi().AddExternalInitializers(this->p_, names_ptr.data(), ort_values_ptrs.data(), inputs_num));
781
+ return *this;
782
+ }
783
+
784
+ template <typename T>
785
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AddExternalInitializersFromFilesInMemory(const std::vector<std::basic_string<ORTCHAR_T>>& file_names,
786
+ const std::vector<char*>& buffer_array,
787
+ const std::vector<size_t>& file_lengths) {
788
+ const size_t inputs_num = file_names.size();
789
+ if (inputs_num != buffer_array.size()) {
790
+ ORT_CXX_API_THROW("Expecting names and buffer_array to have the same length", ORT_INVALID_ARGUMENT);
791
+ }
792
+ if (inputs_num != file_lengths.size()) {
793
+ ORT_CXX_API_THROW("Expecting names and file_lengths to have the same length", ORT_INVALID_ARGUMENT);
794
+ }
795
+ std::vector<const ORTCHAR_T*> names_ptr;
796
+ names_ptr.reserve(inputs_num);
797
+ for (size_t i = 0; i < inputs_num; ++i) {
798
+ names_ptr.push_back(file_names[i].c_str());
799
+ }
800
+ ThrowOnError(GetApi().AddExternalInitializersFromFilesInMemory(this->p_, names_ptr.data(), buffer_array.data(),
801
+ file_lengths.data(), inputs_num));
802
+ return *this;
803
+ }
804
+
805
+ template <typename T>
806
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_CUDA(const OrtCUDAProviderOptions& provider_options) {
807
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_CUDA(this->p_, &provider_options));
808
+ return *this;
809
+ }
810
+
811
+ template <typename T>
812
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_CUDA_V2(const OrtCUDAProviderOptionsV2& provider_options) {
813
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_CUDA_V2(this->p_, &provider_options));
814
+ return *this;
815
+ }
816
+
817
+ template <typename T>
818
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_ROCM(const OrtROCMProviderOptions& provider_options) {
819
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_ROCM(this->p_, &provider_options));
820
+ return *this;
821
+ }
822
+
823
+ template <typename T>
824
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_TensorRT(const OrtTensorRTProviderOptions& provider_options) {
825
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_TensorRT(this->p_, &provider_options));
826
+ return *this;
827
+ }
828
+
829
+ template <typename T>
830
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_TensorRT_V2(const OrtTensorRTProviderOptionsV2& provider_options) {
831
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_TensorRT_V2(this->p_, &provider_options));
832
+ return *this;
833
+ }
834
+
835
+ template <typename T>
836
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_MIGraphX(const OrtMIGraphXProviderOptions& provider_options) {
837
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_MIGraphX(this->p_, &provider_options));
838
+ return *this;
839
+ }
840
+
841
+ template <typename T>
842
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_CANN(const OrtCANNProviderOptions& provider_options) {
843
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_CANN(this->p_, &provider_options));
844
+ return *this;
845
+ }
846
+
847
+ template <typename T>
848
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_Dnnl(const OrtDnnlProviderOptions& provider_options) {
849
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_Dnnl(this->p_, &provider_options));
850
+ return *this;
851
+ }
852
+
853
+ template <typename T>
854
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider(
855
+ const std::string& provider_name,
856
+ const std::unordered_map<std::string, std::string>& provider_options) {
857
+ auto num_entries = provider_options.size();
858
+ std::vector<const char*> keys, values;
859
+ if (num_entries > 0) {
860
+ keys.reserve(num_entries);
861
+ values.reserve(num_entries);
862
+
863
+ for (const auto& entry : provider_options) {
864
+ keys.push_back(entry.first.c_str());
865
+ values.push_back(entry.second.c_str());
866
+ }
867
+ }
868
+
869
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider(this->p_, provider_name.c_str(),
870
+ keys.data(), values.data(), num_entries));
871
+
872
+ return *this;
873
+ }
874
+
875
+ template <typename T>
876
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetCustomCreateThreadFn(OrtCustomCreateThreadFn ort_custom_create_thread_fn) {
877
+ ThrowOnError(GetApi().SessionOptionsSetCustomCreateThreadFn(this->p_, ort_custom_create_thread_fn));
878
+ return *this;
879
+ }
880
+
881
+ template <typename T>
882
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetCustomThreadCreationOptions(void* ort_custom_thread_creation_options) {
883
+ ThrowOnError(GetApi().SessionOptionsSetCustomThreadCreationOptions(this->p_, ort_custom_thread_creation_options));
884
+ return *this;
885
+ }
886
+
887
+ template <typename T>
888
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::SetCustomJoinThreadFn(OrtCustomJoinThreadFn ort_custom_join_thread_fn) {
889
+ ThrowOnError(GetApi().SessionOptionsSetCustomJoinThreadFn(this->p_, ort_custom_join_thread_fn));
890
+ return *this;
891
+ }
892
+
893
+ template <typename T>
894
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_OpenVINO(const OrtOpenVINOProviderOptions& provider_options) {
895
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_OpenVINO(this->p_, &provider_options));
896
+ return *this;
897
+ }
898
+
899
+ template <typename T>
900
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_OpenVINO_V2(const std::unordered_map<std::string, std::string>& provider_options) {
901
+ auto num_entries = provider_options.size();
902
+ std::vector<const char*> keys, values;
903
+ if (num_entries > 0) {
904
+ keys.reserve(num_entries);
905
+ values.reserve(num_entries);
906
+
907
+ for (const auto& entry : provider_options) {
908
+ keys.push_back(entry.first.c_str());
909
+ values.push_back(entry.second.c_str());
910
+ }
911
+ }
912
+
913
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_OpenVINO_V2(this->p_,
914
+ keys.data(), values.data(), num_entries));
915
+
916
+ return *this;
917
+ }
918
+
919
+ template <typename T>
920
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::AppendExecutionProvider_VitisAI(const std::unordered_map<std::string, std::string>& provider_options) {
921
+ auto num_entries = provider_options.size();
922
+ std::vector<const char*> keys, values;
923
+ if (num_entries > 0) {
924
+ keys.reserve(num_entries);
925
+ values.reserve(num_entries);
926
+
927
+ for (const auto& entry : provider_options) {
928
+ keys.push_back(entry.first.c_str());
929
+ values.push_back(entry.second.c_str());
930
+ }
931
+ }
932
+
933
+ ThrowOnError(GetApi().SessionOptionsAppendExecutionProvider_VitisAI(this->p_, keys.data(), values.data(), num_entries));
934
+
935
+ return *this;
936
+ }
937
+
938
+ template <typename T>
939
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::RegisterCustomOpsLibrary(const ORTCHAR_T* library_name,
940
+ const CustomOpConfigs& custom_op_configs) {
941
+ // Add custom op config entries before registering the custom op library. Otherwise, the config entries _may_ be ignored by
942
+ // the custom op library.
943
+ for (const auto& config_iter : custom_op_configs.GetFlattenedConfigs()) {
944
+ AddConfigEntry(config_iter.first.c_str(), config_iter.second.c_str());
945
+ }
946
+
947
+ ThrowOnError(GetApi().RegisterCustomOpsLibrary_V2(this->p_, library_name));
948
+ return *this;
949
+ }
950
+
951
+ template <typename T>
952
+ inline SessionOptionsImpl<T>& SessionOptionsImpl<T>::RegisterCustomOpsUsingFunction(const char* registration_function_name) {
953
+ ThrowOnError(GetApi().RegisterCustomOpsUsingFunction(this->p_, registration_function_name));
954
+ return *this;
955
+ }
956
+
957
+ /// Session
958
+ template <typename T>
959
+ inline size_t ConstSessionImpl<T>::GetInputCount() const {
960
+ size_t out;
961
+ ThrowOnError(GetApi().SessionGetInputCount(this->p_, &out));
962
+ return out;
963
+ }
964
+
965
+ template <typename T>
966
+ inline size_t ConstSessionImpl<T>::GetOutputCount() const {
967
+ size_t out;
968
+ ThrowOnError(GetApi().SessionGetOutputCount(this->p_, &out));
969
+ return out;
970
+ }
971
+
972
+ template <typename T>
973
+ inline size_t ConstSessionImpl<T>::GetOverridableInitializerCount() const {
974
+ size_t out;
975
+ ThrowOnError(GetApi().SessionGetOverridableInitializerCount(this->p_, &out));
976
+ return out;
977
+ }
978
+
979
+ template <typename T>
980
+ inline AllocatedStringPtr ConstSessionImpl<T>::GetInputNameAllocated(size_t index, OrtAllocator* allocator) const {
981
+ char* out;
982
+ ThrowOnError(GetApi().SessionGetInputName(this->p_, index, allocator, &out));
983
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
984
+ }
985
+
986
+ template <typename T>
987
+ inline AllocatedStringPtr ConstSessionImpl<T>::GetOutputNameAllocated(size_t index, OrtAllocator* allocator) const {
988
+ char* out;
989
+ ThrowOnError(GetApi().SessionGetOutputName(this->p_, index, allocator, &out));
990
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
991
+ }
992
+
993
+ template <typename T>
994
+ inline AllocatedStringPtr ConstSessionImpl<T>::GetOverridableInitializerNameAllocated(size_t index, OrtAllocator* allocator) const {
995
+ char* out;
996
+ ThrowOnError(GetApi().SessionGetOverridableInitializerName(this->p_, index, allocator, &out));
997
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
998
+ }
999
+
1000
+ template <typename T>
1001
+ inline uint64_t ConstSessionImpl<T>::GetProfilingStartTimeNs() const {
1002
+ uint64_t out;
1003
+ ThrowOnError(GetApi().SessionGetProfilingStartTimeNs(this->p_, &out));
1004
+ return out;
1005
+ }
1006
+
1007
+ template <typename T>
1008
+ inline ModelMetadata ConstSessionImpl<T>::GetModelMetadata() const {
1009
+ OrtModelMetadata* out;
1010
+ ThrowOnError(GetApi().SessionGetModelMetadata(this->p_, &out));
1011
+ return ModelMetadata{out};
1012
+ }
1013
+
1014
+ template <typename T>
1015
+ inline TypeInfo ConstSessionImpl<T>::GetInputTypeInfo(size_t index) const {
1016
+ OrtTypeInfo* out;
1017
+ ThrowOnError(GetApi().SessionGetInputTypeInfo(this->p_, index, &out));
1018
+ return TypeInfo{out};
1019
+ }
1020
+
1021
+ template <typename T>
1022
+ inline TypeInfo ConstSessionImpl<T>::GetOutputTypeInfo(size_t index) const {
1023
+ OrtTypeInfo* out;
1024
+ ThrowOnError(GetApi().SessionGetOutputTypeInfo(this->p_, index, &out));
1025
+ return TypeInfo{out};
1026
+ }
1027
+
1028
+ template <typename T>
1029
+ inline TypeInfo ConstSessionImpl<T>::GetOverridableInitializerTypeInfo(size_t index) const {
1030
+ OrtTypeInfo* out;
1031
+ ThrowOnError(GetApi().SessionGetOverridableInitializerTypeInfo(this->p_, index, &out));
1032
+ return TypeInfo{out};
1033
+ }
1034
+
1035
+ template <typename T>
1036
+ inline std::vector<Value> SessionImpl<T>::Run(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
1037
+ const char* const* output_names, size_t output_count) {
1038
+ std::vector<Value> output_values;
1039
+ output_values.reserve(output_count);
1040
+ for (size_t i = 0; i < output_count; i++)
1041
+ output_values.emplace_back(nullptr);
1042
+ Run(run_options, input_names, input_values, input_count, output_names, output_values.data(), output_count);
1043
+ return output_values;
1044
+ }
1045
+
1046
+ template <typename T>
1047
+ inline void SessionImpl<T>::Run(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
1048
+ const char* const* output_names, Value* output_values, size_t output_count) {
1049
+ static_assert(sizeof(Value) == sizeof(OrtValue*), "Value is really just an array of OrtValue* in memory, so we can reinterpret_cast safely");
1050
+ auto ort_input_values = reinterpret_cast<const OrtValue* const*>(input_values);
1051
+ auto ort_output_values = reinterpret_cast<OrtValue**>(output_values);
1052
+ ThrowOnError(GetApi().Run(this->p_, run_options, input_names, ort_input_values, input_count, output_names, output_count, ort_output_values));
1053
+ }
1054
+
1055
+ template <typename T>
1056
+ inline void SessionImpl<T>::Run(const RunOptions& run_options, const IoBinding& io_binding) {
1057
+ ThrowOnError(GetApi().RunWithBinding(this->p_, run_options, io_binding));
1058
+ }
1059
+
1060
+ template <typename T>
1061
+ inline void SessionImpl<T>::RunAsync(const RunOptions& run_options, const char* const* input_names, const Value* input_values, size_t input_count,
1062
+ const char* const* output_names, Value* output_values, size_t output_count, RunAsyncCallbackFn callback, void* user_data) {
1063
+ auto ort_input_values = reinterpret_cast<const OrtValue* const*>(input_values);
1064
+ auto ort_output_values = reinterpret_cast<OrtValue**>(output_values);
1065
+ ThrowOnError(GetApi().RunAsync(this->p_, run_options, input_names,
1066
+ ort_input_values, input_count, output_names, output_count,
1067
+ ort_output_values, callback, user_data));
1068
+ }
1069
+
1070
+ template <typename T>
1071
+ inline AllocatedStringPtr SessionImpl<T>::EndProfilingAllocated(OrtAllocator* allocator) {
1072
+ char* out = nullptr;
1073
+ ThrowOnError(GetApi().SessionEndProfiling(this->p_, allocator, &out));
1074
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1075
+ }
1076
+
1077
+ } // namespace detail
1078
+
1079
+ inline SessionOptions::SessionOptions() {
1080
+ ThrowOnError(GetApi().CreateSessionOptions(&this->p_));
1081
+ }
1082
+
1083
+ /// CustomOpConfigs
1084
+ inline std::string detail::MakeCustomOpConfigEntryKey(const char* custom_op_name, const char* config) {
1085
+ std::string config_key = "custom_op.";
1086
+
1087
+ config_key += custom_op_name;
1088
+ config_key += ".";
1089
+ config_key += config;
1090
+
1091
+ return config_key;
1092
+ }
1093
+
1094
+ inline CustomOpConfigs& CustomOpConfigs::AddConfig(const char* custom_op_name, const char* config_key, const char* config_value) {
1095
+ const std::string full_flat_key = detail::MakeCustomOpConfigEntryKey(custom_op_name, config_key);
1096
+ flat_configs_[full_flat_key] = config_value;
1097
+ return *this;
1098
+ }
1099
+
1100
+ inline const std::unordered_map<std::string, std::string>& CustomOpConfigs::GetFlattenedConfigs() const {
1101
+ return flat_configs_;
1102
+ }
1103
+
1104
+ inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options) {
1105
+ ThrowOnError(GetApi().CreateSession(env, model_path, options, &this->p_));
1106
+ }
1107
+
1108
+ inline Session::Session(const Env& env, const ORTCHAR_T* model_path, const SessionOptions& options,
1109
+ OrtPrepackedWeightsContainer* prepacked_weights_container) {
1110
+ ThrowOnError(GetApi().CreateSessionWithPrepackedWeightsContainer(env, model_path, options, prepacked_weights_container, &this->p_));
1111
+ }
1112
+
1113
+ inline Session::Session(const Env& env, const void* model_data, size_t model_data_length, const SessionOptions& options) {
1114
+ ThrowOnError(GetApi().CreateSessionFromArray(env, model_data, model_data_length, options, &this->p_));
1115
+ }
1116
+
1117
+ inline Session::Session(const Env& env, const void* model_data, size_t model_data_length,
1118
+ const SessionOptions& options, OrtPrepackedWeightsContainer* prepacked_weights_container) {
1119
+ ThrowOnError(GetApi().CreateSessionFromArrayWithPrepackedWeightsContainer(env, model_data, model_data_length, options,
1120
+ prepacked_weights_container, &this->p_));
1121
+ }
1122
+
1123
+ inline AllocatedStringPtr ModelMetadata::GetProducerNameAllocated(OrtAllocator* allocator) const {
1124
+ char* out;
1125
+ ThrowOnError(GetApi().ModelMetadataGetProducerName(p_, allocator, &out));
1126
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1127
+ }
1128
+
1129
+ inline AllocatedStringPtr ModelMetadata::GetGraphNameAllocated(OrtAllocator* allocator) const {
1130
+ char* out;
1131
+ ThrowOnError(GetApi().ModelMetadataGetGraphName(p_, allocator, &out));
1132
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1133
+ }
1134
+
1135
+ inline AllocatedStringPtr ModelMetadata::GetDomainAllocated(OrtAllocator* allocator) const {
1136
+ char* out;
1137
+ ThrowOnError(GetApi().ModelMetadataGetDomain(p_, allocator, &out));
1138
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1139
+ }
1140
+
1141
+ inline AllocatedStringPtr Ort::ModelMetadata::GetDescriptionAllocated(OrtAllocator* allocator) const {
1142
+ char* out;
1143
+ ThrowOnError(GetApi().ModelMetadataGetDescription(p_, allocator, &out));
1144
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1145
+ }
1146
+
1147
+ inline AllocatedStringPtr ModelMetadata::GetGraphDescriptionAllocated(OrtAllocator* allocator) const {
1148
+ char* out;
1149
+ ThrowOnError(GetApi().ModelMetadataGetGraphDescription(p_, allocator, &out));
1150
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1151
+ }
1152
+
1153
+ inline AllocatedStringPtr ModelMetadata::LookupCustomMetadataMapAllocated(const char* key, OrtAllocator* allocator) const {
1154
+ char* out;
1155
+ ThrowOnError(GetApi().ModelMetadataLookupCustomMetadataMap(p_, allocator, key, &out));
1156
+ return AllocatedStringPtr(out, detail::AllocatedFree(allocator));
1157
+ }
1158
+
1159
+ inline std::vector<AllocatedStringPtr> ModelMetadata::GetCustomMetadataMapKeysAllocated(OrtAllocator* allocator) const {
1160
+ auto deletor = detail::AllocatedFree(allocator);
1161
+ std::vector<AllocatedStringPtr> result;
1162
+
1163
+ char** out = nullptr;
1164
+ int64_t num_keys = 0;
1165
+ ThrowOnError(GetApi().ModelMetadataGetCustomMetadataMapKeys(p_, allocator, &out, &num_keys));
1166
+ if (num_keys <= 0) {
1167
+ return result;
1168
+ }
1169
+
1170
+ // array of pointers will be freed
1171
+ std::unique_ptr<void, decltype(deletor)> array_guard(out, deletor);
1172
+ // reserve may throw
1173
+ auto strings_deletor = [&deletor, num_keys](char** out) { for(int64_t i = 0; i < num_keys; ++i) deletor(out[i]); };
1174
+ std::unique_ptr<char*, decltype(strings_deletor)> strings_guard(out, strings_deletor);
1175
+ result.reserve(static_cast<size_t>(num_keys));
1176
+ strings_guard.release();
1177
+ for (int64_t i = 0; i < num_keys; ++i) {
1178
+ result.push_back(AllocatedStringPtr(out[i], deletor));
1179
+ }
1180
+
1181
+ return result;
1182
+ }
1183
+
1184
+ inline int64_t ModelMetadata::GetVersion() const {
1185
+ int64_t out;
1186
+ ThrowOnError(GetApi().ModelMetadataGetVersion(p_, &out));
1187
+ return out;
1188
+ }
1189
+
1190
+ namespace detail {
1191
+
1192
+ template <typename T>
1193
+ inline ONNXTensorElementDataType TensorTypeAndShapeInfoImpl<T>::GetElementType() const {
1194
+ ONNXTensorElementDataType out;
1195
+ ThrowOnError(GetApi().GetTensorElementType(this->p_, &out));
1196
+ return out;
1197
+ }
1198
+
1199
+ template <typename T>
1200
+ inline size_t TensorTypeAndShapeInfoImpl<T>::GetElementCount() const {
1201
+ size_t out;
1202
+ ThrowOnError(GetApi().GetTensorShapeElementCount(this->p_, &out));
1203
+ return static_cast<size_t>(out);
1204
+ }
1205
+
1206
+ template <typename T>
1207
+ inline size_t TensorTypeAndShapeInfoImpl<T>::GetDimensionsCount() const {
1208
+ size_t out;
1209
+ ThrowOnError(GetApi().GetDimensionsCount(this->p_, &out));
1210
+ return out;
1211
+ }
1212
+
1213
+ template <typename T>
1214
+ inline void TensorTypeAndShapeInfoImpl<T>::GetDimensions(int64_t* values, size_t values_count) const {
1215
+ ThrowOnError(GetApi().GetDimensions(this->p_, values, values_count));
1216
+ }
1217
+
1218
+ template <typename T>
1219
+ inline void TensorTypeAndShapeInfoImpl<T>::GetSymbolicDimensions(const char** values, size_t values_count) const {
1220
+ ThrowOnError(GetApi().GetSymbolicDimensions(this->p_, values, values_count));
1221
+ }
1222
+
1223
+ template <typename T>
1224
+ inline std::vector<int64_t> TensorTypeAndShapeInfoImpl<T>::GetShape() const {
1225
+ std::vector<int64_t> out(GetDimensionsCount(), 0);
1226
+ ThrowOnError(GetApi().GetDimensions(this->p_, out.data(), out.size()));
1227
+ return out;
1228
+ }
1229
+
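// ---- Usage sketch (editor's note; not part of the committed header) ----
// Inspecting a tensor's element type and shape with the wrappers above,
// assuming `value` is an Ort::Value that holds a tensor.
void PrintTensorInfo(const Ort::Value& value) {
  Ort::TensorTypeAndShapeInfo info = value.GetTensorTypeAndShapeInfo();
  ONNXTensorElementDataType elem_type = info.GetElementType();
  std::vector<int64_t> shape = info.GetShape();  // dynamic dimensions are reported as -1
  size_t element_count = info.GetElementCount();
}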
1230
+ template <typename T>
1231
+ inline ConstTensorTypeAndShapeInfo TypeInfoImpl<T>::GetTensorTypeAndShapeInfo() const {
1232
+ const OrtTensorTypeAndShapeInfo* out;
1233
+ ThrowOnError(GetApi().CastTypeInfoToTensorInfo(this->p_, &out));
1234
+ return ConstTensorTypeAndShapeInfo{out};
1235
+ }
1236
+
1237
+ template <typename T>
1238
+ inline ConstSequenceTypeInfo TypeInfoImpl<T>::GetSequenceTypeInfo() const {
1239
+ const OrtSequenceTypeInfo* out;
1240
+ ThrowOnError(GetApi().CastTypeInfoToSequenceTypeInfo(this->p_, &out));
1241
+ return ConstSequenceTypeInfo{out};
1242
+ }
1243
+
1244
+ template <typename T>
1245
+ inline ConstMapTypeInfo TypeInfoImpl<T>::GetMapTypeInfo() const {
1246
+ const OrtMapTypeInfo* out;
1247
+ ThrowOnError(GetApi().CastTypeInfoToMapTypeInfo(this->p_, &out));
1248
+ return ConstMapTypeInfo{out};
1249
+ }
1250
+
1251
+ template <typename T>
1252
+ inline ONNXType TypeInfoImpl<T>::GetONNXType() const {
1253
+ ONNXType out;
1254
+ ThrowOnError(GetApi().GetOnnxTypeFromTypeInfo(this->p_, &out));
1255
+ return out;
1256
+ }
1257
+
1258
+ template <typename T>
1259
+ inline TypeInfo SequenceTypeInfoImpl<T>::GetSequenceElementType() const {
1260
+ OrtTypeInfo* output;
1261
+ ThrowOnError(GetApi().GetSequenceElementType(this->p_, &output));
1262
+ return TypeInfo{output};
1263
+ }
1264
+
1265
+ template <typename T>
1266
+ inline TypeInfo OptionalTypeInfoImpl<T>::GetOptionalElementType() const {
1267
+ OrtTypeInfo* info;
1268
+ ThrowOnError(GetApi().GetOptionalContainedTypeInfo(this->p_, &info));
1269
+ return TypeInfo{info};
1270
+ }
1271
+
1272
+ template <typename T>
1273
+ inline ONNXTensorElementDataType MapTypeInfoImpl<T>::GetMapKeyType() const {
1274
+ ONNXTensorElementDataType out;
1275
+ ThrowOnError(GetApi().GetMapKeyType(this->p_, &out));
1276
+ return out;
1277
+ }
1278
+
1279
+ template <typename T>
1280
+ inline TypeInfo MapTypeInfoImpl<T>::GetMapValueType() const {
1281
+ OrtTypeInfo* output;
1282
+ ThrowOnError(GetApi().GetMapValueType(this->p_, &output));
1283
+ return TypeInfo{output};
1284
+ }
1285
+
1286
+ template <typename T>
1287
+ inline ConstOptionalTypeInfo TypeInfoImpl<T>::GetOptionalTypeInfo() const {
1288
+ const OrtOptionalTypeInfo* info;
1289
+ ThrowOnError(GetApi().CastTypeInfoToOptionalTypeInfo(this->p_, &info));
1290
+ return ConstOptionalTypeInfo{info};
1291
+ }
1292
+
1293
+ } // namespace detail
1294
+
1295
+ namespace detail {
1296
+
1297
+ template <typename T>
1298
+ template <typename R>
1299
+ inline void ConstValueImpl<T>::GetOpaqueData(const char* domain, const char* type_name, R& out) const {
1300
+ ThrowOnError(GetApi().GetOpaqueValue(domain, type_name, this->p_, &out, sizeof(R)));
1301
+ }
1302
+
1303
+ template <typename T>
1304
+ inline bool ConstValueImpl<T>::IsTensor() const {
1305
+ int out;
1306
+ ThrowOnError(GetApi().IsTensor(this->p_, &out));
1307
+ return out != 0;
1308
+ }
1309
+
1310
+ template <typename T>
1311
+ inline bool ConstValueImpl<T>::HasValue() const {
1312
+ int out;
1313
+ ThrowOnError(GetApi().HasValue(this->p_, &out));
1314
+ return out != 0;
1315
+ }
1316
+
1317
+ template <typename T>
1318
+ inline size_t ConstValueImpl<T>::GetCount() const {
1319
+ size_t out;
1320
+ ThrowOnError(GetApi().GetValueCount(this->p_, &out));
1321
+ return out;
1322
+ }
1323
+
1324
+ template <typename T>
1325
+ inline Value ConstValueImpl<T>::GetValue(int index, OrtAllocator* allocator) const {
1326
+ OrtValue* out;
1327
+ ThrowOnError(GetApi().GetValue(this->p_, index, allocator, &out));
1328
+ return Value{out};
1329
+ }
1330
+
1331
+ template <typename T>
1332
+ inline size_t ConstValueImpl<T>::GetStringTensorDataLength() const {
1333
+ size_t out;
1334
+ ThrowOnError(GetApi().GetStringTensorDataLength(this->p_, &out));
1335
+ return out;
1336
+ }
1337
+
1338
+ template <typename T>
1339
+ inline size_t ConstValueImpl<T>::GetStringTensorElementLength(size_t element_index) const {
1340
+ size_t out;
1341
+ ThrowOnError(GetApi().GetStringTensorElementLength(this->p_, element_index, &out));
1342
+ return out;
1343
+ }
1344
+
1345
+ template <typename T>
1346
+ template <typename R>
1347
+ inline const R* ConstValueImpl<T>::GetTensorData() const {
1348
+ R* out;
1349
+ ThrowOnError(GetApi().GetTensorMutableData(const_cast<OrtValue*>(this->p_), (void**)&out));
1350
+ return out;
1351
+ }
1352
+
1353
+ template <typename T>
1354
+ inline const void* ConstValueImpl<T>::GetTensorRawData() const {
1355
+ void* out;
1356
+ ThrowOnError(GetApi().GetTensorMutableData(const_cast<OrtValue*>(this->p_), &out));
1357
+ return out;
1358
+ }
1359
+
1360
+ template <typename T>
1361
+ inline TypeInfo ConstValueImpl<T>::GetTypeInfo() const {
1362
+ OrtTypeInfo* output;
1363
+ ThrowOnError(GetApi().GetTypeInfo(this->p_, &output));
1364
+ return TypeInfo{output};
1365
+ }
1366
+
1367
+ template <typename T>
1368
+ inline TensorTypeAndShapeInfo ConstValueImpl<T>::GetTensorTypeAndShapeInfo() const {
1369
+ OrtTensorTypeAndShapeInfo* output;
1370
+ ThrowOnError(GetApi().GetTensorTypeAndShape(this->p_, &output));
1371
+ return TensorTypeAndShapeInfo{output};
1372
+ }
1373
+
1374
+ template <typename T>
1375
+ inline ConstMemoryInfo ConstValueImpl<T>::GetTensorMemoryInfo() const {
1376
+ const OrtMemoryInfo* mem_info;
1377
+ ThrowOnError(GetApi().GetTensorMemoryInfo(this->p_, &mem_info));
1378
+ return ConstMemoryInfo(mem_info);
1379
+ }
1380
+
1381
+ template <typename T>
1382
+ inline void ConstValueImpl<T>::GetStringTensorElement(size_t buffer_length, size_t element_index, void* buffer) const {
1383
+ ThrowOnError(GetApi().GetStringTensorElement(this->p_, buffer_length, element_index, buffer));
1384
+ }
1385
+
1386
+ template <typename T>
1387
+ inline std::string ConstValueImpl<T>::GetStringTensorElement(size_t element_index) const {
1388
+ size_t buffer_length;
1389
+ ThrowOnError(GetApi().GetStringTensorElementLength(this->p_, element_index, &buffer_length));
1390
+
1391
+ std::string s;
1392
+ s.resize(buffer_length);
1393
+ ThrowOnError(GetApi().GetStringTensorElement(this->p_, buffer_length, element_index, &s[0]));
1394
+ return s;
1395
+ }
1396
+
1397
+ template <typename T>
1398
+ inline void ConstValueImpl<T>::GetStringTensorContent(void* buffer, size_t buffer_length, size_t* offsets, size_t offsets_count) const {
1399
+ ThrowOnError(GetApi().GetStringTensorContent(this->p_, buffer, buffer_length, offsets, offsets_count));
1400
+ }
1401
+
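// ---- Usage sketch (editor's note; not part of the committed header) ----
// Copying every element out of a string tensor via GetStringTensorElement,
// assuming `value` is an Ort::Value holding ONNX string data.
void ReadStringTensor(const Ort::Value& value) {
  const size_t count = value.GetTensorTypeAndShapeInfo().GetElementCount();
  for (size_t i = 0; i < count; ++i) {
    std::string element = value.GetStringTensorElement(i);  // sizes and copies element i
  }
}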
1402
+ #if !defined(DISABLE_SPARSE_TENSORS)
1403
+ template <typename T>
1404
+ inline OrtSparseFormat ConstValueImpl<T>::GetSparseFormat() const {
1405
+ OrtSparseFormat format;
1406
+ ThrowOnError(GetApi().GetSparseTensorFormat(this->p_, &format));
1407
+ return format;
1408
+ }
1409
+
1410
+ template <typename T>
1411
+ inline TensorTypeAndShapeInfo ConstValueImpl<T>::GetSparseTensorValuesTypeAndShapeInfo() const {
1412
+ OrtTensorTypeAndShapeInfo* output;
1413
+ ThrowOnError(GetApi().GetSparseTensorValuesTypeAndShape(this->p_, &output));
1414
+ return TensorTypeAndShapeInfo{output};
1415
+ }
1416
+
1417
+ template <typename T>
1418
+ inline TensorTypeAndShapeInfo ConstValueImpl<T>::GetSparseTensorIndicesTypeShapeInfo(OrtSparseIndicesFormat indices_format) const {
1419
+ OrtTensorTypeAndShapeInfo* output;
1420
+ ThrowOnError(GetApi().GetSparseTensorIndicesTypeShape(this->p_, indices_format, &output));
1421
+ return TensorTypeAndShapeInfo{output};
1422
+ }
1423
+
1424
+ template <typename T>
1425
+ template <typename R>
1426
+ inline const R* ConstValueImpl<T>::GetSparseTensorIndicesData(OrtSparseIndicesFormat indices_format, size_t& num_indices) const {
1427
+ const void* out;
1428
+ ThrowOnError(GetApi().GetSparseTensorIndices(this->p_, indices_format, &num_indices, &out));
1429
+ return reinterpret_cast<const R*>(out);
1430
+ }
1431
+
1432
+ template <typename T>
1433
+ inline bool ConstValueImpl<T>::IsSparseTensor() const {
1434
+ int out;
1435
+ ThrowOnError(GetApi().IsSparseTensor(this->p_, &out));
1436
+ return out != 0;
1437
+ }
1438
+
1439
+ template <typename T>
1440
+ template <typename R>
1441
+ inline const R* ConstValueImpl<T>::GetSparseTensorValues() const {
1442
+ const void* out;
1443
+ ThrowOnError(GetApi().GetSparseTensorValues(this->p_, &out));
1444
+ return reinterpret_cast<const R*>(out);
1445
+ }
1446
+
1447
+ #endif
1448
+
1449
+ template <typename T>
1450
+ void ValueImpl<T>::FillStringTensor(const char* const* s, size_t s_len) {
1451
+ ThrowOnError(GetApi().FillStringTensor(this->p_, s, s_len));
1452
+ }
1453
+
1454
+ template <typename T>
1455
+ void ValueImpl<T>::FillStringTensorElement(const char* s, size_t index) {
1456
+ ThrowOnError(GetApi().FillStringTensorElement(this->p_, s, index));
1457
+ }
1458
+
1459
+ template <typename T>
1460
+ inline char* ValueImpl<T>::GetResizedStringTensorElementBuffer(size_t index, size_t buffer_length) {
1461
+ char* result;
1462
+ ThrowOnError(GetApi().GetResizedStringTensorElementBuffer(this->p_, index, buffer_length, &result));
1463
+ return result;
1464
+ }
1465
+
1466
+ template <typename T>
1467
+ void* ValueImpl<T>::GetTensorMutableRawData() {
1468
+ void* out;
1469
+ ThrowOnError(GetApi().GetTensorMutableData(this->p_, &out));
1470
+ return out;
1471
+ }
1472
+
1473
+ template <typename T>
1474
+ template <typename R>
1475
+ R* ValueImpl<T>::GetTensorMutableData() {
1476
+ R* out;
1477
+ ThrowOnError(GetApi().GetTensorMutableData(this->p_, (void**)&out));
1478
+ return out;
1479
+ }
1480
+
1481
+ template <typename T>
1482
+ template <typename R>
1483
+ R& ValueImpl<T>::At(const std::vector<int64_t>& location) {
1484
+ static_assert(!std::is_same<T, std::string>::value, "this api does not support std::string");
1485
+ R* out;
1486
+ ThrowOnError(GetApi().TensorAt(this->p_, location.data(), location.size(), (void**)&out));
1487
+ return *out;
1488
+ }
1489
+
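// ---- Usage sketch (editor's note; not part of the committed header) ----
// Element access by coordinates with At<R>(); the underlying TensorAt call
// fails (and ThrowOnError throws) on invalid coordinates.
void IncrementFirstElement(Ort::Value& value) {
  float& x = value.At<float>({0, 0});  // mutable reference into the tensor buffer
  x += 1.0f;
}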
1490
+ #if !defined(DISABLE_SPARSE_TENSORS)
1491
+ template <typename T>
1492
+ void ValueImpl<T>::UseCooIndices(int64_t* indices_data, size_t indices_num) {
1493
+ ThrowOnError(GetApi().UseCooIndices(this->p_, indices_data, indices_num));
1494
+ }
1495
+
1496
+ template <typename T>
1497
+ void ValueImpl<T>::UseCsrIndices(int64_t* inner_data, size_t inner_num, int64_t* outer_data, size_t outer_num) {
1498
+ ThrowOnError(GetApi().UseCsrIndices(this->p_, inner_data, inner_num, outer_data, outer_num));
1499
+ }
1500
+
1501
+ template <typename T>
1502
+ void ValueImpl<T>::UseBlockSparseIndices(const Shape& indices_shape, int32_t* indices_data) {
1503
+ ThrowOnError(GetApi().UseBlockSparseIndices(this->p_, indices_shape.shape, indices_shape.shape_len, indices_data));
1504
+ }
1505
+
1506
+ template <typename T>
1507
+ void ValueImpl<T>::FillSparseTensorCoo(const OrtMemoryInfo* mem_info, const OrtSparseValuesParam& values_param,
1508
+ const int64_t* indices_data, size_t indices_num) {
1509
+ ThrowOnError(GetApi().FillSparseTensorCoo(this->p_, mem_info, values_param.values_shape,
1510
+ values_param.values_shape_len, values_param.data.p_data,
1511
+ indices_data, indices_num));
1512
+ }
1513
+
1514
+ template <typename T>
1515
+ void ValueImpl<T>::FillSparseTensorCsr(const OrtMemoryInfo* data_mem_info,
1516
+ const OrtSparseValuesParam& values,
1517
+ const int64_t* inner_indices_data, size_t inner_indices_num,
1518
+ const int64_t* outer_indices_data, size_t outer_indices_num) {
1519
+ ThrowOnError(GetApi().FillSparseTensorCsr(this->p_, data_mem_info, values.values_shape, values.values_shape_len, values.data.p_data,
1520
+ inner_indices_data, inner_indices_num,
1521
+ outer_indices_data, outer_indices_num));
1522
+ }
1523
+
1524
+ template <typename T>
1525
+ void ValueImpl<T>::FillSparseTensorBlockSparse(const OrtMemoryInfo* data_mem_info,
1526
+ const OrtSparseValuesParam& values,
1527
+ const Shape& indices_shape,
1528
+ const int32_t* indices_data) {
1529
+ ThrowOnError(GetApi().FillSparseTensorBlockSparse(this->p_, data_mem_info, values.values_shape, values.values_shape_len, values.data.p_data,
1530
+ indices_shape.shape, indices_shape.shape_len,
1531
+ indices_data));
1532
+ }
1533
+
1534
+ #endif // !defined(DISABLE_SPARSE_TENSORS)
1535
+
1536
+ } // namespace detail
1537
+
1538
+ template <typename T>
1539
+ inline Value Value::CreateTensor(const OrtMemoryInfo* info, T* p_data, size_t p_data_element_count, const int64_t* shape, size_t shape_len) {
1540
+ return CreateTensor(info, p_data, p_data_element_count * sizeof(T), shape, shape_len, TypeToTensorType<T>::type);
1541
+ }
1542
+
1543
+ inline Value Value::CreateTensor(const OrtMemoryInfo* info, void* p_data, size_t p_data_byte_count, const int64_t* shape, size_t shape_len,
1544
+ ONNXTensorElementDataType type) {
1545
+ OrtValue* out;
1546
+ ThrowOnError(GetApi().CreateTensorWithDataAsOrtValue(info, p_data, p_data_byte_count, shape, shape_len, type, &out));
1547
+ return Value{out};
1548
+ }
1549
+
1550
+ template <typename T>
1551
+ inline Value Value::CreateTensor(OrtAllocator* allocator, const int64_t* shape, size_t shape_len) {
1552
+ return CreateTensor(allocator, shape, shape_len, TypeToTensorType<T>::type);
1553
+ }
1554
+
1555
+ inline Value Value::CreateTensor(OrtAllocator* allocator, const int64_t* shape, size_t shape_len, ONNXTensorElementDataType type) {
1556
+ OrtValue* out;
1557
+ ThrowOnError(GetApi().CreateTensorAsOrtValue(allocator, shape, shape_len, type, &out));
1558
+ return Value{out};
1559
+ }
1560
+
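// ---- Usage sketch (editor's note; not part of the committed header) ----
// The two CreateTensor flavors above: one borrows caller-owned memory (which
// must outlive the Value), the other lets the given allocator own the buffer.
Ort::Value MakeFloatTensor() {
  static float data[6] = {1, 2, 3, 4, 5, 6};  // must outlive `borrowed`
  const int64_t shape[2] = {2, 3};
  Ort::MemoryInfo mem_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  Ort::Value borrowed = Ort::Value::CreateTensor<float>(mem_info, data, 6, shape, 2);
  Ort::AllocatorWithDefaultOptions allocator;
  Ort::Value owned = Ort::Value::CreateTensor<float>(allocator, shape, 2);  // allocator-owned buffer
  return owned;
}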
1561
+ #if !defined(DISABLE_SPARSE_TENSORS)
1562
+
1563
+ template <typename T>
1564
+ inline Value Value::CreateSparseTensor(const OrtMemoryInfo* info, T* p_data, const Shape& dense_shape,
1565
+ const Shape& values_shape) {
1566
+ return CreateSparseTensor(info, p_data, dense_shape, values_shape, TypeToTensorType<T>::type);
1567
+ }
1568
+
1569
+ inline Value Value::CreateSparseTensor(const OrtMemoryInfo* info, void* p_data, const Shape& dense_shape,
1570
+ const Shape& values_shape, ONNXTensorElementDataType type) {
1571
+ OrtValue* out;
1572
+ ThrowOnError(GetApi().CreateSparseTensorWithValuesAsOrtValue(info, p_data, dense_shape.shape, dense_shape.shape_len,
1573
+ values_shape.shape, values_shape.shape_len, type, &out));
1574
+ return Value{out};
1575
+ }
1576
+
1577
+ template <typename T>
1578
+ inline Value Value::CreateSparseTensor(OrtAllocator* allocator, const Shape& dense_shape) {
1579
+ return CreateSparseTensor(allocator, dense_shape, TypeToTensorType<T>::type);
1580
+ }
1581
+
1582
+ inline Value Value::CreateSparseTensor(OrtAllocator* allocator, const Shape& dense_shape,
1583
+ ONNXTensorElementDataType type) {
1584
+ OrtValue* out;
1585
+ ThrowOnError(GetApi().CreateSparseTensorAsOrtValue(allocator, dense_shape.shape, dense_shape.shape_len, type, &out));
1586
+ return Value{out};
1587
+ }
1588
+ #endif // !defined(DISABLE_SPARSE_TENSORS)
1589
+
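// ---- Usage sketch (editor's note; not part of the committed header) ----
// A COO sparse tensor over caller-owned buffers; the values and indices must
// outlive the Value, and UseCooIndices attaches flat indices into the dense shape.
Ort::Value MakeCooSparseTensor() {
  static float values[2] = {1.0f, 2.0f};
  static int64_t indices[2] = {0, 3};
  static const int64_t dense_dims[1] = {4};
  static const int64_t value_dims[1] = {2};
  Ort::MemoryInfo info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeDefault);
  Ort::Value v = Ort::Value::CreateSparseTensor<float>(info, values, Ort::Shape{dense_dims, 1},
                                                       Ort::Shape{value_dims, 1});
  v.UseCooIndices(indices, 2);
  return v;
}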
1590
+ inline Value Value::CreateMap(const Value& keys, const Value& values) {
1591
+ OrtValue* out;
1592
+ const OrtValue* inputs[2] = {keys, values};
1593
+ ThrowOnError(GetApi().CreateValue(inputs, 2, ONNX_TYPE_MAP, &out));
1594
+ return Value{out};
1595
+ }
1596
+
1597
+ inline Value Value::CreateSequence(const std::vector<Value>& values) {
1598
+ OrtValue* out;
1599
+ std::vector<const OrtValue*> values_ort{values.data(), values.data() + values.size()};
1600
+ ThrowOnError(GetApi().CreateValue(values_ort.data(), values_ort.size(), ONNX_TYPE_SEQUENCE, &out));
1601
+ return Value{out};
1602
+ }
1603
+
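// ---- Usage sketch (editor's note; not part of the committed header) ----
// Building an ONNX sequence from existing values; `items` remains owned by the
// caller after CreateSequence returns.
Ort::Value MakeSequence(Ort::Value a, Ort::Value b) {
  std::vector<Ort::Value> items;
  items.push_back(std::move(a));
  items.push_back(std::move(b));
  return Ort::Value::CreateSequence(items);
}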
1604
+ template <typename T>
1605
+ inline Value Value::CreateOpaque(const char* domain, const char* type_name, const T& data_container) {
1606
+ OrtValue* out;
1607
+ ThrowOnError(GetApi().CreateOpaqueValue(domain, type_name, &data_container, sizeof(T), &out));
1608
+ return Value{out};
1609
+ }
1610
+
1611
+ //
1612
+ // Custom OP Inlines
1613
+ //
1614
+ inline Logger::Logger(const OrtLogger* logger) : logger_(logger) {
1615
+ Ort::ThrowOnError(GetApi().Logger_GetLoggingSeverityLevel(this->logger_, &this->cached_severity_level_));
1616
+ }
1617
+
1618
+ inline OrtLoggingLevel Logger::GetLoggingSeverityLevel() const noexcept {
1619
+ return cached_severity_level_;
1620
+ }
1621
+
1622
+ inline Status Logger::LogMessage(OrtLoggingLevel log_severity_level, const ORTCHAR_T* file_path, int line_number,
1623
+ const char* func_name, const char* message) const noexcept {
1624
+ OrtStatus* status = GetApi().Logger_LogMessage(logger_, log_severity_level, message, file_path, line_number,
1625
+ func_name);
1626
+ return Status{status};
1627
+ }
1628
+
1629
+ // Disable warnings about the format string not being a literal (-Wformat-nonliteral and -Wformat-security)
1630
+ // for gcc and clang. The alternative is to use actual C-style variadic parameters and apply
1631
+ // __attribute__(format(printf...)), which does not work with variadic templates.
1632
+ #if defined(__GNUC__)
1633
+ #pragma GCC diagnostic push
1634
+ #pragma GCC diagnostic ignored "-Wformat-nonliteral"
1635
+ #pragma GCC diagnostic ignored "-Wformat-security"
1636
+ #elif defined(__clang__)
1637
+ #pragma clang diagnostic push
1638
+ #pragma clang diagnostic ignored "-Wformat-nonliteral"
1639
+ #pragma clang diagnostic ignored "-Wformat-security"
1640
+ #endif
1641
+ template <typename... Args>
1642
+ inline Status Logger::LogFormattedMessage(OrtLoggingLevel log_severity_level, const ORTCHAR_T* file_path,
1643
+ int line_number, const char* func_name, const char* format,
1644
+ Args&&... args) const noexcept {
1645
+ int msg_len = std::snprintf(nullptr, 0U, format, std::forward<Args>(args)...);
1646
+
1647
+ if (msg_len < 0) { // Formatting error
1648
+ return Status("Failed to log message due to formatting error", OrtErrorCode::ORT_FAIL);
1649
+ }
1650
+
1651
+ OrtStatus* status = nullptr;
1652
+ const size_t buffer_size = static_cast<size_t>(msg_len) + 1U;
1653
+
1654
+ constexpr size_t kStackBufferSize = 1024;
1655
+
1656
+ if (buffer_size < kStackBufferSize) {
1657
+ char buffer[kStackBufferSize];
1658
+ snprintf(buffer, kStackBufferSize, format, std::forward<Args>(args)...);
1659
+ status = GetApi().Logger_LogMessage(logger_, log_severity_level, buffer, file_path, line_number, func_name);
1660
+ } else {
1661
+ // std::make_unique is only supported starting at C++14.
1662
+ #if (__cplusplus >= 201402L) || (_MSC_VER >= 1900)
1663
+ auto buffer = std::make_unique<char[]>(buffer_size);
1664
+ #else
1665
+ std::unique_ptr<char[]> buffer(new char[buffer_size]);
1666
+ #endif
1667
+ std::snprintf(buffer.get(), buffer_size, format, std::forward<Args>(args)...);
1668
+ status = GetApi().Logger_LogMessage(logger_, log_severity_level, buffer.get(), file_path, line_number, func_name);
1669
+ }
1670
+
1671
+ return Status{status};
1672
+ }
1673
+ // Re-enable -Wformat-nonliteral and -Wformat-security
1674
+ #if defined(__GNUC__)
1675
+ #pragma GCC diagnostic pop
1676
+ #elif defined(__clang__)
1677
+ #pragma clang diagnostic pop
1678
+ #endif
1679
+
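// ---- Usage sketch (editor's note; not part of the committed header) ----
// Logging from custom-op code. The ORT_CXX_LOGF convenience macro declared in
// onnxruntime_cxx_api.h checks the cached severity level and supplies
// file/line/function before delegating to LogFormattedMessage.
void LogStep(const Ort::Logger& logger, int step) {
  ORT_CXX_LOGF(logger, ORT_LOGGING_LEVEL_INFO, "processing step %d", step);
}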
1680
+ inline KernelContext::KernelContext(OrtKernelContext* context) : ctx_(context) {
1681
+ }
1682
+
1683
+ inline size_t KernelContext::GetInputCount() const {
1684
+ size_t out = 0;
1685
+ Ort::ThrowOnError(GetApi().KernelContext_GetInputCount(ctx_, &out));
1686
+ return out;
1687
+ }
1688
+
1689
+ inline size_t KernelContext::GetOutputCount() const {
1690
+ size_t out = 0;
1691
+ Ort::ThrowOnError(GetApi().KernelContext_GetOutputCount(ctx_, &out));
1692
+ return out;
1693
+ }
1694
+
1695
+ inline ConstValue KernelContext::GetInput(size_t index) const {
1696
+ const OrtValue* out = nullptr;
1697
+ Ort::ThrowOnError(GetApi().KernelContext_GetInput(ctx_, index, &out));
1698
+ return ConstValue{out};
1699
+ }
1700
+
1701
+ inline UnownedValue KernelContext::GetOutput(size_t index, const int64_t* dim_values, size_t dim_count) const {
1702
+ OrtValue* out = nullptr;
1703
+ Ort::ThrowOnError(GetApi().KernelContext_GetOutput(ctx_, index, dim_values, dim_count, &out));
1704
+ return UnownedValue(out);
1705
+ }
1706
+
1707
+ inline UnownedValue KernelContext::GetOutput(size_t index, const std::vector<int64_t>& dims) const {
1708
+ OrtValue* out = nullptr;
1709
+ Ort::ThrowOnError(GetApi().KernelContext_GetOutput(ctx_, index, dims.data(), dims.size(), &out));
1710
+ return UnownedValue(out);
1711
+ }
1712
+
1713
+ inline void* KernelContext::GetGPUComputeStream() const {
1714
+ void* out = nullptr;
1715
+ Ort::ThrowOnError(GetApi().KernelContext_GetGPUComputeStream(ctx_, &out));
1716
+ return out;
1717
+ }
1718
+
1719
+ inline OrtAllocator* KernelContext::GetAllocator(const OrtMemoryInfo& memory_info) const {
1720
+ OrtAllocator* out = nullptr;
1721
+ Ort::ThrowOnError(GetApi().KernelContext_GetAllocator(ctx_, &memory_info, &out));
1722
+ return out;
1723
+ }
1724
+
1725
+ inline Logger KernelContext::GetLogger() const {
1726
+ const OrtLogger* out = nullptr;
1727
+ ThrowOnError(GetApi().KernelContext_GetLogger(this->ctx_, &out));
1728
+ return Logger{out};
1729
+ }
1730
+
1731
+ inline void KernelContext::ParallelFor(void (*fn)(void*, size_t), size_t total, size_t num_batch, void* usr_data) const {
1732
+ ThrowOnError(GetApi().KernelContext_ParallelFor(ctx_, fn, total, num_batch, usr_data));
1733
+ }
1734
+
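// ---- Usage sketch (editor's note; not part of the committed header) ----
// Splitting elementwise work across the session's thread pool with ParallelFor.
// The batching argument is passed as 0 here on the assumption that ORT then
// picks a batch size; see the KernelContext_ParallelFor C API docs.
struct SquareTask {
  const float* in;
  float* out;
};

void SquareAll(const Ort::KernelContext& ctx, SquareTask& task, size_t n) {
  auto fn = [](void* usr_data, size_t i) {  // captureless lambda decays to a function pointer
    auto* t = static_cast<SquareTask*>(usr_data);
    t->out[i] = t->in[i] * t->in[i];
  };
  ctx.ParallelFor(fn, /*total=*/n, /*num_batch=*/0, &task);
}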
1735
+ inline OpAttr::OpAttr(const char* name, const void* data, int len, OrtOpAttrType type) {
1736
+ Ort::ThrowOnError(GetApi().CreateOpAttr(name, data, len, type, &p_));
1737
+ }
1738
+
1739
+ namespace detail {
1740
+ template <typename T>
1741
+ inline KernelInfo KernelInfoImpl<T>::Copy() const {
1742
+ OrtKernelInfo* info_copy = nullptr;
1743
+ Ort::ThrowOnError(GetApi().CopyKernelInfo(this->p_, &info_copy));
1744
+ return KernelInfo{info_copy};
1745
+ }
1746
+
1747
+ template <typename T>
1748
+ inline size_t KernelInfoImpl<T>::GetInputCount() const {
1749
+ size_t out = 0;
1750
+ ThrowOnError(GetApi().KernelInfo_GetInputCount(this->p_, &out));
1751
+ return out;
1752
+ }
1753
+
1754
+ template <typename T>
1755
+ inline size_t KernelInfoImpl<T>::GetOutputCount() const {
1756
+ size_t out = 0;
1757
+ ThrowOnError(GetApi().KernelInfo_GetOutputCount(this->p_, &out));
1758
+ return out;
1759
+ }
1760
+
1761
+ template <typename T>
1762
+ inline std::string KernelInfoImpl<T>::GetInputName(size_t index) const {
1763
+ size_t size = 0;
1764
+
1765
+ // Feed nullptr for the data buffer to query the true size of the string value
1766
+ Ort::ThrowOnError(GetApi().KernelInfo_GetInputName(this->p_, index, nullptr, &size));
1767
+
1768
+ std::string out;
1769
+ out.resize(size);
1770
+ Ort::ThrowOnError(GetApi().KernelInfo_GetInputName(this->p_, index, &out[0], &size));
1771
+ out.resize(size - 1); // remove the terminating character '\0'
1772
+
1773
+ return out;
1774
+ }
1775
+
1776
+ template <typename T>
1777
+ inline std::string KernelInfoImpl<T>::GetOutputName(size_t index) const {
1778
+ size_t size = 0;
1779
+
1780
+ // Feed nullptr for the data buffer to query the true size of the string value
1781
+ Ort::ThrowOnError(GetApi().KernelInfo_GetOutputName(this->p_, index, nullptr, &size));
1782
+
1783
+ std::string out;
1784
+ out.resize(size);
1785
+ Ort::ThrowOnError(GetApi().KernelInfo_GetOutputName(this->p_, index, &out[0], &size));
1786
+ out.resize(size - 1); // remove the terminating character '\0'
1787
+
1788
+ return out;
1789
+ }
1790
+
1791
+ template <typename T>
1792
+ inline TypeInfo KernelInfoImpl<T>::GetInputTypeInfo(size_t index) const {
1793
+ OrtTypeInfo* out = nullptr;
1794
+ ThrowOnError(GetApi().KernelInfo_GetInputTypeInfo(this->p_, index, &out));
1795
+ return TypeInfo{out};
1796
+ }
1797
+
1798
+ template <typename T>
1799
+ inline TypeInfo KernelInfoImpl<T>::GetOutputTypeInfo(size_t index) const {
1800
+ OrtTypeInfo* out = nullptr;
1801
+ ThrowOnError(GetApi().KernelInfo_GetOutputTypeInfo(this->p_, index, &out));
1802
+ return TypeInfo{out};
1803
+ }
1804
+
1805
+ template <typename T>
1806
+ inline Value KernelInfoImpl<T>::GetTensorAttribute(const char* name, OrtAllocator* allocator) const {
1807
+ OrtValue* out = nullptr;
1808
+ ThrowOnError(GetApi().KernelInfoGetAttribute_tensor(this->p_, name, allocator, &out));
1809
+ return Value{out};
1810
+ }
1811
+
1812
+ template <typename T>
1813
+ inline ConstValue KernelInfoImpl<T>::GetTensorConstantInput(size_t index, int* is_constant) const {
1814
+ const OrtValue* out = nullptr;
1815
+ ThrowOnError(GetApi().KernelInfoGetConstantInput_tensor(this->p_, index, is_constant, &out));
1816
+ return ConstValue{out};
1817
+ }
1818
+
1819
+ template <typename T>
1820
+ inline std::string KernelInfoImpl<T>::GetNodeName() const {
1821
+ size_t size = 0;
1822
+
1823
+ // Feed nullptr for the data buffer to query the true size of the string value
1824
+ Ort::ThrowOnError(GetApi().KernelInfo_GetNodeName(this->p_, nullptr, &size));
1825
+
1826
+ std::string out;
1827
+ out.resize(size);
1828
+ Ort::ThrowOnError(GetApi().KernelInfo_GetNodeName(this->p_, &out[0], &size));
1829
+ out.resize(size - 1); // remove the terminating character '\0'
1830
+
1831
+ return out;
1832
+ }
1833
+
1834
+ template <typename T>
1835
+ inline Logger KernelInfoImpl<T>::GetLogger() const {
1836
+ const OrtLogger* out = nullptr;
1837
+ ThrowOnError(GetApi().KernelInfo_GetLogger(this->p_, &out));
1838
+ return Logger{out};
1839
+ }
1840
+
1841
+ inline void attr_utils::GetAttr(const OrtKernelInfo* p, const char* name, float& out) {
1842
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_float(p, name, &out));
1843
+ }
1844
+
1845
+ inline void attr_utils::GetAttr(const OrtKernelInfo* p, const char* name, int64_t& out) {
1846
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_int64(p, name, &out));
1847
+ }
1848
+
1849
+ inline void attr_utils::GetAttr(const OrtKernelInfo* p, const char* name, std::string& result) {
1850
+ size_t size = 0;
1851
+ // Feed nullptr for the data buffer to query the true size of the string attribute
1852
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_string(p, name, nullptr, &size));
1853
+
1854
+ std::string out;
1855
+ out.resize(size);
1856
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttribute_string(p, name, &out[0], &size));
1857
+ out.resize(size - 1); // remove the terminating character '\0'
1858
+ out.swap(result);
1859
+ }
1860
+
1861
+ inline void attr_utils::GetAttrs(const OrtKernelInfo* p, const char* name, std::vector<float>& result) {
1862
+ size_t size = 0;
1863
+ // Feed nullptr for the data buffer to query the true size of the attribute
1864
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_float(p, name, nullptr, &size));
1865
+
1866
+ std::vector<float> out;
1867
+ out.resize(size);
1868
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_float(p, name, out.data(), &size));
1869
+ out.swap(result);
1870
+ }
1871
+
1872
+ inline void attr_utils::GetAttrs(const OrtKernelInfo* p, const char* name, std::vector<int64_t>& result) {
1873
+ size_t size = 0;
1874
+
1875
+ // Feed nullptr for the data buffer to query the true size of the attribute
1876
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_int64(p, name, nullptr, &size));
1877
+
1878
+ std::vector<int64_t> out;
1879
+ out.resize(size);
1880
+ Ort::ThrowOnError(GetApi().KernelInfoGetAttributeArray_int64(p, name, out.data(), &size));
1881
+ out.swap(result);
1882
+ }
1883
+ } // namespace detail
1884
+
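// ---- Usage sketch (editor's note; not part of the committed header; attribute names are illustrative) ----
// Reading node attributes in a custom-op kernel constructor through the
// KernelInfo::GetAttribute(s)<T> templates declared in onnxruntime_cxx_api.h,
// which route into the attr_utils helpers above.
void ReadKernelAttributes(const Ort::KernelInfo& info) {
  float alpha = info.GetAttribute<float>("alpha");
  std::string mode = info.GetAttribute<std::string>("mode");
  std::vector<int64_t> axes = info.GetAttributes<int64_t>("axes");
}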
1885
+ inline KernelInfo::KernelInfo(OrtKernelInfo* info) : detail::KernelInfoImpl<OrtKernelInfo>{info} {}
1886
+
1887
+ inline Op::Op(OrtOp* p) : Base<OrtOp>(p) {}
1888
+
1889
+ inline Op Op::Create(const OrtKernelInfo* info, const char* op_name, const char* domain, int version,
1890
+ const char** type_constraint_names,
1891
+ const ONNXTensorElementDataType* type_constraint_values,
1892
+ size_t type_constraint_count,
1893
+ const OpAttr* attr_values, size_t attr_count,
1894
+ size_t input_count, size_t output_count) {
1895
+ static_assert(sizeof(OpAttr) == sizeof(OrtOpAttr*),
1896
+ "OpAttr's is expected to be just an array of OrtOpAttr in memory so we can reinterpret safely");
1897
+ auto attr_input_values = reinterpret_cast<const OrtOpAttr* const*>(attr_values);
1898
+ OrtOp* op;
1899
+ Ort::ThrowOnError(GetApi().CreateOp(info, op_name, domain, version, type_constraint_names, type_constraint_values,
1900
+ static_cast<int>(type_constraint_count),
1901
+ attr_input_values,
1902
+ static_cast<int>(attr_count),
1903
+ static_cast<int>(input_count),
1904
+ static_cast<int>(output_count), &op));
1905
+ return Op{op};
1906
+ }
1907
+
1908
+ inline void Op::Invoke(const OrtKernelContext* context,
1909
+ const Value* input_values,
1910
+ size_t input_count,
1911
+ Value* output_values,
1912
+ size_t output_count) {
1913
+ static_assert(sizeof(Value) == sizeof(OrtValue*),
1914
+ "Value is really just an array of OrtValue* in memory, so we can reinterpret_cast safely");
1915
+ auto ort_input_values = reinterpret_cast<const OrtValue* const*>(input_values);
1916
+ auto ort_output_values = reinterpret_cast<OrtValue**>(output_values);
1917
+ Ort::ThrowOnError(GetApi().InvokeOp(context, p_, ort_input_values, static_cast<int>(input_count),
1918
+ ort_output_values, static_cast<int>(output_count)));
1919
+ }
1920
+
1921
+ inline void Op::Invoke(const OrtKernelContext* context,
1922
+ const OrtValue* const* input_values,
1923
+ size_t input_count,
1924
+ OrtValue* const* output_values,
1925
+ size_t output_count) {
1926
+ Ort::ThrowOnError(GetApi().InvokeOp(context, p_, input_values, static_cast<int>(input_count),
1927
+ output_values, static_cast<int>(output_count)));
1928
+ }
1929
+
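// ---- Usage sketch (editor's note; not part of the committed header; names are illustrative) ----
// Wrapping a built-in ONNX operator inside a custom kernel: Create the Op once,
// then Invoke it per call with the kernel context's values.
struct AddWrapperKernel {
  explicit AddWrapperKernel(const OrtKernelInfo* info) {
    const char* type_names[] = {"T"};
    const ONNXTensorElementDataType type_values[] = {ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT};
    add_op_ = Ort::Op::Create(info, "Add", "", /*version=*/14, type_names, type_values, 1,
                              /*attr_values=*/nullptr, /*attr_count=*/0,
                              /*input_count=*/2, /*output_count=*/1);
  }
  // Later, in Compute: add_op_.Invoke(context, inputs, 2, outputs, 1);
  Ort::Op add_op_{nullptr};
};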
1930
+ inline std::string GetVersionString() {
1931
+ return OrtGetApiBase()->GetVersionString();
1932
+ }
1933
+
1934
+ inline std::string GetBuildInfoString() {
1935
+ return GetApi().GetBuildInfoString();
1936
+ }
1937
+
1938
+ inline std::vector<std::string> GetAvailableProviders() {
1939
+ char** providers;
1940
+ int len;
1941
+
1942
+ auto release_fn = [&len](char** providers) {
1943
+ // This should always return nullptr.
1944
+ ThrowOnError(GetApi().ReleaseAvailableProviders(providers, len));
1945
+ };
1946
+
1947
+ ThrowOnError(GetApi().GetAvailableProviders(&providers, &len));
1948
+ std::unique_ptr<char*, decltype(release_fn)> guard(providers, release_fn);
1949
+ std::vector<std::string> available_providers;
1950
+ available_providers.reserve(static_cast<size_t>(len));
1951
+ for (int i = 0; i < len; ++i) {
1952
+ available_providers.emplace_back(providers[i]);
1953
+ }
1954
+ return available_providers;
1955
+ }
1956
+
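// ---- Usage sketch (editor's note; not part of the committed header) ----
// Listing the execution providers compiled into this build.
#include <iostream>

void PrintAvailableProviders() {
  for (const std::string& name : Ort::GetAvailableProviders()) {
    std::cout << name << '\n';  // e.g. "CoreMLExecutionProvider", "CPUExecutionProvider"
  }
}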
1957
+ template <typename TOp, typename TKernel, bool WithStatus>
1958
+ void CustomOpBase<TOp, TKernel, WithStatus>::GetSessionConfigs(std::unordered_map<std::string, std::string>& out,
1959
+ ConstSessionOptions options) const {
1960
+ const TOp* derived = static_cast<const TOp*>(this);
1961
+ std::vector<std::string> keys = derived->GetSessionConfigKeys();
1962
+
1963
+ out.reserve(keys.size());
1964
+
1965
+ std::string config_entry_key = detail::MakeCustomOpConfigEntryKey(derived->GetName(), "");
1966
+ const size_t prefix_size = config_entry_key.length();
1967
+
1968
+ for (const auto& key : keys) {
1969
+ config_entry_key.resize(prefix_size);
1970
+ config_entry_key.append(key);
1971
+ out[key] = options.GetConfigEntryOrDefault(config_entry_key.c_str(), "");
1972
+ }
1973
+ }
1974
+
1975
+ inline ShapeInferContext::ShapeInferContext(const OrtApi* ort_api,
1976
+ OrtShapeInferContext* ctx) : ort_api_(ort_api), ctx_(ctx) {
1977
+ size_t input_count = 0;
1978
+ Ort::ThrowOnError(ort_api_->ShapeInferContext_GetInputCount(ctx_, &input_count));
1979
+ for (size_t ith_input = 0; ith_input < input_count; ++ith_input) {
1980
+ OrtTensorTypeAndShapeInfo* info{};
1981
+ Ort::ThrowOnError(ort_api_->ShapeInferContext_GetInputTypeShape(ctx, ith_input, &info));
1982
+ TensorTypeAndShapeInfo type_shape_info(info);
1983
+ auto integer_shape = type_shape_info.GetShape();
1984
+ std::vector<const char*> symbolic_shape(integer_shape.size(), {});
1985
+ type_shape_info.GetSymbolicDimensions(&symbolic_shape[0], integer_shape.size());
1986
+ Shape shape;
1987
+ for (size_t ith = 0; ith < integer_shape.size(); ++ith) {
1988
+ if (symbolic_shape[ith] && std::string{symbolic_shape[ith]}.size() > 0) {
1989
+ shape.emplace_back(symbolic_shape[ith]);
1990
+ } else {
1991
+ shape.emplace_back(integer_shape[ith]);
1992
+ }
1993
+ }
1994
+ input_shapes_.push_back(std::move(shape));
1995
+ type_shape_info.release();
1996
+ }
1997
+ }
1998
+
1999
+ inline Status ShapeInferContext::SetOutputShape(size_t indice, const Shape& shape) {
2000
+ OrtTensorTypeAndShapeInfo* info = {};
2001
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->CreateTensorTypeAndShapeInfo(&info));
2002
+
2003
+ using InfoPtr = std::unique_ptr<OrtTensorTypeAndShapeInfo, std::function<void(OrtTensorTypeAndShapeInfo*)>>;
2004
+
2005
+ InfoPtr info_ptr(info, [this](OrtTensorTypeAndShapeInfo* obj) {
2006
+ ort_api_->ReleaseTensorTypeAndShapeInfo(obj);
2007
+ });
2008
+
2009
+ std::vector<int64_t> integer_dims;
2010
+ std::vector<const char*> symbolic_dims;
2011
+
2012
+ for (const auto dim : shape) {
2013
+ if (dim.IsInt()) {
2014
+ integer_dims.push_back(dim.AsInt());  // push the dimension's value, not the result of IsInt()
2015
+ symbolic_dims.push_back("");
2016
+ } else {
2017
+ if (!dim.AsSym() || std::string{dim.AsSym()}.empty()) {
2018
+ ORT_CXX_API_THROW("Symbolic dim must not be an empty string", ORT_INVALID_ARGUMENT);
2019
+ }
2020
+ integer_dims.push_back(SymbolicInteger::INVALID_INT_DIM);
2021
+ symbolic_dims.push_back(dim.AsSym());
2022
+ }
2023
+ }
2024
+
2025
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->SetDimensions(info, integer_dims.data(), integer_dims.size()));
2026
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->SetSymbolicDimensions(info, symbolic_dims.data(), symbolic_dims.size()));
2027
+ ORT_CXX_RETURN_ON_API_FAIL(ort_api_->ShapeInferContext_SetOutputTypeShape(ctx_, indice, info));
2028
+ return Status{nullptr};
2029
+ }
2030
+
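// ---- Usage sketch (editor's note; not part of the committed header) ----
// A shape-inference hook for an elementwise custom op that propagates the first
// input's shape, assuming the GetInputShape accessor that ShapeInferContext
// declares in onnxruntime_cxx_api.h.
Ort::Status InferEltwiseShape(Ort::ShapeInferContext& ctx) {
  return ctx.SetOutputShape(0, ctx.GetInputShape(0));
}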
2031
+ inline int64_t ShapeInferContext::GetAttrInt(const char* attr_name) {
2032
+ const auto* attr = GetAttrHdl(attr_name);
2033
+ int64_t i = {};
2034
+ size_t out = {};
2035
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_INT, &i, sizeof(i), &out));
2036
+ return i;
2037
+ }
2038
+
2039
+ inline ShapeInferContext::Ints ShapeInferContext::GetAttrInts(const char* attr_name) {
2040
+ const auto* attr = GetAttrHdl(attr_name);
2041
+ int64_t i = {};
2042
+ size_t out = {};
2043
+ // first call to get the bytes needed
2044
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_INTS, &i, sizeof(i), &out);
2045
+ if (status) {
2046
+ size_t num_i = out / sizeof(int64_t);
2047
+ ShapeInferContext::Ints ints(num_i, 0);
2048
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_INTS, ints.data(), out, &out));
2049
+ return ints;
2050
+ } else {
2051
+ return {i};
2052
+ }
2053
+ }
2054
+
2055
+ inline float ShapeInferContext::GetAttrFloat(const char* attr_name) {
2056
+ const auto* attr = GetAttrHdl(attr_name);
2057
+ float f = {};
2058
+ size_t out = {};
2059
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_FLOAT, &f, sizeof(f), &out));
2060
+ return f;
2061
+ }
2062
+
2063
+ inline ShapeInferContext::Floats ShapeInferContext::GetAttrFloats(const char* attr_name) {
2064
+ const auto* attr = GetAttrHdl(attr_name);
2065
+ float f = {};
2066
+ size_t out = {};
2067
+ // first call to get the bytes needed
2068
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_FLOATS, &f, sizeof(f), &out);
2069
+ if (status) {
2070
+ size_t num_f = out / sizeof(float);
2071
+ ShapeInferContext::Floats floats(num_f, 0);
2072
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_FLOATS, floats.data(), out, &out));
2073
+ return floats;
2074
+ } else {
2075
+ return {f};
2076
+ }
2077
+ }
2078
+
2079
+ inline std::string ShapeInferContext::GetAttrString(const char* attr_name) {
2080
+ const auto* attr = GetAttrHdl(attr_name);
2081
+ char c = {};
2082
+ size_t out = {};
2083
+ // first call to get the bytes needed
2084
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRING, &c, sizeof(char), &out);
2085
+ if (status) {
2086
+ std::vector<char> chars(out, '\0');
2087
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRING, chars.data(), out, &out));
2088
+ return {chars.data()};
2089
+ } else {
2090
+ return {c};
2091
+ }
2092
+ }
2093
+
2094
+ inline ShapeInferContext::Strings ShapeInferContext::GetAttrStrings(const char* attr_name) {
2095
+ const auto* attr = GetAttrHdl(attr_name);
2096
+ char c = {};
2097
+ size_t out = {};
2098
+ // first call to get the bytes needed
2099
+ auto status = ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRINGS, &c, sizeof(char), &out);
2100
+ if (status) {
2101
+ std::vector<char> chars(out, '\0');
2102
+ Ort::ThrowOnError(ort_api_->ReadOpAttr(attr, ORT_OP_ATTR_STRINGS, chars.data(), out, &out));
2103
+ ShapeInferContext::Strings strings;
2104
+ char* char_st = chars.data();
2105
+ char* char_ed = char_st + out;
2106
+ while (char_st < char_ed) {
2107
+ strings.emplace_back(char_st);
2108
+ while (*char_st != '\0') {
2109
+ char_st++;
2110
+ }
2111
+ char_st++;
2112
+ }
2113
+ return strings;
2114
+ } else {
2115
+ return {std::string{c}};
2116
+ }
2117
+ }
2118
+
2119
+ inline const OrtOpAttr* ShapeInferContext::GetAttrHdl(const char* attr_name) const {
2120
+ const OrtOpAttr* attr_hdl = {};
2121
+ Ort::ThrowOnError(ort_api_->ShapeInferContext_GetAttribute(ctx_, attr_name, &attr_hdl));
2122
+ return attr_hdl;
2123
+ }
2124
+
2125
+ } // namespace Ort
1.18.1/onnxruntime.xcframework/Headers/onnxruntime_float16.h ADDED
@@ -0,0 +1,540 @@
1
+ // Copyright (c) Microsoft Corporation. All rights reserved.
2
+ // Licensed under the MIT License.
3
+
4
+ #pragma once
5
+
6
+ #include <stdint.h>
7
+ #include <cmath>
8
+ #include <cstring>
9
+ #include <limits>
10
+
11
+ namespace onnxruntime_float16 {
12
+
13
+ namespace detail {
14
+
15
+ enum class endian {
16
+ #if defined(_WIN32)
17
+ little = 0,
18
+ big = 1,
19
+ native = little,
20
+ #elif defined(__GNUC__) || defined(__clang__)
21
+ little = __ORDER_LITTLE_ENDIAN__,
22
+ big = __ORDER_BIG_ENDIAN__,
23
+ native = __BYTE_ORDER__,
24
+ #else
25
+ #error onnxruntime_float16::detail::endian is not implemented in this environment.
26
+ #endif
27
+ };
28
+
29
+ static_assert(
30
+ endian::native == endian::little || endian::native == endian::big,
31
+ "Only little-endian or big-endian native byte orders are supported.");
32
+
33
+ } // namespace detail
34
+
35
+ /// <summary>
36
+ /// Shared implementation between public and internal classes. CRTP pattern.
37
+ /// </summary>
38
+ template <class Derived>
39
+ struct Float16Impl {
40
+ protected:
41
+ /// <summary>
42
+ /// Converts from float to uint16_t float16 representation
43
+ /// </summary>
44
+ /// <param name="v"></param>
45
+ /// <returns></returns>
46
+ constexpr static uint16_t ToUint16Impl(float v) noexcept;
47
+
48
+ /// <summary>
49
+ /// Converts float16 to float
50
+ /// </summary>
51
+ /// <returns>float representation of float16 value</returns>
52
+ float ToFloatImpl() const noexcept;
53
+
54
+ /// <summary>
55
+ /// Creates an instance that represents absolute value.
56
+ /// </summary>
57
+ /// <returns>Absolute value</returns>
58
+ uint16_t AbsImpl() const noexcept {
59
+ return static_cast<uint16_t>(val & ~kSignMask);
60
+ }
61
+
62
+ /// <summary>
63
+ /// Creates a new instance with the sign flipped.
64
+ /// </summary>
65
+ /// <returns>Flipped sign instance</returns>
66
+ uint16_t NegateImpl() const noexcept {
67
+ return IsNaN() ? val : static_cast<uint16_t>(val ^ kSignMask);
68
+ }
69
+
70
+ public:
71
+ // uint16_t special values
72
+ static constexpr uint16_t kSignMask = 0x8000U;
73
+ static constexpr uint16_t kBiasedExponentMask = 0x7C00U;
74
+ static constexpr uint16_t kPositiveInfinityBits = 0x7C00U;
75
+ static constexpr uint16_t kNegativeInfinityBits = 0xFC00U;
76
+ static constexpr uint16_t kPositiveQNaNBits = 0x7E00U;
77
+ static constexpr uint16_t kNegativeQNaNBits = 0xFE00U;
78
+ static constexpr uint16_t kEpsilonBits = 0x4170U;
79
+ static constexpr uint16_t kMinValueBits = 0xFBFFU; // Minimum normal number
80
+ static constexpr uint16_t kMaxValueBits = 0x7BFFU; // Largest normal number
81
+ static constexpr uint16_t kOneBits = 0x3C00U;
82
+ static constexpr uint16_t kMinusOneBits = 0xBC00U;
83
+
84
+ uint16_t val{0};
85
+
86
+ Float16Impl() = default;
87
+
88
+ /// <summary>
89
+ /// Checks if the value is negative
90
+ /// </summary>
91
+ /// <returns>true if negative</returns>
92
+ bool IsNegative() const noexcept {
93
+ return static_cast<int16_t>(val) < 0;
94
+ }
95
+
96
+ /// <summary>
97
+ /// Tests if the value is NaN
98
+ /// </summary>
99
+ /// <returns>true if NaN</returns>
100
+ bool IsNaN() const noexcept {
101
+ return AbsImpl() > kPositiveInfinityBits;
102
+ }
103
+
104
+ /// <summary>
105
+ /// Tests if the value is finite
106
+ /// </summary>
107
+ /// <returns>true if finite</returns>
108
+ bool IsFinite() const noexcept {
109
+ return AbsImpl() < kPositiveInfinityBits;
110
+ }
111
+
112
+ /// <summary>
113
+ /// Tests if the value represents positive infinity.
114
+ /// </summary>
115
+ /// <returns>true if positive infinity</returns>
116
+ bool IsPositiveInfinity() const noexcept {
117
+ return val == kPositiveInfinityBits;
118
+ }
119
+
120
+ /// <summary>
121
+ /// Tests if the value represents negative infinity
122
+ /// </summary>
123
+ /// <returns>true if negative infinity</returns>
124
+ bool IsNegativeInfinity() const noexcept {
125
+ return val == kNegativeInfinityBits;
126
+ }
127
+
128
+ /// <summary>
129
+ /// Tests if the value is either positive or negative infinity.
130
+ /// </summary>
131
+ /// <returns>True if absolute value is infinity</returns>
132
+ bool IsInfinity() const noexcept {
133
+ return AbsImpl() == kPositiveInfinityBits;
134
+ }
135
+
136
+ /// <summary>
137
+ /// Tests if the value is NaN or zero. Useful for comparisons.
138
+ /// </summary>
139
+ /// <returns>True if NaN or zero.</returns>
140
+ bool IsNaNOrZero() const noexcept {
141
+ auto abs = AbsImpl();
142
+ return (abs == 0 || abs > kPositiveInfinityBits);
143
+ }
144
+
145
+ /// <summary>
146
+ /// Tests if the value is normal (not zero, subnormal, infinite, or NaN).
147
+ /// </summary>
148
+ /// <returns>True if so</returns>
149
+ bool IsNormal() const noexcept {
150
+ auto abs = AbsImpl();
151
+ return (abs < kPositiveInfinityBits) // is finite
152
+ && (abs != 0) // is not zero
153
+ && ((abs & kBiasedExponentMask) != 0); // is not subnormal (has a non-zero exponent)
154
+ }
155
+
156
+ /// <summary>
157
+ /// Tests if the value is subnormal (denormal).
158
+ /// </summary>
159
+ /// <returns>True if so</returns>
160
+ bool IsSubnormal() const noexcept {
161
+ auto abs = AbsImpl();
162
+ return (abs < kPositiveInfinityBits) // is finite
163
+ && (abs != 0) // is not zero
164
+ && ((abs & kBiasedExponentMask) == 0); // is subnormal (has a zero exponent)
165
+ }
166
+
167
+ /// <summary>
168
+ /// Creates an instance that represents absolute value.
169
+ /// </summary>
170
+ /// <returns>Absolute value</returns>
171
+ Derived Abs() const noexcept { return Derived::FromBits(AbsImpl()); }
172
+
173
+ /// <summary>
174
+ /// Creates a new instance with the sign flipped.
175
+ /// </summary>
176
+ /// <returns>Flipped sign instance</returns>
177
+ Derived Negate() const noexcept { return Derived::FromBits(NegateImpl()); }
178
+
179
+ /// <summary>
180
+ /// IEEE defines that positive and negative zero are equal; this gives us a quick equality check
181
+ /// for two values by or'ing the private bits together and stripping the sign. They are both zero,
182
+ /// and therefore equivalent, if the resulting value is still zero.
183
+ /// </summary>
184
+ /// <param name="lhs">first value</param>
185
+ /// <param name="rhs">second value</param>
186
+ /// <returns>True if both arguments represent zero</returns>
187
+ static bool AreZero(const Float16Impl& lhs, const Float16Impl& rhs) noexcept {
188
+ return static_cast<uint16_t>((lhs.val | rhs.val) & ~kSignMask) == 0;
189
+ }
190
+
191
+ bool operator==(const Float16Impl& rhs) const noexcept {
192
+ if (IsNaN() || rhs.IsNaN()) {
193
+ // IEEE defines that NaN is not equal to anything, including itself.
194
+ return false;
195
+ }
196
+ return val == rhs.val;
197
+ }
198
+
199
+ bool operator!=(const Float16Impl& rhs) const noexcept { return !(*this == rhs); }
200
+
201
+ bool operator<(const Float16Impl& rhs) const noexcept {
202
+ if (IsNaN() || rhs.IsNaN()) {
203
+ // IEEE defines that NaN is unordered with respect to everything, including itself.
204
+ return false;
205
+ }
206
+
207
+ const bool left_is_negative = IsNegative();
208
+ if (left_is_negative != rhs.IsNegative()) {
209
+ // When the signs of left and right differ, we know that left is less than right if it is
210
+ // the negative value. The exception to this is if both values are zero, in which case IEEE
211
+ // says they should be equal, even if the signs differ.
212
+ return left_is_negative && !AreZero(*this, rhs);
213
+ }
214
+ return (val != rhs.val) && ((val < rhs.val) ^ left_is_negative);
215
+ }
216
+ };
217
+
218
+ // The following Float16_t conversions are based on the code from
219
+ // the Eigen library.
220
+
221
+ // The conversion routines are Copyright (c) Fabian Giesen, 2016.
222
+ // The original license follows:
223
+ //
224
+ // Copyright (c) Fabian Giesen, 2016
225
+ // All rights reserved.
226
+ // Redistribution and use in source and binary forms, with or without
227
+ // modification, are permitted.
228
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
229
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
230
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
231
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
232
+ // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
233
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
234
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
235
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
236
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
237
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
238
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
239
+
240
+ namespace detail {
241
+ union float32_bits {
242
+ unsigned int u;
243
+ float f;
244
+ };
245
+ } // namespace detail
246
+
247
+ template <class Derived>
248
+ inline constexpr uint16_t Float16Impl<Derived>::ToUint16Impl(float v) noexcept {
249
+ detail::float32_bits f{};
250
+ f.f = v;
251
+
252
+ constexpr detail::float32_bits f32infty = {255 << 23};
253
+ constexpr detail::float32_bits f16max = {(127 + 16) << 23};
254
+ constexpr detail::float32_bits denorm_magic = {((127 - 15) + (23 - 10) + 1) << 23};
255
+ constexpr unsigned int sign_mask = 0x80000000u;
256
+ uint16_t val = static_cast<uint16_t>(0x0u);
257
+
258
+ unsigned int sign = f.u & sign_mask;
259
+ f.u ^= sign;
260
+
261
+ // NOTE all the integer compares in this function can be safely
262
+ // compiled into signed compares since all operands are below
263
+ // 0x80000000. Important if you want fast straight SSE2 code
264
+ // (since there's no unsigned PCMPGTD).
265
+
266
+ if (f.u >= f16max.u) { // result is Inf or NaN (all exponent bits set)
267
+ val = (f.u > f32infty.u) ? 0x7e00 : 0x7c00; // NaN->qNaN and Inf->Inf
268
+ } else { // (De)normalized number or zero
269
+ if (f.u < (113 << 23)) { // resulting FP16 is subnormal or zero
270
+ // use a magic value to align our 10 mantissa bits at the bottom of
271
+ // the float. as long as FP addition is round-to-nearest-even this
272
+ // just works.
273
+ f.f += denorm_magic.f;
274
+
275
+ // and one integer subtract of the bias later, we have our final float!
276
+ val = static_cast<uint16_t>(f.u - denorm_magic.u);
277
+ } else {
278
+ unsigned int mant_odd = (f.u >> 13) & 1; // resulting mantissa is odd
279
+
280
+ // update exponent, rounding bias part 1
281
+ // Equivalent to `f.u += ((unsigned int)(15 - 127) << 23) + 0xfff`, but
282
+ // without arithmetic overflow.
283
+ f.u += 0xc8000fffU;
284
+ // rounding bias part 2
285
+ f.u += mant_odd;
286
+ // take the bits!
287
+ val = static_cast<uint16_t>(f.u >> 13);
288
+ }
289
+ }
290
+
291
+ val |= static_cast<uint16_t>(sign >> 16);
292
+ return val;
293
+ }
294
+
295
+ template <class Derived>
296
+ inline float Float16Impl<Derived>::ToFloatImpl() const noexcept {
297
+ constexpr detail::float32_bits magic = {113 << 23};
298
+ constexpr unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
299
+ detail::float32_bits o{};
300
+
301
+ o.u = (val & 0x7fff) << 13; // exponent/mantissa bits
302
+ unsigned int exp = shifted_exp & o.u; // just the exponent
303
+ o.u += (127 - 15) << 23; // exponent adjust
304
+
305
+ // handle exponent special cases
306
+ if (exp == shifted_exp) { // Inf/NaN?
307
+ o.u += (128 - 16) << 23; // extra exp adjust
308
+ } else if (exp == 0) { // Zero/Denormal?
309
+ o.u += 1 << 23; // extra exp adjust
310
+ o.f -= magic.f; // re-normalize
311
+ }
312
+
313
+ // Attempt to workaround the Internal Compiler Error on ARM64
314
+ // for bitwise | operator, including std::bitset
315
+ #if (defined _MSC_VER) && (defined _M_ARM || defined _M_ARM64 || defined _M_ARM64EC)
316
+ if (IsNegative()) {
317
+ return -o.f;
318
+ }
319
+ #else
320
+ // original code:
321
+ o.u |= (val & 0x8000U) << 16U; // sign bit
322
+ #endif
323
+ return o.f;
324
+ }
325
+
326
+ /// Shared implementation between public and internal classes. CRTP pattern.
327
+ template <class Derived>
328
+ struct BFloat16Impl {
329
+ protected:
330
+ /// <summary>
331
+ /// Converts from float to uint16_t float16 representation
332
+ /// </summary>
333
+ /// <param name="v"></param>
334
+ /// <returns></returns>
335
+ static uint16_t ToUint16Impl(float v) noexcept;
336
+
337
+ /// <summary>
338
+ /// Converts bfloat16 to float
339
+ /// </summary>
340
+ /// <returns>float representation of bfloat16 value</returns>
341
+ float ToFloatImpl() const noexcept;
342
+
343
+ /// <summary>
344
+ /// Creates an instance that represents absolute value.
345
+ /// </summary>
346
+ /// <returns>Absolute value</returns>
347
+ uint16_t AbsImpl() const noexcept {
348
+ return static_cast<uint16_t>(val & ~kSignMask);
349
+ }
350
+
351
+ /// <summary>
352
+ /// Creates a new instance with the sign flipped.
353
+ /// </summary>
354
+ /// <returns>Flipped sign instance</returns>
355
+ uint16_t NegateImpl() const noexcept {
356
+ return IsNaN() ? val : static_cast<uint16_t>(val ^ kSignMask);
357
+ }
358
+
359
+ public:
360
+ // uint16_t special values
361
+ static constexpr uint16_t kSignMask = 0x8000U;
362
+ static constexpr uint16_t kBiasedExponentMask = 0x7F80U;
363
+ static constexpr uint16_t kPositiveInfinityBits = 0x7F80U;
364
+ static constexpr uint16_t kNegativeInfinityBits = 0xFF80U;
365
+ static constexpr uint16_t kPositiveQNaNBits = 0x7FC1U;
366
+ static constexpr uint16_t kNegativeQNaNBits = 0xFFC1U;
367
+ static constexpr uint16_t kSignaling_NaNBits = 0x7F80U;
368
+ static constexpr uint16_t kEpsilonBits = 0x0080U;
369
+ static constexpr uint16_t kMinValueBits = 0xFF7FU;
370
+ static constexpr uint16_t kMaxValueBits = 0x7F7FU;
371
+ static constexpr uint16_t kRoundToNearest = 0x7FFFU;
372
+ static constexpr uint16_t kOneBits = 0x3F80U;
373
+ static constexpr uint16_t kMinusOneBits = 0xBF80U;
374
+
375
+ uint16_t val{0};
376
+
377
+ BFloat16Impl() = default;
378
+
379
+ /// <summary>
380
+ /// Checks if the value is negative
381
+ /// </summary>
382
+ /// <returns>true if negative</returns>
383
+ bool IsNegative() const noexcept {
384
+ return static_cast<int16_t>(val) < 0;
385
+ }
386
+
387
+ /// <summary>
388
+ /// Tests if the value is NaN
389
+ /// </summary>
390
+ /// <returns>true if NaN</returns>
391
+ bool IsNaN() const noexcept {
392
+ return AbsImpl() > kPositiveInfinityBits;
393
+ }
394
+
395
+ /// <summary>
396
+ /// Tests if the value is finite
397
+ /// </summary>
398
+ /// <returns>true if finite</returns>
399
+ bool IsFinite() const noexcept {
400
+ return AbsImpl() < kPositiveInfinityBits;
401
+ }
402
+
403
+ /// <summary>
404
+ /// Tests if the value represents positive infinity.
405
+ /// </summary>
406
+ /// <returns>true if positive infinity</returns>
407
+ bool IsPositiveInfinity() const noexcept {
408
+ return val == kPositiveInfinityBits;
409
+ }
410
+
411
+ /// <summary>
412
+ /// Tests if the value represents negative infinity
413
+ /// </summary>
414
+ /// <returns>true if negative infinity</returns>
415
+ bool IsNegativeInfinity() const noexcept {
416
+ return val == kNegativeInfinityBits;
417
+ }
418
+
419
+ /// <summary>
420
+ /// Tests if the value is either positive or negative infinity.
421
+ /// </summary>
422
+ /// <returns>True if absolute value is infinity</returns>
423
+ bool IsInfinity() const noexcept {
424
+ return AbsImpl() == kPositiveInfinityBits;
425
+ }
426
+
427
+ /// <summary>
428
+ /// Tests if the value is NaN or zero. Useful for comparisons.
429
+ /// </summary>
430
+ /// <returns>True if NaN or zero.</returns>
431
+ bool IsNaNOrZero() const noexcept {
432
+ auto abs = AbsImpl();
433
+ return (abs == 0 || abs > kPositiveInfinityBits);
434
+ }
435
+
436
+ /// <summary>
437
+ /// Tests if the value is normal (not zero, subnormal, infinite, or NaN).
438
+ /// </summary>
439
+ /// <returns>True if so</returns>
440
+ bool IsNormal() const noexcept {
441
+ auto abs = AbsImpl();
442
+ return (abs < kPositiveInfinityBits) // is finite
443
+ && (abs != 0) // is not zero
444
+ && ((abs & kBiasedExponentMask) != 0); // is not subnormal (has a non-zero exponent)
445
+ }
446
+
447
+ /// <summary>
448
+ /// Tests if the value is subnormal (denormal).
449
+ /// </summary>
450
+ /// <returns>True if so</returns>
451
+ bool IsSubnormal() const noexcept {
452
+ auto abs = AbsImpl();
453
+ return (abs < kPositiveInfinityBits) // is finite
454
+ && (abs != 0) // is not zero
455
+ && ((abs & kBiasedExponentMask) == 0); // is subnormal (has a zero exponent)
456
+ }
457
+
458
+ /// <summary>
459
+ /// Creates an instance that represents absolute value.
460
+ /// </summary>
461
+ /// <returns>Absolute value</returns>
462
+ Derived Abs() const noexcept { return Derived::FromBits(AbsImpl()); }
463
+
464
+ /// <summary>
465
+ /// Creates a new instance with the sign flipped.
466
+ /// </summary>
467
+ /// <returns>Flipped sign instance</returns>
468
+ Derived Negate() const noexcept { return Derived::FromBits(NegateImpl()); }
469
+
470
+ /// <summary>
471
+ /// IEEE defines that positive and negative zero are equal; this gives us a quick equality check
472
+ /// for two values by or'ing the private bits together and stripping the sign. They are both zero,
473
+ /// and therefore equivalent, if the resulting value is still zero.
474
+ /// </summary>
475
+ /// <param name="lhs">first value</param>
476
+ /// <param name="rhs">second value</param>
477
+ /// <returns>True if both arguments represent zero</returns>
478
+ static bool AreZero(const BFloat16Impl& lhs, const BFloat16Impl& rhs) noexcept {
479
+ // IEEE defines that positive and negative zero are equal; this gives us a quick equality check
480
+ // for two values by or'ing the private bits together and stripping the sign. They are both zero,
481
+ // and therefore equivalent, if the resulting value is still zero.
482
+ return static_cast<uint16_t>((lhs.val | rhs.val) & ~kSignMask) == 0;
483
+ }
484
+ };
485
+
486
+ template <class Derived>
487
+ inline uint16_t BFloat16Impl<Derived>::ToUint16Impl(float v) noexcept {
488
+ uint16_t result;
489
+ if (std::isnan(v)) {
490
+ result = kPositiveQNaNBits;
491
+ } else {
492
+ auto get_msb_half = [](float fl) {
493
+ uint16_t result;
494
+ #ifdef __cpp_if_constexpr
495
+ if constexpr (detail::endian::native == detail::endian::little) {
496
+ #else
497
+ if (detail::endian::native == detail::endian::little) {
498
+ #endif
499
+ std::memcpy(&result, reinterpret_cast<char*>(&fl) + sizeof(uint16_t), sizeof(uint16_t));
500
+ } else {
501
+ std::memcpy(&result, &fl, sizeof(uint16_t));
502
+ }
503
+ return result;
504
+ };
505
+
506
+ uint16_t upper_bits = get_msb_half(v);
507
+ union {
508
+ uint32_t U32;
509
+ float F32;
510
+ };
511
+ F32 = v;
512
+ U32 += (upper_bits & 1) + kRoundToNearest;
513
+ result = get_msb_half(F32);
514
+ }
515
+ return result;
516
+ }
517
+
518
+ template <class Derived>
519
+ inline float BFloat16Impl<Derived>::ToFloatImpl() const noexcept {
520
+ if (IsNaN()) {
521
+ return std::numeric_limits<float>::quiet_NaN();
522
+ }
523
+ float result;
524
+ char* const first = reinterpret_cast<char*>(&result);
525
+ char* const second = first + sizeof(uint16_t);
526
+ #ifdef __cpp_if_constexpr
527
+ if constexpr (detail::endian::native == detail::endian::little) {
528
+ #else
529
+ if (detail::endian::native == detail::endian::little) {
530
+ #endif
531
+ std::memset(first, 0, sizeof(uint16_t));
532
+ std::memcpy(second, &val, sizeof(uint16_t));
533
+ } else {
534
+ std::memcpy(first, &val, sizeof(uint16_t));
535
+ std::memset(second, 0, sizeof(uint16_t));
536
+ }
537
+ return result;
538
+ }
539
+
540
+ } // namespace onnxruntime_float16
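
ToUint16Impl above rounds to nearest-even: it adds the low bit of the retained half plus a rounding bias to the float's bit pattern, then keeps the high 16 bits. Below is a minimal standalone sketch of the same conversion, assuming a little-endian host and assuming kRoundToNearest == 0x7FFF and kPositiveQNaNBits == 0x7FC1 (those constants are defined earlier in the header, outside this hunk):

#include <cmath>
#include <cstdint>
#include <cstring>

// Round-to-nearest-even float -> bfloat16, mirroring ToUint16Impl above.
inline uint16_t FloatToBFloat16(float v) {
  if (std::isnan(v)) {
    return 0x7FC1;  // assumed kPositiveQNaNBits (positive quiet NaN)
  }
  uint32_t bits;
  std::memcpy(&bits, &v, sizeof(bits));      // type-pun via memcpy instead of a union
  const uint32_t lsb = (bits >> 16) & 1;     // low bit of the half we keep
  bits += lsb + 0x7FFF;                      // assumed kRoundToNearest; ties round to even
  return static_cast<uint16_t>(bits >> 16);  // keep the most significant 16 bits
}
// e.g. FloatToBFloat16(1.0f) == 0x3F80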
1.18.1/onnxruntime.xcframework/Headers/onnxruntime_run_options_config_keys.h ADDED
@@ -0,0 +1,51 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+
+ #pragma once
+
+ /*
+ * This file defines RunOptions Config Keys and the format of the Config Values.
+ *
+ * The naming convention for a RunOptions Config Key is
+ * "[Area][.[SubArea1].[SubArea2]...].[Keyname]"
+ * such as "ep.cuda.use_arena".
+ * The Config Key cannot be empty.
+ * The maximum length of the Config Key is 128.
+ *
+ * The string format of a RunOptions Config Value is defined individually for each Config.
+ * The maximum length of the Config Value is 1024.
+ */
+
+ // Key for enabling shrinkage of user-listed device memory arenas.
+ // Expects a semicolon-separated list of device:device_id pairs in the following format:
+ // "device_0:device_id_0;device_1:device_id_1"
+ // No white-space is allowed in the provided list string.
+ // Currently, the only supported devices are "cpu" and "gpu" (case sensitive).
+ // If "cpu" is included in the list, the DisableCpuMemArena() API must not be called, i.e. the arena for cpu should be enabled.
+ // Example usage: "cpu:0;gpu:0" (or) "gpu:0"
+ // By default, the value for this key is empty, i.e. no memory arenas are shrunk.
+ static const char* const kOrtRunOptionsConfigEnableMemoryArenaShrinkage = "memory.enable_memory_arena_shrinkage";
+
+ // Set to '1' to not synchronize execution providers with the CPU at the end of a session run.
+ // By default it is set to '0'.
+ // Taking the CUDA EP as an example, this omits triggering cudaStreamSynchronize on the compute stream.
+ static const char* const kOrtRunOptionsConfigDisableSynchronizeExecutionProviders = "disable_synchronize_execution_providers";
+
+ // Set HTP performance mode for the QNN HTP backend before session run.
+ // Options for HTP performance mode: "burst", "balanced", "default", "high_performance",
+ // "high_power_saver", "low_balanced", "extreme_power_saver", "low_power_saver", "power_saver",
+ // "sustained_high_performance". Defaults to "default".
+ static const char* const kOrtRunOptionsConfigQnnPerfMode = "qnn.htp_perf_mode";
+
+ // Set HTP performance mode for the QNN HTP backend post session run.
+ static const char* const kOrtRunOptionsConfigQnnPerfModePostRun = "qnn.htp_perf_mode_post_run";
+
+ // Set RPC control latency for the QNN HTP backend.
+ static const char* const kOrtRunOptionsConfigQnnRpcControlLatency = "qnn.rpc_control_latency";
+
+ // Set the graph annotation id for the CUDA EP. Use with enable_cuda_graph=true.
+ // The value should be an integer. If the value is not set, the default value is 0 and
+ // the ORT session captures only one CUDA graph before another capture is requested.
+ // If the value is set to -1, CUDA graph capture/replay is disabled in that run.
+ // Users are not expected to set the value to 0, as it is reserved for internal use.
+ static const char* const kOrtRunOptionsConfigCudaGraphAnnotation = "gpu_graph_id";
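
These keys are applied per run through OrtRunOptions. A minimal sketch using the C++ API's Ort::RunOptions::AddConfigEntry (the Run() call itself is elided; "cpu:0" asks only the CPU arena with device id 0 to shrink):

#include "onnxruntime_cxx_api.h"
#include "onnxruntime_run_options_config_keys.h"

Ort::RunOptions MakeRunOptions() {
  Ort::RunOptions run_options;
  // Shrink the CPU memory arena once this run completes.
  run_options.AddConfigEntry(kOrtRunOptionsConfigEnableMemoryArenaShrinkage, "cpu:0");
  // Skip the end-of-run device synchronization (e.g. cudaStreamSynchronize for the CUDA EP).
  run_options.AddConfigEntry(kOrtRunOptionsConfigDisableSynchronizeExecutionProviders, "1");
  return run_options;  // pass as the first argument to session.Run(...)
}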
1.18.1/onnxruntime.xcframework/Headers/onnxruntime_session_options_config_keys.h ADDED
@@ -0,0 +1,267 @@
+ // Copyright (c) Microsoft Corporation. All rights reserved.
+ // Licensed under the MIT License.
+
+ #pragma once
+
+ /*
+ * This file defines SessionOptions Config Keys and the format of the Config Values.
+ *
+ * The naming convention for a SessionOptions Config Key is
+ * "[Area][.[SubArea1].[SubArea2]...].[Keyname]"
+ * such as "ep.cuda.use_arena".
+ * The Config Key cannot be empty.
+ * The maximum length of the Config Key is 128.
+ *
+ * The string format of a SessionOptions Config Value is defined individually for each Config.
+ * The maximum length of the Config Value is 1024.
+ */
+
+ // Key for disabling PrePacking.
+ // If the config value is set to "1" prepacking is disabled; otherwise prepacking is enabled (default).
+ static const char* const kOrtSessionOptionsConfigDisablePrepacking = "session.disable_prepacking";
+
+ // A value of "1" means allocators registered in the env will be used. "0" means the allocators created in the session
+ // will be used. Use this to override the usage of env allocators on a per-session level.
+ static const char* const kOrtSessionOptionsConfigUseEnvAllocators = "session.use_env_allocators";
+
+ // Set to 'ORT' (case sensitive) to load an ORT format model.
+ // If unset, the model type defaults to ONNX unless it is inferred to be ORT from the filename ('.ort' == ORT format) or the bytes.
+ static const char* const kOrtSessionOptionsConfigLoadModelFormat = "session.load_model_format";
+
+ // Set to 'ORT' (case sensitive) to save the optimized model in ORT format when SessionOptions.optimized_model_path is set.
+ // If unset, the format defaults to ONNX unless optimized_model_filepath ends in '.ort'.
+ static const char* const kOrtSessionOptionsConfigSaveModelFormat = "session.save_model_format";
+
+ // If the value is "1", flush-to-zero and denormal-as-zero are applied. The default is "0".
+ // When multiple sessions are created, the main thread doesn't override changes from succeeding session options,
+ // but threads in session thread pools follow option changes.
+ // When ORT runs with OpenMP, the same rule applies, i.e. the first session option to flush-to-zero and
+ // denormal-as-zero is applied only to the global OpenMP thread pool, which doesn't support per-session thread pools.
+ // Note that an alternative that avoids this runtime option is to train and export a model without denormals;
+ // that is recommended because turning this option on may hurt model accuracy.
+ static const char* const kOrtSessionOptionsConfigSetDenormalAsZero = "session.set_denormal_as_zero";
+
+ // Controls whether to run a quantized model in QDQ (QuantizeLinear/DeQuantizeLinear) format.
+ // "0": enable. ORT does fusion logic for the QDQ format.
+ // "1": disable. ORT doesn't do fusion logic for the QDQ format.
+ // Its default value is "0" unless the DirectML execution provider is registered, in which case it defaults to "1".
+ static const char* const kOrtSessionOptionsDisableQuantQDQ = "session.disable_quant_qdq";
+
+ // Controls whether to enable the Double QDQ remover and Identical Children Consolidation.
+ // "0": enable. ORT removes the middle two nodes from Q->(DQ->Q)->DQ pairs.
+ // "1": disable. ORT doesn't remove the middle two nodes from Q->(DQ->Q)->DQ pairs.
+ // Its default value is "0".
+ static const char* const kOrtSessionOptionsDisableDoubleQDQRemover = "session.disable_double_qdq_remover";
+
+ // If set to "1", enables the removal of QuantizeLinear/DequantizeLinear node pairs once all QDQ handling has been
+ // completed. e.g. if after all QDQ handling has completed we have -> FloatOp -> Q -> DQ -> FloatOp ->, the
+ // Q -> DQ could potentially be removed. This provides a performance benefit by avoiding going from float to
+ // 8-bit and back to float, but could impact accuracy. The impact on accuracy is model specific and depends on
+ // other factors, like whether the model was created using Quantization Aware Training or Post Training Quantization.
+ // As such, it's best to test to determine if enabling this works well for your scenario.
+ // The default value is "0".
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsEnableQuantQDQCleanup = "session.enable_quant_qdq_cleanup";
+
+ // Enable or disable gelu approximation in graph optimization. "0": disable; "1": enable. The default is "0".
+ // GeluApproximation has side effects which may change the inference results. It is disabled by default due to this.
+ static const char* const kOrtSessionOptionsEnableGeluApproximation = "optimization.enable_gelu_approximation";
+
+ // This setting controls whether to enable AheadOfTime function inlining.
+ // AOT function inlining examines the graph and attempts to inline as many locally defined functions in the model
+ // as possible with the help of the enabled execution providers.
+ // This can reduce the number of function calls and improve performance because it is done before
+ // the Level1 optimizers and constant folding. However, under some circumstances, when the EPs are not available,
+ // one can disable AOT inlining, produce an optimized model, and postpone AOT until run time.
+ // "0": enable; "1": disable.
+ // Its default value is "0".
+ static const char* const kOrtSessionOptionsDisableAheadOfTimeFunctionInlining = "session.disable_aot_function_inlining";
+
+ #ifdef ENABLE_TRAINING
+ // Specifies a list of op types for memory footprint reduction.
+ // The value should be a ","-delimited list of
+ // <subgraph string>:<optimization strategy>:<number of subgraphs to apply> triples.
+ // For example, "Gelu+Cast+:1:0,Dropout+:1:1".
+ // A valid "subgraph string" should be one subgraph representation output by ORT graph transformations.
+ // "optimization strategy" currently has the valid values: 0 - disabled, 1 - recompute.
+ // "number of subgraphs to apply" controls how many subgraphs the optimization is applied to, to avoid "oversaving"
+ // the memory.
+ static const char* const kOrtSessionOptionsMemoryOptimizerEnabler = "optimization.memory_optimizer_config";
+
+ // Specifies the config for detecting subgraphs for memory footprint reduction.
+ // The value should be a string containing ints separated by commas. The default value is "0:0".
+ static const char* const kOrtSessionOptionsMemoryOptimizerProbeConfig = "optimization.enable_memory_probe_recompute_config";
+ #endif
+
+ // This setting, if set, should contain a comma-separated list of optimizer names that should be disabled.
+ // Optimizers may take time to execute and affect model loading time. If you feel that a specific optimizer
+ // does not provide runtime benefits but affects your model loading time, you may disable it using this config
+ // entry. This option is not enabled in ORT_MINIMAL_BUILD builds.
+ // A list of optimizers is available in onnxruntime/core/optimizer/graph_transformer_utils.cc
+ //
+ // Default is an empty string, which means no optimizers are disabled.
+ static const char* const kOrtSessionOptionsDisableSpecifiedOptimizers = "optimization.disable_specified_optimizers";
+
+ // Enable or disable using the device allocator for allocating initialized tensor memory. "1": enable; "0": disable. The default is "0".
+ // Using device allocators means the memory allocation is made using malloc/new.
+ static const char* const kOrtSessionOptionsUseDeviceAllocatorForInitializers = "session.use_device_allocator_for_initializers";
+
+ // Configure whether to allow the inter_op/intra_op threads to spin a number of times before blocking.
+ // "0": thread will block if it finds no job to run
+ // "1": default, thread will spin a number of times before blocking
+ static const char* const kOrtSessionOptionsConfigAllowInterOpSpinning = "session.inter_op.allow_spinning";
+ static const char* const kOrtSessionOptionsConfigAllowIntraOpSpinning = "session.intra_op.allow_spinning";
+
+ // Key for using model bytes directly for the ORT format.
+ // If a session is created using an input byte array containing the ORT format model data,
+ // by default we will copy the model bytes at the time of session creation to ensure the model bytes
+ // buffer is valid.
+ // Setting this option to "1" disables copying the model bytes and uses them directly. The caller
+ // has to guarantee that the model bytes are valid until the ORT session using the model bytes is destroyed.
+ static const char* const kOrtSessionOptionsConfigUseORTModelBytesDirectly = "session.use_ort_model_bytes_directly";
+
+ /// <summary>
+ /// Key for using the ORT format model flatbuffer bytes directly for initializers.
+ /// This avoids copying the bytes and reduces peak memory usage during model loading and initialization.
+ /// Requires `session.use_ort_model_bytes_directly` to be true.
+ /// If set, the flatbuffer bytes provided when creating the InferenceSession MUST remain valid for the entire
+ /// duration of the InferenceSession.
+ /// </summary>
+ static const char* const kOrtSessionOptionsConfigUseORTModelBytesForInitializers =
+ "session.use_ort_model_bytes_for_initializers";
+
+ // This should only be specified when exporting an ORT format model for use on a different platform.
+ // If the ORT format model will be used on ARM platforms, set to "1". For other platforms, set to "0".
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsQDQIsInt8Allowed = "session.qdqisint8allowed";
+
+ // x64 SSE4.1/AVX2/AVX512 (with no VNNI) has an overflow problem with quantized matrix multiplication with U8S8.
+ // To avoid this we need to use slower U8U8 matrix multiplication instead. This option, if
+ // turned on, uses slower U8U8 matrix multiplications. Only effective on AVX2 or AVX512
+ // platforms.
+ static const char* const kOrtSessionOptionsAvx2PrecisionMode = "session.x64quantprecision";
+
+ // Specifies how minimal build graph optimizations are handled in a full build.
+ // These optimizations are at the extended level or higher.
+ // Possible values and their effects are:
+ // "save": Save runtime optimizations when saving an ORT format model.
+ // "apply": Only apply optimizations available in a minimal build.
+ // ""/<unspecified>: Apply optimizations available in a full build.
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsConfigMinimalBuildOptimizations =
+ "optimization.minimal_build_optimizations";
+
+ // Note: The options specific to an EP should be specified prior to appending that EP to the session options object in
+ // order for them to take effect.
+
+ // Specifies a list of stop op types. Nodes of a type in the stop op types, and nodes downstream from them, will not be
+ // run by the NNAPI EP.
+ // The value should be a ","-delimited list of op types. For example, "Add,Sub".
+ // If not specified, the default set of stop ops is used. To specify an empty stop ops types list and disable stop op
+ // exclusion, set the value to "".
+ static const char* const kOrtSessionOptionsConfigNnapiEpPartitioningStopOps = "ep.nnapi.partitioning_stop_ops";
+
+ // Enables dynamic block sizing for multithreading.
+ // With a positive value, the thread pool will split a task of N iterations into blocks of size starting from:
+ // N / (num_of_threads * dynamic_block_base)
+ // As execution progresses, the size will decrease according to the diminishing residual of N,
+ // meaning the task will be distributed at a smaller granularity for better parallelism.
+ // For some models, it helps to reduce the variance of E2E inference latency and boost performance.
+ // The feature is off by default; specify any positive integer, e.g. "4", to enable it.
+ // Available since version 1.11.
+ static const char* const kOrtSessionOptionsConfigDynamicBlockBase = "session.dynamic_block_base";
+
+ // This option allows decreasing CPU usage between infrequent
+ // requests: it forces any spinning TP threads to stop immediately when the last of the
+ // concurrent Run() calls returns.
+ // Spinning is restarted on the next Run() call.
+ // Applies only to internal thread pools.
+ static const char* const kOrtSessionOptionsConfigForceSpinningStop = "session.force_spinning_stop";
+
+ // "1": all inconsistencies encountered during shape and type inference
+ // will result in failures.
+ // "0": in some cases warnings will be logged but processing will continue. The default.
+ // May be useful to expose bugs in models.
+ static const char* const kOrtSessionOptionsConfigStrictShapeTypeInference = "session.strict_shape_type_inference";
+
+ // "1": every model using a more recent opset than the latest released one will fail.
+ // "0": the model may or may not work if onnxruntime cannot find an implementation; this option
+ // is used for development purposes.
+ static const char* const kOrtSessionOptionsConfigStrictAllowReleasedOpsetsOnly = "session.allow_released_opsets_only";
+
+ // The file that saves the configuration for partitioning nodes among logical streams.
+ static const char* const kNodePartitionConfigFile = "session.node_partition_config_file";
+
+ // This option allows setting affinities for intra-op threads.
+ // The affinity string follows the format:
+ // logical_processor_id,logical_processor_id;logical_processor_id,logical_processor_id
+ // Semicolons separate configurations among threads, while commas separate the processors the i-th thread is expected to attach to.
+ // e.g. 1,2,3;4,5
+ // specifies affinities for two threads, with the 1st thread attached to the 1st, 2nd, and 3rd processors, and the 2nd thread to the 4th and 5th.
+ // To ease the configuration, an "interval" is also allowed:
+ // e.g. 1-8;9-16;17-24
+ // orders that the 1st thread runs on the first eight processors, the 2nd thread runs on the next eight processors, and so forth.
+ // Note:
+ // 1. Once set, the number of thread affinities must equal intra_op_num_threads - 1, since ort does not set affinity on the main thread, which
+ // is started and managed by the calling app;
+ // 2. For Windows, ort will infer the group id from a logical processor id; for example, assuming there are two groups, each with 64 logical processors,
+ // an id of 64 will be inferred as the last processor of the 1st group, while 65 will be interpreted as the 1st processor of the second group.
+ // Hence 64-65 is an invalid configuration, because a Windows thread cannot be attached to processors across a group boundary.
+ static const char* const kOrtSessionOptionsConfigIntraOpThreadAffinities = "session.intra_op_thread_affinities";
+
+ // This option will dump out the model to assist debugging any issues with layout transformation,
+ // and is primarily intended for developer usage. It is only relevant if an execution provider that requests
+ // NHWC layout is enabled, such as NNAPI, XNNPACK or QNN.
+ //
+ // Default is off. Set to "1" to enable.
+ //
+ // If modified by layout transformation, the model will be dumped after these steps:
+ // 1) insertion of the layout transformation Transpose nodes;
+ // 2) optimization of those nodes using the transpose optimizer;
+ // 3) application of the L1 transformers to the updated graph.
+ // The model will be saved to filename post_layout_transform_step_<step_number>.onnx.
+ static const char* const kDebugLayoutTransformation = "session.debug_layout_transformation";
+
+ // Graph nodes that are not supported by the execution providers (EPs) explicitly added to the session are
+ // assigned (i.e., "fallback") to the CPU EP by default.
+ //
+ // This option allows the user to disable the fallback of unsupported graph nodes to the CPU EP.
+ // If this option is set to "1", session creation will fail if the execution providers other than the CPU EP cannot
+ // fully support all of the nodes in the graph.
+ //
+ // It is invalid to set this option and explicitly add the CPU EP to the session. In this case, session creation
+ // will also fail with an error.
+ //
+ // Option values:
+ // - "0": CPU EP fallback is not disabled. [DEFAULT]
+ // - "1": CPU EP fallback is disabled.
+ static const char* const kOrtSessionOptionsDisableCPUEPFallback = "session.disable_cpu_ep_fallback";
+
+ // Use this config when serializing a large model after optimization to specify an external initializers file.
+ static const char* const kOrtSessionOptionsOptimizedModelExternalInitializersFileName =
+ "session.optimized_model_external_initializers_file_name";
+
+ // Use this config to control the minimum size of an initializer for it to be externalized during serialization.
+ static const char* const kOrtSessionOptionsOptimizedModelExternalInitializersMinSizeInBytes =
+ "session.optimized_model_external_initializers_min_size_in_bytes";
+
+ // Enable the EP context feature to dump the partitioned graph, which includes the EP context, into an ONNX file.
+ // The dumped ONNX model with EP context can be used for future inference to avoid the EP graph partitioning/compilation overhead.
+ // "0": disable. (default)
+ // "1": enable.
+ static const char* const kOrtSessionOptionEpContextEnable = "ep.context_enable";
+
+ // Specify the file path for the ONNX model which has the EP context.
+ // Defaults to original_file_name_ctx.onnx if not specified.
+ static const char* const kOrtSessionOptionEpContextFilePath = "ep.context_file_path";
+
+ // Flag to specify whether to dump the EP context into the ONNX model.
+ // "0": dump the EP context into a separate file, keeping the file name in the ONNX model.
+ // "1": dump the EP context into the ONNX model. (default)
+ static const char* const kOrtSessionOptionEpContextEmbedMode = "ep.context_embed_mode";
+
+ // Gemm fastmath mode provides fp32 gemm acceleration with bfloat16-based matmul.
+ // Option values:
+ // - "0": Gemm FastMath mode is not enabled. [DEFAULT]
+ // - "1": Gemm FastMath mode is enabled.
+ static const char* const kOrtSessionOptionsMlasGemmFastMathArm64Bfloat16 = "mlas.enable_gemm_fastmath_arm64_bfloat16";
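
Session-level keys are set the same way on Ort::SessionOptions before the session is created. A minimal sketch (the model path "model.onnx" is a placeholder):

#include "onnxruntime_cxx_api.h"
#include "onnxruntime_session_options_config_keys.h"

Ort::Session MakeSession(Ort::Env& env) {
  Ort::SessionOptions session_options;
  // Fail session creation instead of silently assigning unsupported nodes to the CPU EP.
  session_options.AddConfigEntry(kOrtSessionOptionsDisableCPUEPFallback, "1");
  // Pin the two worker threads to processors 1-3 and 4-5 respectively
  // (worker affinities = intra_op_num_threads - 1, per the note above).
  session_options.SetIntraOpNumThreads(3);
  session_options.AddConfigEntry(kOrtSessionOptionsConfigIntraOpThreadAffinities, "1,2,3;4,5");
  return Ort::Session(env, "model.onnx", session_options);
}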
1.18.1/onnxruntime.xcframework/Info.plist ADDED
@@ -0,0 +1,59 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+ <plist version="1.0">
+ <dict>
+ <key>AvailableLibraries</key>
+ <array>
+ <dict>
+ <key>BinaryPath</key>
+ <string>onnxruntime.a</string>
+ <key>LibraryIdentifier</key>
+ <string>ios-arm64</string>
+ <key>LibraryPath</key>
+ <string>onnxruntime.a</string>
+ <key>SupportedArchitectures</key>
+ <array>
+ <string>arm64</string>
+ </array>
+ <key>SupportedPlatform</key>
+ <string>ios</string>
+ </dict>
+ <dict>
+ <key>BinaryPath</key>
+ <string>onnxruntime.a</string>
+ <key>LibraryIdentifier</key>
+ <string>ios-arm64_x86_64-simulator</string>
+ <key>LibraryPath</key>
+ <string>onnxruntime.a</string>
+ <key>SupportedArchitectures</key>
+ <array>
+ <string>arm64</string>
+ <string>x86_64</string>
+ </array>
+ <key>SupportedPlatform</key>
+ <string>ios</string>
+ <key>SupportedPlatformVariant</key>
+ <string>simulator</string>
+ </dict>
+ <dict>
+ <key>BinaryPath</key>
+ <string>onnxruntime.a</string>
+ <key>LibraryIdentifier</key>
+ <string>macos-arm64_x86_64</string>
+ <key>LibraryPath</key>
+ <string>onnxruntime.a</string>
+ <key>SupportedArchitectures</key>
+ <array>
+ <string>arm64</string>
+ <string>x86_64</string>
+ </array>
+ <key>SupportedPlatform</key>
+ <string>macos</string>
+ </dict>
+ </array>
+ <key>CFBundlePackageType</key>
+ <string>XFWK</string>
+ <key>XCFrameworkFormatVersion</key>
+ <string>1.0</string>
+ </dict>
+ </plist>
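
This Info.plist is what lets Xcode resolve the correct slice at link time: each LibraryIdentifier names one of the directories added below, and the SupportedPlatform/SupportedPlatformVariant/SupportedArchitectures entries map a build destination (iOS device, iOS simulator, or macOS) to the onnxruntime.a that should be linked.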
1.18.1/onnxruntime.xcframework/ios-arm64/libonnxruntime.a ADDED
@@ -0,0 +1 @@
+ onnxruntime.a
1.18.1/onnxruntime.xcframework/ios-arm64/onnxruntime.a ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7089c7e11a532e6b08cc641360c044899c44f01c9676eacda70fbc58ceac62d
+ size 67913296
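
The onnxruntime.a entries in this commit are Git LFS pointer files (version/oid/size) rather than the binaries themselves; after cloning, the actual static libraries (roughly 68 MB for ios-arm64, and 139 MB / 133 MB for the simulator and macOS slices) are fetched with git lfs pull. The one-line libonnxruntime.a entries whose content is just "onnxruntime.a" appear to be symlinks to the real archive, stored as their target path.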
1.18.1/onnxruntime.xcframework/ios-arm64_x86_64-simulator/libonnxruntime.a ADDED
@@ -0,0 +1 @@
+ onnxruntime.a
1.18.1/onnxruntime.xcframework/ios-arm64_x86_64-simulator/onnxruntime.a ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5969297c14351a2c6ab100db017ccf5d4e1a995114e76124d7efa6bd3cb4b9ea
+ size 138589256
1.18.1/onnxruntime.xcframework/macos-arm64_x86_64/libonnxruntime.a ADDED
@@ -0,0 +1 @@
+ onnxruntime.a
1.18.1/onnxruntime.xcframework/macos-arm64_x86_64/onnxruntime.a ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9095deb318534052176e64817ef3036b84d3fc8f122ebd4152280952fe345110
+ size 132954488