file_path
stringlengths
20
207
content
stringlengths
5
3.85M
size
int64
5
3.85M
lang
stringclasses
9 values
avg_line_length
float64
1.33
100
max_line_length
int64
4
993
alphanum_fraction
float64
0.26
0.93
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/layers_pad_dqnn.h
/** ****************************************************************************** * @file layers_pad_dqnn.h * @author AIS * @brief header file of AI platform DQNN padding datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_PADDING_DQNN_H #define LAYERS_PADDING_DQNN_H #pragma once #include "layers_common.h" #include "layers_generic.h" /*! * @defgroup layers_generic_dqnn Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles padding with binary input and binary output * @ingroup layers_generic_dqnn * @param layer pad layer */ AI_INTERNAL_API void forward_pad_is1os1(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_PADDING_DQNN_H*/
1,499
C
26.777777
80
0.451634
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_upsample.h
/** ****************************************************************************** * @file lite_upsample.h * @author AIS * @brief header file of AI platform lite pw kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_UPSAMPLE_H #define LITE_UPSAMPLE_H #pragma once #include "ai_lite_interface.h" void forward_lite_upsample_bilinear_if32of32(const ai_float* in_data, ai_float* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void forward_lite_upsample_bilinear_is8os8(const ai_i8* in_data, ai_i8* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void forward_lite_upsample_bilinear_iu8ou8(const ai_u8* in_data, ai_u8* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void forward_lite_upsample_bilinear_is16os16(const ai_i16* in_data, ai_i16* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void 
forward_lite_upsample_bilinear_iu16ou16(const ai_u16* in_data, ai_u16* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); #endif /*LITE_UPSAMPLE__H*/
3,970
C
48.024691
80
0.380605
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/layers_generic.h
/** ****************************************************************************** * @file layers_generic.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform generic layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_GENERIC_H #define LAYERS_GENERIC_H #pragma once #include "layers_common.h" typedef enum { KTfLiteNone = 0, KTfLiteActRelu, KTfLiteActRelu1, KTfLiteActRelu6, KTfLiteActTanh, KTfLiteActSignBit, KTfLiteActSigmoid } ai_tflitefused_activation; /*! * @defgroup layers_generic Generic Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_time_delay * @ingroup layers_generic * @brief TimeDelay layer with sparse kernel */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_delay_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* mask; /*!< sparse filter mask */ } ai_layer_time_delay; /*! * @struct ai_layer_split * @ingroup layers_generic * @brief Split layer definition * * This layer defines the params of a splitting layer. 
It is intended to be used * by his associated forward function @ref forward_split */ //typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_split_ { // AI_LAYER_COMMON_FIELDS_DECLARE // ai_u16 out_layers_count; /*!< number of output layers to split*/ // ai_u16 out_layer_curr; /*!< current layer to split */ // ai_layer** out_layers; /*!< output layers list */ // ai_tensor** out_tensors; /*!< output tensors list */ // ai_tensor* in_tensor; /*!< input tensor */ // func_copy_tensor copy_to_out_tensor; /*!< pointer to copy tensor func // (NULL = no copy) */ //} ai_layer_split; typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_split_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; //ai_tensor* num_or_size_splits; } ai_layer_split; /*! * @struct ai_layer_topK * @ingroup layers_generic * @brief topK layer definition */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_topK_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; ai_i16 largest; } ai_layer_topK; typedef AI_ALIGNED_TYPE(struct,4)ai_layer_svdf_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_size rank; ai_tflitefused_activation activation; } ai_layer_svdf; /*! * @struct ai_layer_slice * @ingroup layers_generic * @brief Slice layer definition * * This layer defines the params of a slicing layer. It is intended to be used * by his associated forward function @ref forward_slice */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_slice_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* axes; /*!< Axes that 'starts' and 'ends' apply to. It's optional*/ AI_CONST ai_array* starts; /*!< Starting indices of corrisponding axis in axes*/ AI_CONST ai_array* ends; /*!< Ending indices (exclusive) of corrisponding axis in axes*/ } ai_layer_slice; /*! * @struct ai_layer_gather * @ingroup layers_generic * @brief Gather layer definition * * This layer defines the params of a gathering layer. 
It is intended to be used * by his associated forward function @ref forward_gather */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gather_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; /*!< Which axis to gather on It's optional*/ ai_tensor* indices; /*!< Indices of corrisponding axis in axes*/ } ai_layer_gather; /*! * @struct ai_layer_tile * @ingroup layers generic * @brief Tile layer definition * * This layer defines the param of an tile layer. It constructs a tensor by tiling a * given tensor. It is intended to be used by its associated forward function * @ref forward_upsample */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tile_{ AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* repeats; /*!< numbers of repeated copies along each dimension */ } ai_layer_tile; /*! * @struct ai_layer_shape * @ingroup layers generic * @brief Shape layer definition * * This layer defines the param of a shape layer. It returns the shape of the * input tensor. It is intended to be used by its associated forward function * @ref forward_shape */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_shape_{ AI_LAYER_COMMON_FIELDS_DECLARE } ai_layer_shape; /*! * @struct ai_layer_upsample * @ingroup layers generic * @brief Upsample layer definition * * This layer defines the param of an upsampling layer. It overloads its params * to allow zeros upsampling, helpful traspose convolutions, for instance. * It is intended to be used by its associated forward function @ref forward_upsample */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_upsample_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_upsample_mode mode; /*!< upsample mode */ ai_bool center; /*!< center pixels */ AI_CONST ai_array* scales; /*!< scale array along each dimension */ ai_nearest_mode nearest_mode; /*!< used in nearest mode */ } ai_layer_upsample; /*! * @struct ai_layer_resize * @ingroup layers generic * @brief Resize layer definition * * This layer defines the param of a resize layer. 
* It is intended to be used by its associated forward function @ref forward_resize */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_resize_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_coord_transf_mode coord_transf_mode; /*!< coordinate tranformation mode */ ai_float cubic_coeff_a; /*!< the coefficient 'a' used in cubic interpolation */ ai_bool exclude_outside; /*!< exclude outside pixels flag */ ai_float extrapol_val; /*!< used in tf_crop_and_resize cas */ ai_resize_mode mode; /*!< resize mode */ ai_nearest_mode nearest_mode; /*!< used in nearest mode */ AI_CONST ai_array* scales; /*!< scale array along each dimension */ AI_CONST ai_array* roi; /*!< roi array, used in tf_crop_and_resize case */ } ai_layer_resize; /*! * @struct ai_layer_instanceNormalization * @ingroup layers generic * @brief instance normalization layer definition * * This layer defines the params of an instance normalization layer. * It is intended to be used by its associated forward function @ref forward_instanceNormalization */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_instanceNormaization_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_float eps; /*!< epsilon value, to avoid by zero division */ } ai_layer_instanceNormalization; /*! * @struct ai_layer_mode * @ingroup layers generic * @brief Pad layer definition * * This layer defines the param of an pad layer. It pad a tensor. * It is intended to be used by its associated forward function @ref forward_pad */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pad_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_pad_mode mode; /*!< pad mode */ ai_shape pads; /*!< Number of padding to add or remove at the beginning and end of each axis */ const ai_array* value; /*!< Indicates the value to be filled */ } ai_layer_pad; /*! * @struct ai_layer_mode * @ingroup layers generic * @brief ConstantOfShape layer definition * * This layer defines the param of an constantofshape layer. It constantofshape a tensor. 
* It is intended to be used by its associated forward function @ref forward_constantofshape */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_constantofshape_{ AI_LAYER_COMMON_FIELDS_DECLARE const ai_array* value; /*!< Indicates the value to be filled */ } ai_layer_constantofshape; /*! * @struct ai_layer_add * @ingroup layers_generic * @brief Add layer definition * * This layer defines the params of an add layer. */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_add_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_u16 in_layers_count; /*!< number of input layers to concat */ ai_u16 in_layer_curr; /*!< current layer to concat */ ai_tensor** in_tensors; /*!< input tensors list (if NULL==no copy) */ ai_tensor* out_tensor; /*!< output tensor (if NULL==no copy) */ func_copy_tensor copy_to_out_tensor; /*!< pointer to copy tensor func (NULL = no copy) */ ai_layer_base* split_layer; /*!< pointer to associated split layer */ ai_layer_base* next_layer; /*!< pointer to next layer to process */ } ai_layer_add; typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_argmax_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; ai_i16 select_last_index; } ai_layer_argmax; typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_argmin_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; ai_i16 select_last_index; } ai_layer_argmin; // TODO: REMOVE This legacy typedef ai_layer_argmax ai_layer_ArgMax; typedef ai_layer_argmin ai_layer_ArgMin; /*! * @struct ai_layer_transpose * @ingroup layers_generic * @brief Transpose layer datastruct declaration. This defines the params of a * transpose layer. It is intended to be used by his associated forward function * @ref forward_transpose */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_transpose_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape out_mapping; /*!< transpose output mapping order. I.e. tt is a permutation of the input tensor shape */ } ai_layer_transpose; /*! * @struct ai_layer_transpose_batch * @ingroup layers_generic * @brief Transpose batch layer datastruct declaration. 
This defines the params of a * transpose layer. It is intended to be used by his associated forward function * @ref forward_transpose_batch */ typedef ai_layer_base ai_layer_transpose_batch; #define AI_TIME_DISTRIBUTED_AXIS (AI_SHAPE_HEIGHT) /*! * @struct ai_layer_time_distributed * @ingroup layers_generic * @brief Time distributed layer datastruct declaration. This defines the params * of a time distributed layer. It is intended to be used by his associated * forward function @ref forward_time_distributed */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_distributed_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_layer_base* inner_layer; /*!< inner layer to process */ } ai_layer_time_distributed; /*! * @struct ai_layer_concat * @ingroup layers_generic * @brief Concatenation layer * * Concat Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_concat_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; /*!< which axis to concatenate on */ } ai_layer_concat; /*! * @struct ai_layer_pack * @ingroup layers_generic * @brief pack layer * * Pack Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pack_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; /*!< which axis to concatenate on */ } ai_layer_pack; /*! * @struct ai_layer_unpack * @ingroup layers_generic * @brief unpack layer * * Unpack Layer. * It is a sequential layer. 
see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_unpack_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; /*!< which axis to concatenate on */ } ai_layer_unpack; typedef void (*func_binary)(ai_handle out,const ai_handle a, const ai_handle b); typedef void (*func_buffer_binary)(ai_handle out,const ai_handle a, const ai_handle b, const ai_size loop); typedef void (*func_buffer_binary_integer)(ai_handle out,const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle scale1, const ai_handle zp1, const ai_handle scale2, const ai_handle zp2, const ai_handle scaleout, const ai_handle zpout, const ai_i32 scalar_op); /*! * @struct ai_layer_eltwise * @ingroup layers_generic * @brief General element-wise transformation layer * * Elementwise Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_eltwise_ { AI_LAYER_COMMON_FIELDS_DECLARE func_binary operation; /*!< operation to apply elementwise */ func_buffer_binary buffer_operation; /*!< operation to apply elementwise */ } ai_layer_eltwise; /*! * @struct ai_layer_eltwise_integer * @ingroup layers_generic * @brief General element-wise transformation layer for integer data * * Elementwise Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_eltwise_integer_ { AI_LAYER_COMMON_FIELDS_DECLARE func_binary operation; /*!< operation to apply elementwise */ func_buffer_binary_integer buffer_operation; /*!< operation to apply elementwise */ } ai_layer_eltwise_integer; /*! * @struct ai_layer_reduce * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. 
see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_ { AI_LAYER_COMMON_FIELDS_DECLARE const ai_array* neutral_value; /*!< Initialization value for operation */ func_binary operation; /*!< operation to apply elementwise */ } ai_layer_reduce; /*! * @struct ai_layer_reduce_log_sum_exp * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_log_sum_exp_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; } ai_layer_reduce_log_sum_exp; /*! * @struct ai_layer_reduce l1 * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_l1_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* axes; } ai_layer_reduce_l1; /*! * @struct ai_layer_reduce l2 * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_l2_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* axes; } ai_layer_reduce_l2; /*! * @struct ai_layer_where * @ingroup layers generic * @brief Where layer definition * * This layer operates on 3 input tensors: condition, X and Y. * It return elements, either from X or Y, depending on condition * (with Numpy-style broadcasting support). * @ref forward_where */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_where_ { AI_LAYER_COMMON_FIELDS_DECLARE const ai_array *shapes_len; ai_bool channel_first; } ai_layer_where; /*! 
* @struct ai_layer_reverse * @ingroup layers_reverse * @brief Reverse layer * * The type of reverse function is handled by the specific forward function * @ref forward_svm_regressor */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reverse_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i32 axis; /*!< selected axis to perform the operation */ } ai_layer_reverse; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Dummy forward routine with no processing. * @ingroup layers_generic * @param generic layer handle */ AI_INTERNAL_API void forward_nop(ai_layer* layer); /*! * @brief Computes the activations of a TimeDelay layer. * @ingroup layers_generic * @param layer the time delay layer */ AI_INTERNAL_API void forward_time_delay(ai_layer* layer); /*! * @brief Split network computation in N parallel branches. * @ingroup layers_generic * @param layer the split layer */ AI_INTERNAL_API void forward_split(ai_layer* layer); /*! * @brief Add network computation from N parallel branches. * @ingroup layers_generic * @param layer the add layer */ AI_INTERNAL_API void forward_add(ai_layer* layer); /*! * @brief Compute the indices of the max elements of the input tensor's element along the provided axis. * @ingroup layers_generic * @param layer argmax layer */ AI_INTERNAL_API void forward_argmax(ai_layer* layer); /*! * @brief Compute the indices of the min elements of the input tensor's element along the provided axis. * @ingroup layers_generic * @param layer argmin layer */ AI_INTERNAL_API void forward_argmin(ai_layer* layer); /*! * @brief Svdf layer. * @ingroup layers_generic * @param layer svdf layer */ AI_INTERNAL_API void forward_svdf(ai_layer* layer); /*! 
* @brief Transpose a tensor along a pivot and save transposed values into an output * tensor * @ingroup layers_generic * @param layer the transpose layer */ AI_INTERNAL_API void forward_transpose(ai_layer* layer); /*! * @brief Transpose batch and save transposed values of a determinate batch into an output * tensor * @ingroup layers_generic * @param layer the transpose batch layer */ AI_INTERNAL_API void forward_transpose_batch(ai_layer* layer); /*! * @brief TimeDistrubuted forward layer function. This forward function * implements the timedistributed layer. * @ingroup layers_generic * @param layer the time distributed layer */ AI_INTERNAL_API void forward_time_distributed(ai_layer* layer); /*! * @brief Packing a list of tensors in a single tensor * @ingroup layers generic * @param layer the packing layer */ AI_INTERNAL_API void forward_pack(ai_layer* layer); /*! * @brief Unpacking a single of tensors in a list tensor * @ingroup layers generic * @param layer the unpacking layer */ AI_INTERNAL_API void forward_unpack(ai_layer* layer); /*! * @brief Concatenates a list of tensors into a single tensor. * @ingroup layers_generic * @param layer the concatenation layer */ AI_INTERNAL_API void forward_concat(ai_layer* layer); /*! * @brief Gather an input tensor * @ingroup layers_generic * @param layer the gathered layer */ AI_INTERNAL_API void forward_gather(ai_layer* layer); /*! * @brief Slice an input tensors * @ingroup layers_generic * @param layer the sliced layer */ AI_INTERNAL_API void forward_slice(ai_layer* layer); /*! * @brief Tile an input tensors * @ingroup layers_generic * @param layer the tiled layer */ AI_INTERNAL_API void forward_tile(ai_layer* layer); /*! * @brief Returns the shape of an input tensors * @ingroup layers_generic * @param layer the Shape layer */ AI_INTERNAL_API void forward_shape(ai_layer* layer); /*! 
* @brief TopK an input tensors * @ingroup layers_generic * @param layer the Topked layer */ AI_INTERNAL_API void forward_topK(ai_layer* layer); /*! * @brief Pad an input tensors * @ingroup layers_generic * @param layer the pad layer */ AI_INTERNAL_API void forward_pad(ai_layer* layer); /*! * @brief ConstantofShape an input tensors * @ingroup layers_generic * @param layer the constantofshape layer */ AI_INTERNAL_API void forward_constantofshape(ai_layer* layer); /*! * @brief Upsample an input tensors * @ingroup layers_generic * @param layer the upsampled layer */ AI_INTERNAL_API void forward_upsample(ai_layer* layer); /*! * @brief Resize an input tensors * @ingroup layers_generic * @param layer the resized layer */ AI_INTERNAL_API void forward_resize(ai_layer* layer); /*! * @brief Instance Normalization on an input tensors * @ingroup layers_generic * @param layer the instance normalization layer */ AI_INTERNAL_API void forward_instanceNormalization(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the integer input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise_integer(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the signed integer input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise_integer_INT8(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the unsigned integer input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise_integer_UINT8(ai_layer* layer); /*! 
* @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce(ai_layer* layer); /*! * @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce_log_sum_exp(ai_layer* layer); /*! * @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce_l1(ai_layer* layer); /*! * @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce_l2(ai_layer* layer); /*! * @brief Behave like numpy.where with Numpy-style broadcasting support * @ingroup layers_generic * @param layer the where layer */ AI_INTERNAL_API void forward_where(ai_layer* layer); /*! * @brief Apply an elementwise addition to the input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_add_integer(ai_layer* layer); /*! * @brief Apply an elementwise addition to the input tensors * with int8 I/O * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_add_integer_INT8(ai_layer* layer); /*! * @brief Apply an elementwise addition to the input tensors * with uint8 I/O * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_add_integer_UINT8(ai_layer* layer); /*! * @brief Reverse layer. * @ingroup layers_generic * @param layer reverse layer */ AI_INTERNAL_API void forward_reverse(ai_layer *pLayer); /*! * @brief Upsample an input tensors with unsigned 8-bit integer input,. * It is to be used also for other formats, since the function only * performs memory copy. 
* @ingroup layers_generic * @param layer the upsampled layer */ AI_INTERNAL_API void forward_upsample_generic(ai_layer* layer); AI_API_DECLARE_END #endif /*LAYERS_GENERIC_H*/
22,964
C
28.292092
130
0.683505
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/ai_datatypes_defines.h
/** ****************************************************************************** * @file ai_datatypes_defines.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform private APIs types ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_DATATYPES_DEFINES_H #define AI_DATATYPES_DEFINES_H #pragma once #include "ai_platform.h" /*! * @defgroup datatypes_defines Internal Datatypes Defines Header * @brief Data structures used internally to implement neural networks * */ /* define to track datatypes used by codegen */ #define AI_INTERFACE_TYPE /* AI_INTERFACE_TYPE */ #define AI_INTERNAL_API /* AI_INTERNAL_API */ #define AI_CONST const #define AI_STATIC static #define AI_STATIC_CONST static const /******************************************************************************/ /* NOP operation used by codegen */ #define AI_NOP /* NOP */ #define AI_WRAP_FUNC(fn_) do { fn_ } while (0); #define AI_CAT(a, ...) AI_PRIMITIVE_CAT(a, __VA_ARGS__) #define AI_PRIMITIVE_CAT(a, ...) a ## __VA_ARGS__ /******************************************************************************/ #ifdef HAS_AI_ASSERT #include <assert.h> #define AI_ASSERT(cond) \ { assert(cond); } #else #define AI_ASSERT(cond) \ AI_WRAP_FUNC(/*AI_ASSERT*/) #endif /*HAS_AI_ASSERT*/ /******************************************************************************/ #define AI_NO_PACKED_STRUCTS /* Macro for defining packed structures (compiler dependent). 
* This just reduces memory requirements, but is not required. */ #if defined(AI_NO_PACKED_STRUCTS) /* Disable struct packing */ #define AI_PACKED_STRUCT_START /* AI_PACKED_STRUCT_START */ #define AI_PACKED_STRUCT_END /* AI_PACKED_STRUCT_END */ #define AI_PACKED /* AI_PACKED */ #elif defined(__GNUC__) || defined(__clang__) /* For GCC and clang */ #define AI_PACKED_STRUCT_START /* AI_PACKED_STRUCT_START */ #define AI_PACKED_STRUCT_END /* AI_PACKED_STRUCT_END */ #define AI_PACKED __attribute__((packed)) #elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) || defined(__CC_ARM) /* For IAR ARM and Keil MDK-ARM compilers */ #define AI_PACKED_STRUCT_START _Pragma("pack(push, 1)") #define AI_PACKED_STRUCT_END _Pragma("pack(pop)") #define AI_PACKED /* AI_PACKED */ #elif defined(_MSC_VER) && (_MSC_VER >= 1500) /* For Microsoft Visual C++ */ #define AI_PACKED_STRUCT_START __pragma(pack(push, 1)) #define AI_PACKED_STRUCT_END __pragma(pack(pop)) #define AI_PACKED /* AI_PACKED */ #else /* Unknown compiler */ #define AI_PACKED_STRUCT_START /* AI_PACKED_STRUCT_START */ #define AI_PACKED_STRUCT_END /* AI_PACKED_STRUCT_END */ #define AI_PACKED /* AI_PACKED */ #endif /* AI_NO_PACKED_STRUCTS */ /******************************************************************************/ #define AI_STRINGIFY_ARG(contents) # contents #define AI_STRINGIFY(macro_or_string) AI_STRINGIFY_ARG (macro_or_string) /******************************************************************************/ #if defined(_MSC_VER) #define AI_DECLARE_STATIC static __inline // #define AI_FORCE_INLINE static __forceinline #define AI_FORCE_INLINE static __inline #define AI_HINT_INLINE static __inline #define AI_ALIGNED_TYPE(type, x) type __declspec(align(x)) #define AI_INTERFACE_ENTRY __declspec(dllexport) #elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) #define AI_DECLARE_STATIC static inline // #define AI_FORCE_INLINE static _Pragma("inline=forced") // TODO: check this definition! 
#define AI_FORCE_INLINE static inline #define AI_HINT_INLINE static inline #define AI_ALIGNED_TYPE(type, x) type #define AI_INTERFACE_ENTRY /* AI_INTERFACE_ENTRY */ #elif defined(__GNUC__) #define AI_DECLARE_STATIC static __inline #define AI_FORCE_INLINE static __inline #define AI_HINT_INLINE static __inline #define AI_ALIGNED_TYPE(type, x) type __attribute__ ((aligned(x))) #define AI_INTERFACE_ENTRY /* AI_INTERFACE_ENTRY */ #else /* _MSC_VER */ #define AI_DECLARE_STATIC static __inline // #define AI_FORCE_INLINE static __forceinline #define AI_FORCE_INLINE static __inline #define AI_HINT_INLINE static __inline #define AI_ALIGNED_TYPE(type, x) type __attribute__ ((aligned(x))) #define AI_INTERFACE_ENTRY __attribute__((visibility("default"))) #endif /* _MSC_VER */ /******************************************************************************/ #define AI_ALIGN_MASKED(value, mask) ( ((value)+(mask))&(~(mask)) ) #define AI_GET_VERSION_STRING(major, minor, micro) \ AI_STRINGIFY_ARG(major) "." \ AI_STRINGIFY_ARG(minor) "." \ AI_STRINGIFY_ARG(micro) \ #define AI_PACK_TENSORS_PTR(...) \ AI_PACK(__VA_ARGS__) #define AI_PACK_INFO(size_) (ai_tensor_info[1]) { { \ .buffer = (ai_buffer[size_])AI_STRUCT_INIT, \ .state = (ai_tensor_state[size_])AI_STRUCT_INIT, \ } } #define AI_CR "\r\n" #if (defined HAS_AI_DEBUG || defined HAS_DEBUG_LIB) #include <stdio.h> #define AI_DEBUG(...) __VA_ARGS__ #define AI_DEBUG_PRINT(fmt, ...) { printf(fmt, ##__VA_ARGS__); } #else #define AI_DEBUG(...) AI_WRAP_FUNC(/*AI_DEBUG*/) #define AI_DEBUG_PRINT(fmt, ...) AI_WRAP_FUNC(/*AI_DEBUG_PRINT*/) #endif #define AI_FLAG_SET(mask, flag) (mask) |= (flag) #define AI_FLAG_UNSET(mask, flag) (mask) &= (~(flag)) #define AI_FLAG_IS_SET(mask, flag) ((flag)==((mask)&(flag))) #endif /*AI_DATATYPES_DEFINES_H*/
6,551
C
39.444444
105
0.526637
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/layers_dense_dqnn.h
/** ****************************************************************************** * @file layers_dense_dqnn.h * @author AST Embedded Analytics Research Platform * @brief header file of deeply quantized dense layers. ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_DENSE_DQNN_H #define LAYERS_DENSE_DQNN_H #pragma once #include "layers_common.h" /*! * @defgroup layers_dense_dqnn Quantized Dense Layers definition. * @brief Implements the kernels and the forward functions to implement * dense layers with quantized inputs, weights, or outputs. */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_dense_dqnn * @ingroup layers_dense_dqnn * @brief Specific instance of deeply quantized dense layers. */ typedef ai_layer_base ai_layer_dense_dqnn; /*****************************************************************************/ /* Forward Functions Section */ /*****************************************************************************/ /*! * @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed binary weights. 
* The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 8-bit signed output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 8-bit signed output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and signed binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 32-bit floating point weights. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32wf32(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 32-bit floating point weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32wf32_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 8-bit signed weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws8_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * binary output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * binary output, and 8-bit signed weights. 
* The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws8_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 8-bit signed output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os8ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 16-bit signed output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os16ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * float output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * float output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8of32ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 1-bit signed output, and binary weights. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 1-bit signed output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * binary weights and binary output. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os1ws1_bn_fxp(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 8-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 16-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 1-bit signed output, and binary weights. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 1-bit signed output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 8-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 16-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * f32 output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * f32 output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16of32ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 1-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 1-bit signed output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 8-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 16-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * f32 output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * f32 output, and binary weights. 
* The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32of32ws1_bn(ai_layer* layer); AI_API_DECLARE_END #endif /*LAYERS_DENSE_DQNN_H*/
14,182
C
34.546366
80
0.709632
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/ai_lite.h
/** ****************************************************************************** * @file ai_lite.h * @author AST Embedded Analytics Research Platform * @brief Definitions and implementations of runtime-lite public APIs ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_LITE_H #define AI_LITE_H #pragma once #include "ai_platform.h" #include "ai_lite_inspect.h" #define LITE_API_ENTRY \ /* LITE_API_ENTRY */ #define LITE_GRAPH_INIT(_inputs, _outputs, _activations, _weights, _cb, _cb_cookie) { \ .inputs = (_inputs), \ .outputs = (_outputs), \ .activations = (_activations), \ .weights = (const ai_handle*)(_weights), \ .cb = ((ai_lite_inspect_cb)(_cb)), \ .cb_cookie = ((ai_handle)(_cb_cookie)), \ } AI_API_DECLARE_BEGIN typedef enum { LITE_OK = 0, LITE_KO_INPUTS, LITE_KO_OUTPUTS, LITE_KO_WEIGHTS, LITE_KO_ACTIVATIONS, LITE_KO_GRAPH, } lite_result; typedef struct { ai_handle* inputs; ai_handle* outputs; ai_handle* activations; const ai_handle* weights; ai_lite_inspect_cb cb; ai_handle cb_cookie; } lite_graph; AI_API_DECLARE_END #endif /* AI_LITE_H */
1,699
C
25.5625
87
0.521483
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/layers_ml_linearclassifier.h
/** ****************************************************************************** * @file layers_ml_linearclassifier.h * @author SRA * @brief header file of AI platform LinearClassifier datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_LINEARCLASSIFIER_H #define LAYERS_LINEARCLASSIFIER_H #pragma once #include "layers_common.h" #include "layers_nl.h" /*! * @defgroup layers_linearclassifier Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_linearclassifier * @ingroup layers_linearclassifier * @brief Linearclassifier layer * * The type of svmreg function is handled by the specific forward function * @ref forward_linearclassifier */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_linearclassifier_ { AI_LAYER_COMMON_FIELDS_DECLARE func_nl nl_func; /*!< function pointer to non linear transform */ \ ai_bool multi_class; /*!< Indicates whether to do OvR or multinomial */ ai_bool has_classlabels_int; /*!< if True, LinearClassifier returns classlabels int, else classlabels string */ } ai_layer_linearclassifier; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Decodes the LinearClassifier ML operator. 
* @ingroup layers_linaerclassifier * @param layer linear classifier layer */ AI_INTERNAL_API void forward_linearclassifier(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_LINEARCLASSIFIER_H*/
2,176
C
29.661971
118
0.542279
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_nl_list.h
/** ****************************************************************************** * @file lite_nl_list.h * @author AST Embedded Analytics Research Platform * @brief header file of lite supported non-linearities routines ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ // #define LITE_NL_ENTRY(nl_id_, nl_name_, nl_op_, nl_op_args_) /* No sentry. This is deliberate!! */ LITE_NL_ENTRY(1, abs, AI_ABS, 1) LITE_NL_ENTRY(2, acos, AI_MATH_ACOS, 1) LITE_NL_ENTRY(3, acosh, AI_MATH_ACOSH, 1) LITE_NL_ENTRY(4, asin, AI_MATH_ASIN, 1) LITE_NL_ENTRY(5, asinh, AI_MATH_ASINH, 1) LITE_NL_ENTRY(6, atan, AI_MATH_ATAN, 1) LITE_NL_ENTRY(7, atanh, AI_MATH_ATANH, 1) LITE_NL_ENTRY(8, ceil, AI_CEIL, 1) LITE_NL_ENTRY(9, cos, AI_MATH_COS, 1) LITE_NL_ENTRY(10, cosh, AI_MATH_COSH, 1) LITE_NL_ENTRY(11, erf, AI_MATH_ERF, 1) LITE_NL_ENTRY(12, exp, AI_MATH_EXP, 1) LITE_NL_ENTRY(13, floor, AI_FLOOR, 1) LITE_NL_ENTRY(14, hardmax, /**/, 0) LITE_NL_ENTRY(15, log, AI_MATH_LOG, 1) LITE_NL_ENTRY(16, logistic, AI_MATH_LOGISTIC, 1) LITE_NL_ENTRY(17, neg, AI_NEG, 1) LITE_NL_ENTRY(18, rsqrt, AI_MATH_RSQRT, 1) LITE_NL_ENTRY(19, sin, AI_MATH_SIN, 1) LITE_NL_ENTRY(20, sinh, AI_MATH_SINH, 1) LITE_NL_ENTRY(21, tan, AI_MATH_TAN, 1) LITE_NL_ENTRY(22, square, AI_MATH_SQUARE, 1) LITE_NL_ENTRY(23, reciprocal, AI_RECIPROCAL, 1) LITE_NL_ENTRY(24, round, AI_ROUND, 1) LITE_NL_ENTRY(25, sigmoid, AI_MATH_SIGMOID, 1) LITE_NL_ENTRY(26, swish, AI_MATH_SWISH, 1) LITE_NL_ENTRY(27, hard_swish, AI_MATH_HARD_SWISH, 1) LITE_NL_ENTRY(28, sign, 
AI_SIGN, 1) LITE_NL_ENTRY(29, sqrt, AI_MATH_SQRT, 1) // LITE_NL_ENTRY(30, softmax, /**/, 0) // for future changes // LITE_NL_ENTRY(31, softmax_zero_channel, /**/, 0) // for future changes LITE_NL_ENTRY(32, soft_plus, AI_MATH_SOFT_PLUS, 1) LITE_NL_ENTRY(33, soft_sign, AI_MATH_SOFT_SIGN, 1) LITE_NL_ENTRY(34, tanh, AI_MATH_TANH, 1) LITE_NL_ENTRY(35, prelu, /**/, 0) LITE_NL_ENTRY(36, relu, AI_MATH_RELU, 1) LITE_NL_ENTRY(37, relu_generic, /**/, 0) LITE_NL_ENTRY(101, elu, AI_MATH_ELU, 2) LITE_NL_ENTRY(102, relu_thresholded, AI_MATH_RELU_THRESHOLDED, 2) LITE_NL_ENTRY(201, clip, AI_CLAMP, 3) LITE_NL_ENTRY(202, hard_sigmoid, AI_MATH_HARD_SIGMOID, 3) LITE_NL_ENTRY(203, selu, AI_MATH_SELU, 3) #undef LITE_NL_ENTRY #undef LITE_NL_IIF_0 #undef LITE_NL_IIF_1 #undef LITE_NL_IIF_2 #undef LITE_NL_IIF_3
2,844
C
35.474359
80
0.60443
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/ai_math_helpers.h
/** ****************************************************************************** * @file ai_math_helpers.h * @author AST Embedded Analytics Research Platform * @brief Math helpers routines header file. ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_MATH_HELPERS_H #define AI_MATH_HELPERS_H #include "ai_lite_math_helpers.h" //#if defined(HAS_X86) || defined(__CC_ARM) || defined(CM4) || defined(CM7) #define _AI_CONV_2D_LOOP_UNROLLING_OPTIM //#endif #define STM32_DOT_INLINE_OPTIM /* Modes for element wise integer optimized implementation */ #define AI_ELTWISE_NO_SCALAR (0) #define AI_ELTWISE_SCALAR1 (1) #define AI_ELTWISE_SCALAR2 (2) #define AI_ELTWISE_SCALAR_CH1 (3) #define AI_ELTWISE_SCALAR_CH2 (4) AI_API_DECLARE_BEGIN /*! 
* @typedef ai_vec4_float * @ingroup ai_datatypes_internal * @brief 32bit X 4 float (optimization for embedded MCU) */ typedef struct _ai_vec4_float { ai_float a1; ai_float a2; ai_float a3; ai_float a4; } ai_vec4_float; #define AI_VEC4_FLOAT(ptr_) \ _get_vec4_float((ai_handle)(ptr_)) AI_DECLARE_STATIC ai_vec4_float _get_vec4_float(const ai_handle fptr) { return *((const ai_vec4_float*)fptr); } #if defined(STM32_DOT_INLINE_OPTIM) AI_DECLARE_STATIC void __ai_math_dot_array( ai_float* out, const ai_float* data0, const ai_float* data1, ai_size data_size) { register ai_float sum = 0.0f; /* Temporary result storage */ /* Run the below code for Cortex-M4 and Cortex-M3 */ #if defined(_AI_CONV_2D_LOOP_UNROLLING_OPTIM) /* First part of the processing with loop unrolling. Compute 16 outputs at a time. ** a second loop below computes the remaining 1 to 15 samples. */ while (data_size >= 16u) { register ai_vec4_float ch_in_f = AI_VEC4_FLOAT(data1); register ai_vec4_float weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; ch_in_f = AI_VEC4_FLOAT(data1); weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; ch_in_f = AI_VEC4_FLOAT(data1); weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; ch_in_f = AI_VEC4_FLOAT(data1); weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; data_size -= 16u; } #else /* First part of the processing with loop unrolling. 
Compute 4 outputs at a time. ** a second loop below computes the remaining 1 to 3 samples. */ while (data_size >= 4u) { /* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */ /* Calculate dot product and then store the result in a temporary buffer */ sum += (*data0++) * (*data1++); sum += (*data0++) * (*data1++); sum += (*data0++) * (*data1++); sum += (*data0++) * (*data1++); /* Decrement the loop counter */ data_size -= 4u; } #endif while (data_size > 0u) { /* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */ /* Calculate dot product and then store the result in a temporary buffer. */ sum += (*data0++) * (*data1++); /* Decrement the loop counter */ data_size--; } /* Directly accumulate the result back in the destination buffer */ *out += sum; } #undef AI_MATH_DOT_ARRAY #define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \ { __ai_math_dot_array(dst, src0, src1, size); } #else /* STM32_DOT_INLINE_OPTIM */ #undef AI_MATH_DOT_ARRAY #define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \ { ai_math_dot_array(dst, src0, src1, size); } #endif /*! * @defgroup math_helpers Math helpers * @brief Common math functions * * Math functions are mapped to the underlying platform through those utility * functions. On x86 and ARM v7 they are mapped to the float math functions in * the C99 standard library; on MCUs they are mapped to the ARM DSP functions. */ /*! * @brief platform optimized dot product of float vectors * * Computes the dot product between vectors and adds the result to out. * @ingroup math_helpers * @param out scalar result of the dot product * @param data0 the first float vector * @param data1 the second float vector * @param data_size the size of both vectors */ AI_INTERFACE_ENTRY void ai_math_dot_array( ai_float* out, const ai_float* data0, const ai_float* data1, const ai_size data_size); /*! 
* @brief ErfInv a float value * @ingroup math_helpers * @param x input value * @return square root of the value */ AI_INTERFACE_ENTRY ai_float ai_math_erfinv(const ai_float x); /*! * @brief platform optimized exponential on a float value * @ingroup math_helpers * @param x input value * @return exponential of the value */ AI_INTERFACE_ENTRY ai_float ai_math_exp(const ai_float x); /*! * @brief platform logical not * @ingroup math_helpers * @param x input value * @return not of the value */ AI_INTERFACE_ENTRY ai_bool ai_logical_not(const ai_bool x); /*! * @brief platform optimized pow on a float value * @ingroup math_helpers * @param x input value * @param e input value * @return pow of the value ^ e */ AI_INTERFACE_ENTRY ai_float ai_math_pow(const ai_float x, const ai_float e); /*! * @brief platform optimized tangent on a float value * @ingroup math_helpers * @param x input value * @return hyperbolic tangent of the value */ AI_INTERFACE_ENTRY ai_float ai_math_tanh(const ai_float x); /*! * @brief platform optimized relu on a float value * @ingroup math_helpers * @param x input value * @return relu of the value ( x if x>0 else 0) */ AI_INTERFACE_ENTRY ai_float ai_math_relu(const ai_float x); /*! * @brief platform optimized parametric relu on a float value * @ingroup math_helpers * @param x input value * @param slope input value * @return parametric relu of the value */ AI_INTERFACE_ENTRY ai_float ai_math_prelu(const ai_float x, const ai_float slope); /*! * @brief platform optimized parametric sigmoid on a float value * @ingroup math_helpers * @param x input value * @return sigmoid of the value */ AI_INTERFACE_ENTRY ai_float ai_math_sigmoid(const ai_float x); /*! * @brief platform optimized parametric hard sigmoid on a float value * @ingroup math_helpers * @param x input value * @return hard sigmoid of the value */ AI_INTERFACE_ENTRY ai_float ai_math_hard_sigmoid(const ai_float x); // const ai_float alpha, const ai_float beta); /*! 
* @brief platform optimized parametric swish on a float value * @ingroup math_helpers * @param x input value * @return swish of the value */ AI_INTERFACE_ENTRY ai_float ai_math_swish(const ai_float x); /*! * @brief platform optimized parametric hard_swish on a float value * @ingroup math_helpers * @param x input value * @return hard_swish of the value */ AI_INTERFACE_ENTRY ai_float ai_math_hard_swish(const ai_float x); /*! * @brief platform optimized parametric sign function on a float value * @ingroup math_helpers * @param x input value * @return sign of the value */ AI_INTERFACE_ENTRY ai_float ai_math_sign(const ai_float x); /*! * @brief optimized parametric rectified linear unit on a float value * @ingroup math_helpers * @param x input value * @param slope parameter value * @return x if x is positive and x*slope otherwise */ AI_INTERFACE_ENTRY ai_float ai_fast_prelu(const ai_float x, const ai_float slope); AI_INTERFACE_ENTRY void ai_div(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_u32(ai_handle out, const 
ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_div_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_bitshift_right(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_right_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_right_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_right_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u8(ai_handle out, const ai_handle a, const 
ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_floor_div(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_floor_div_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_floor_mod(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_floor_mod_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_s16(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_max_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_min(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_s32(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_min_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_mul(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_f32(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_mul_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_pow(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_pow_buffer(ai_handle out, const ai_handle b, const ai_handle e, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const 
ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_sub_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_sum(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); 
AI_INTERFACE_ENTRY void ai_sum_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_sum_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_and(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_and_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_or(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_or_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_xor(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_xor_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size 
loop); AI_INTERFACE_ENTRY void ai_greater_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); 
AI_INTERFACE_ENTRY void ai_greater_or_equal_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_u16(ai_handle out, const ai_handle a, const 
ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_u8(ai_handle out, const ai_handle a, const ai_handle b); 
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_squared_diff(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_squared_diff_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); 
AI_API_DECLARE_END #endif /* AI_MATH_HELPERS_H */
34,676
C
61.820652
137
0.706252
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/layers_ml_treeensembleregressor.h
/** ****************************************************************************** * @file layers_svmregressor.h * @author AIS * @brief header file of AI platform SVM Regressor datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_TREE_ENSEMBLE_REGRESSOR_H #define LAYERS_TREE_ENSEMBLE_REGRESSOR_H #pragma once #include "layers_common.h" #include "layers_ml_treeensembleclassifier.h" #include "layers_nl.h" /*! * @defgroup layers_svmreg Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tree_ensemble_regressor_ { AI_LAYER_COMMON_FIELDS_DECLARE func_nl nl_func; uint8_t all_weights_are_positive; ai_float nodes_values_offset; ai_float nodes_values_scale; ai_float target_weights_offset; ai_float target_weights_scale; } ai_layer_tree_ensemble_regressor; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Decodes the TreeEnsembleRegressor ML operator. * @ingroup layers_svmreg * @param layer tree ensemble regressor layer */ AI_INTERNAL_API void forward_tree_ensemble_regressor(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_SVMREGRESSOR_H*/
1,923
C
29.0625
80
0.520021
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_pool_f32.h
/** ****************************************************************************** * @file lite_maxpool_dqnn.h * @author AIS * @brief header file of AI platform lite maxpool kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_POOL_F32_H #define LITE_POOL_F32_H #include "ai_lite_interface.h" #define FUNC_POOL(handle) \ ((func_pool)(handle)) /*! * @typedef (*func_pool) * @ingroup layers_pool * @brief Fuction pointer for generic pooling transform * this function pointer abstracts a generic pooling layer. 
* see @ref pool_func_ap_array_f32 as examples */ typedef void (*func_pool)(ai_float* in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_float* out); /******************************************************************************/ /** Conv2d Functions Section **/ /******************************************************************************/ AI_INTERNAL_API void pool_func_mp_array_f32(ai_float* pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_float* pData_out); AI_INTERNAL_API void pool_func_ap_array_f32(ai_float *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_float *pData_out); #endif // LITE_POOL_F32_H_
2,936
C
39.232876
80
0.466962
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_gru_f32.h
#ifndef LITE_GRU_F32_H #define LITE_GRU_F32_H #pragma once #include "ai_lite_interface.h" /*! * @brief Forward function for a stateless GRU (gate recurrent unit) layer with * signed float input, signed float output, and float parameters. * @ingroup lite_gru_f32 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param gru_kernel The pointer to gru kernel param. * @param gru_recurrent The pointer to gru recurrent param. * @param gru_bias The pointer to bias. * @param gru_scratch The pointer to GRU scratch. * @param n_units The number of GRU cells (dimensionality of output space). * @param n_timesteps The number of timesteps of the input sequence. * @param n_features The number of features of the input sequence. * @param activation_nl The activation function used to update memory state. * @param recurrent_nl The activation function to use for the recurrent step. * @param return_seq If True, returns the full output sequence, else only the last output. * @param go_backwards If True, process the input sequence backwards. * @param reverse_seq If True, reverse the input sequence * @param reset_after Whether to apply reset gate after (True) or before (False) matmul. * @param activation_param The parameters for activation_nl (can be NULL) * @param recurrent_param The parameters for recurrent_nl (can be NULL) */ LITE_API_ENTRY void forward_lite_gru_if32of32wf32( ai_float* output, const ai_float* input, const ai_float* gru_kernel, const ai_float* gru_recurrent, const ai_float* gru_bias, ai_float* gru_scratch, const ai_u32 n_units, const ai_size n_timesteps, const ai_size n_features, ai_handle activation_nl, ai_handle recurrent_nl, ai_bool return_seq, ai_bool go_backwards, ai_bool reverse_seq, ai_bool reset_after, const ai_float* activation_param, const ai_float* recurrent_param); #endif /* LITE_GRU_F32_H */
1,910
C
46.774999
90
0.746597
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_dense_is1.h
#ifndef _LITE_DENSE_IS1_H #define _LITE_DENSE_IS1_H #pragma once #include "ai_lite_interface.h" /*! * @brief Forward function for a dense layer with signed binary input, * signed float output, and float weights. * @ingroup lite_dense_is1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias (NULL if not available). * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_ouy The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is1of32wf32( ai_float *output, const ai_pbits *input, const ai_float *weights, const ai_float *bias, ai_float *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out ); /*! * @brief Forward function for a dense layer with signed binary input, * signed float output, and float weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup lite_dense_is1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param scale The pointer to scale. * @param offset The pointer to offset. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_ouy The number of channels of the output, i.e., * the number of dense hidden neurons. 
*/ LITE_API_ENTRY void forward_lite_dense_is1of32wf32_bn( ai_float *output, const ai_pbits *input, const ai_float *weights, const ai_float *scale, const ai_float *offset, ai_float *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out ); #endif /*_LITE_DENSE_IS1_H*/
2,078
C
36.799999
80
0.720404
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_nl_generic_float.h
#ifndef LITE_NL_GENERIC_FLOAT_H
#define LITE_NL_GENERIC_FLOAT_H
#pragma once

#include "ai_lite_interface.h"

/* X-macro entry: each inclusion of "lite_nl_list.h" below expands this once
 * per supported non-linearity, declaring its float forward function. */
#define LITE_NL_ENTRY(nl_id_, nl_name_, nl_op_, nl_op_args_) \
/** \
 * @brief lite function for a templated non-linearity nl_op_. \
 * @ingroup lite_nl_generic_float \
 * @param out_ptr The pointer to output buffer. \
 * @param in_ptr The pointer to input buffer. \
 * @param in_size The size of the input. \
 * @param params opaque handler to optional NL params (not used). \
 */ \
LITE_API_ENTRY \
void forward_lite_nl_ ## nl_name_ ## _if32of32( \
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size, const ai_handle params);

#include "lite_nl_list.h"

/**
 * @brief lite function for a float softmax non-linearity where the softmax is
 * applied per channel.
 * @ingroup lite_nl_generic_float
 * @param out_ptr The pointer to output buffer.
 * @param in_ptr The pointer to input buffer.
 * @param in_size The size of the input.
 * @param ch_size The size of each channel.
 * @param in_ch_step The input channel step.
 * @param out_ch_step The output channel step.
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size,
  const ai_size ch_size, const ai_i32 in_ch_step, const ai_i32 out_ch_step);

/**
 * @brief lite function for a float softmax zero channel non-linearity where
 * the softmax is applied per channel.
 * @ingroup lite_nl_generic_float
 * @param out_ptr The pointer to output buffer.
 * @param in_ptr The pointer to input buffer.
 * @param in_size The size of the input.
 * @param ch_size The size of each channel.
 * @param in_ch_step The input channel step.
 * @param out_ch_step The output channel step.
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_zero_channel_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size,
  const ai_size ch_size, const ai_i32 in_ch_step, const ai_i32 out_ch_step);

#endif /* LITE_NL_GENERIC_FLOAT_H */
1,907
C
33.071428
112
0.708967
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_pw.h
/**
  ******************************************************************************
  * @file    lite_pw.h
  * @author  AIS
  * @brief   header file of AI platform lite pw kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  @verbatim
  @endverbatim
  ******************************************************************************
  */
#ifndef LITE_PW_H
#define LITE_PW_H
#pragma once

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles pw convolutions generic case
 * @ingroup lite_pw
 * @param pData_in pointer to the signed 8-bit input activations
 * @param width_in input feature map width
 * @param height_in input feature map height
 * @param n_channel_in number of input channels
 * @param pWeights pointer to the signed 8-bit kernel weights
 * @param n_channel_out number of output channels
 * @param pBias pointer to the 32-bit bias terms
 * @param in_zeropoint input quantization zero point
 * @param out_zeropoint output quantization zero point
 * @param out_ch_format output channel format (channel-first vs channel-last)
 * @param pData_out pointer to the signed 8-bit output activations
 * @param height_loop_cnt number of height loop iterations -- TODO confirm
 *        exact meaning against the kernel implementation
 * @param weights_prefetch_enabled non-zero enables weights prefetching --
 *        assumption from the name, verify against the kernel implementation
 * @param scratch_size size of the scratch buffer
 * @param pBuffer_a pointer to the 16-bit scratch buffer
 */
LITE_API_ENTRY
void forward_lite_pw_sssa8_ch(const ai_i8 *pData_in,
                              const ai_u16 width_in,
                              const ai_u16 height_in,
                              const ai_u16 n_channel_in,
                              const ai_i8 *pWeights,
                              const ai_u16 n_channel_out,
                              const ai_i32 *pBias,
                              const ai_i8 in_zeropoint,
                              const ai_i8 out_zeropoint,
                              const ai_layer_format_type out_ch_format,
                              ai_i8 *pData_out,
                              ai_u32 height_loop_cnt,
                              ai_u16 weights_prefetch_enabled,
                              ai_i32 scratch_size,
                              ai_i16 *pBuffer_a);

#endif /*LITE_PW_H*/
1,973
C
34.249999
80
0.387228
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/ai_lite_inspect.h
/**
  ******************************************************************************
  * @file    ai_lite_inspect.h
  * @author  AST Embedded Analytics Research Platform
  * @brief   Definitions and implementations of runtime-lite inspection routines
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  @verbatim
  @endverbatim
  ******************************************************************************
  */
#ifndef AI_LITE_INSPECT_H
#define AI_LITE_INSPECT_H
#pragma once

#include "ai_platform.h"

/* Uncomment (or define at build time) to compile-in the inspection hooks. */
//#define HAS_LITE_INSPECT

AI_API_DECLARE_BEGIN

/* Types needed by inspect callback signature */
typedef ai_i32 ai_data_format;  /* format descriptor of the inspected buffer */
typedef ai_i32 ai_data_id;      /* numeric id of a node or of a data buffer */

/* Lite inspect callback definition.
 * Invoked by LITE_INSPECT_CB() with the user cookie, the id of the node being
 * executed, and the address/size/format/id of the inspected data buffer. */
typedef void (*ai_lite_inspect_cb)(
  const ai_handle cookie,
  const ai_data_id node_id,
  const ai_handle data, const ai_size data_size,
  const ai_data_format data_fmt, const ai_data_id data_id);

#ifdef HAS_LITE_INSPECT

/* Forward the data of node _node_id to the registered callback, if any.
 * NOTE(review): relies on a `graph` variable exposing `cb` and `cb_cookie`
 * fields being in scope at every expansion site -- confirm in callers. */
#define LITE_INSPECT_CB(_node_id, _data, _data_size, _data_fmt, _data_id) { \
  if (graph->cb) { \
    graph->cb(graph->cb_cookie, \
              (ai_data_id)(_node_id), (ai_handle)(_data), (ai_size)(_data_size), \
              (ai_data_format)(_data_fmt), (ai_data_id)(_data_id)); \
  } \
}

#else

/* Inspection disabled: the hook expands to a no-op. */
#define LITE_INSPECT_CB(_node_id, _data, _data_size, _data_fmt, _data_id) { \
  do { /* LITE_INSPECT_CB() */ } while (0); \
}

#endif  /* HAS_LITE_INSPECT */

AI_API_DECLARE_END

#endif  /* AI_LITE_INSPECT_H */
1,858
C
28.507936
82
0.545748
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/lite_maxpool_dqnn.h
/**
  ******************************************************************************
  * @file    lite_maxpool_dqnn.h
  * @author  AIS
  * @brief   header file of AI platform lite maxpool kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  @verbatim
  @endverbatim
  ******************************************************************************
  */
#ifndef LITE_MAXPOOL_DQNN_H
#define LITE_MAXPOOL_DQNN_H
#pragma once

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles maxpool with binary input and binary output - Lite I/F
 * @ingroup lite_maxpool_dqnn
 * @param pDataIn_init pointer to the binary input (1-bit elements packed into
 *        32-bit words, per the is1 naming -- TODO confirm packing)
 * @param pDataOut_init pointer to the binary output
 * @param pool_pad_value value used for padded positions
 * @param pScratch_32 pointer to the float scratch buffer
 */
LITE_API_ENTRY
void forward_lite_maxpool_is1os1(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init,
                                 const ai_i32 width_in, const ai_i32 width_out,
                                 const ai_i32 height_in, const ai_i32 height_out,
                                 const ai_u32 n_channel_in, const ai_u32 n_channel_out,
                                 const ai_i32 pool_width, const ai_i32 pool_height,
                                 const ai_i32 pool_pad_x, const ai_i32 pool_pad_y,
                                 const ai_i32 pool_stride_x, const ai_i32 pool_stride_y,
                                 const ai_u32 pool_pad_value, ai_float *pScratch_32);

/*!
 * @brief Handles maxpool with 8 bits signed input and output with a positive
 *        scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_is8os8_scalepos(const ai_i8 *pDataIn, ai_i8 *pDataOut,
                                          const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                          const ai_u16 ch_im_in,
                                          const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                          const ai_u16 padding_x, const ai_u16 padding_y,
                                          const ai_u16 stride_x, const ai_u16 stride_y,
                                          const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                          const ai_float InOut_ScaleRatio,
                                          const ai_i8 In_ZeroPoint,
                                          const ai_i8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 8 bits signed input and output with a negative
 *        scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_is8os8_scaleneg(const ai_i8 *pDataIn, ai_i8 *pDataOut,
                                          const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                          const ai_u16 ch_im_in,
                                          const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                          const ai_u16 padding_x, const ai_u16 padding_y,
                                          const ai_u16 stride_x, const ai_u16 stride_y,
                                          const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                          const ai_float InOut_ScaleRatio,
                                          const ai_i8 In_ZeroPoint,
                                          const ai_i8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 8 bits unsigned input and output with a positive
 *        scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_iu8ou8_scalepos(const ai_u8 *pDataIn, ai_u8 *pDataOut,
                                          const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                          const ai_u16 ch_im_in,
                                          const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                          const ai_u16 padding_x, const ai_u16 padding_y,
                                          const ai_u16 stride_x, const ai_u16 stride_y,
                                          const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                          const ai_float InOut_ScaleRatio,
                                          const ai_u8 In_ZeroPoint,
                                          const ai_u8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 8 bits unsigned input and output with a negative
 *        scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_iu8ou8_scaleneg(const ai_u8 *pDataIn, ai_u8 *pDataOut,
                                          const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                          const ai_u16 ch_im_in,
                                          const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                          const ai_u16 padding_x, const ai_u16 padding_y,
                                          const ai_u16 stride_x, const ai_u16 stride_y,
                                          const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                          const ai_float InOut_ScaleRatio,
                                          const ai_u8 In_ZeroPoint,
                                          const ai_u8 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 16 bits signed input and output with a positive
 *        scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_is16os16_scalepos(const ai_i16 *pApInput, ai_i16 *pApOutput,
                                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                            const ai_u16 ch_im_in,
                                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                            const ai_u16 padding_x, const ai_u16 padding_y,
                                            const ai_u16 stride_x, const ai_u16 stride_y,
                                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                            const ai_float InOut_ScaleRatio,
                                            const ai_i16 In_ZeroPoint,
                                            const ai_i16 Out_ZeroPoint);

/*!
 * @brief Handles maxpool with 16 bits unsigned input and output with a positive
 *        scale of the input - Lite I/F
 * @ingroup lite_maxpool_dqnn
 */
LITE_API_ENTRY
void forward_lite_maxpool_iu16ou16_scalepos(const ai_u16 *pApInput, ai_u16 *pApOutput,
                                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                                            const ai_u16 ch_im_in,
                                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                                            const ai_u16 padding_x, const ai_u16 padding_y,
                                            const ai_u16 stride_x, const ai_u16 stride_y,
                                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                                            const ai_float InOut_ScaleRatio,
                                            const ai_u16 In_ZeroPoint,
                                            const ai_u16 Out_ZeroPoint);

#endif /*LITE_MAXPOOL_DQNN_H*/
8,459
C
51.546584
109
0.422154
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares_backup/ST/AI/Inc/layers_common.h
/** ****************************************************************************** * @file layers_common.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_COMMON_H #define LAYERS_COMMON_H #pragma once // #include <stdlib.h> #ifdef USE_CYCLE_MEASUREMENTS #include "layers_cycles_estimation.h" #endif #include "ai_platform.h" #include "ai_common_config.h" #include "core_common.h" /* optimizations */ #define AI_OPTIM_DICT8_DOT_ARRAY_F32 (1) #define AI_OPTIM_DICT8_DTCM (1) #define AI_OPTIM_FUNC_MP_ARRAY_F32 (0) #define AI_LAYER_OBJ(obj_) \ ((ai_layer_base*)(obj_)) #define AI_LAYER_FUNC(func_) \ ((layer_func)(func_)) #define AI_LAYER_TYPE(type_) \ ( (ai_layer_type)((ai_u32)(type_)&0xFFFF) ) #define AI_LAYER_TYPE_ENTRY(type_) \ AI_CONCAT(AI_CONCAT(AI_LAYER_, type_), _TYPE) #define AI_LAYER_TYPE_NAME(type_) \ ai_layer_type_name(AI_LAYER_TYPE(type_)) #if (AI_TOOLS_API_VERSION <= AI_TOOLS_API_VERSION_1_3) #pragma message ("Including deprecated AI_LAYER_OBJ_INIT, AI_LAYER_OBJ_DECLARE") AI_DEPRECATED #define AI_LAYER_OBJ_INIT(type_, id_, network_, \ next_, forward_, ...) \ { \ AI_NODE_COMMON_INIT(AI_CONCAT(AI_LAYER_, type_), id_, 0x0, \ NULL, network_, next_, forward_), \ ## __VA_ARGS__ \ } AI_DEPRECATED #define AI_LAYER_OBJ_DECLARE(varname_, id_, type_, struct_, forward_func_, \ network_, next_, attr_, ...) 
\ AI_ALIGNED(4) \ attr_ AI_CONCAT(ai_layer_, struct_) varname_ = \ AI_LAYER_OBJ_INIT(type_, id_, network_, \ next_, forward_func_, \ ## __VA_ARGS__); #else #define AI_LAYER_OBJ_INIT(type_, id_, flags_, klass_, network_, \ next_, forward_, tensors_, ...) \ { \ AI_NODE_COMMON_INIT(AI_CONCAT(AI_LAYER_, type_), id_, flags_, \ klass_, network_, next_, forward_), \ .tensors = (tensors_), \ ## __VA_ARGS__ \ } #define AI_LAYER_OBJ_DECLARE( \ varname_, id_, \ type_, flags_, klass_obj_, \ struct_, forward_func_, \ tensors_chain_, \ network_, next_, attr_, ...) \ AI_ALIGNED(4) \ attr_ AI_CONCAT(ai_layer_, struct_) varname_ = \ AI_LAYER_OBJ_INIT(type_, id_, flags_, klass_obj_, network_, \ next_, forward_func_, tensors_chain_, ## __VA_ARGS__); #endif /* AI_TOOLS_API_VERSION_1_3 */ #ifdef HAS_AI_ASSERT #define AI_LAYER_IO_GET(layer_, in_, out_) \ ASSERT_LAYER_SANITY(layer_) \ const ai_tensor* in_ = GET_TENSOR_IN((layer_)->tensors, 0); \ ai_tensor* out_ = GET_TENSOR_OUT((layer_)->tensors, 0); \ ASSERT_TENSOR_DATA_SANITY(in_) \ ASSERT_TENSOR_DATA_SANITY(out_) #define AI_LAYER_TENSOR_LIST_IO_GET(layer_, tlist_in_, tlist_out_) \ ASSERT_LAYER_SANITY(layer_) \ const ai_tensor_list* tlist_in_ = GET_TENSOR_LIST_IN((layer_)->tensors); \ ai_tensor_list* tlist_out_ = GET_TENSOR_LIST_OUT((layer_)->tensors); \ ASSERT_TENSOR_LIST_SANITY(tlist_in_) \ ASSERT_TENSOR_LIST_SANITY(tlist_out_) #define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \ const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \ const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \ ? 
GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \ : NULL; \ ASSERT_TENSOR_DATA_SANITY(weights_) \ if (bias_) { ASSERT_TENSOR_DATA_SANITY(bias_) } #else #define AI_LAYER_IO_GET(layer_, in_, out_) \ const ai_tensor* in_ = GET_TENSOR_IN((layer_)->tensors, 0); \ ai_tensor* out_ = GET_TENSOR_OUT((layer_)->tensors, 0); #define AI_LAYER_TENSOR_LIST_IO_GET(layer_, tlist_in_, tlist_out_) \ const ai_tensor_list* tlist_in_ = GET_TENSOR_LIST_IN((layer_)->tensors); \ ai_tensor_list* tlist_out_ = GET_TENSOR_LIST_OUT((layer_)->tensors); #define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \ const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \ const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \ ? GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \ : NULL; \ #endif /*HAS_AI_ASSERT*/ AI_API_DECLARE_BEGIN /*! * @defgroup layers_common Layers Common * @brief Implementation of the common layers datastructures * This header enumerates the layers specific definition implemented in the * library toghether with the macros and datatypes used to manipulate them. */ /*! * @typedef (*func_copy_tensor) * @ingroup layers_common * @brief Fuction pointer for generic tensor copy routines * this function pointer abstracts a generic tensor copy routine. */ typedef ai_bool (*func_copy_tensor)(ai_tensor* dst, const ai_tensor* src); /*! * @enum ai_layer_type * @ingroup layers_common * @brief ai_tools supported layers type id */ typedef enum { #define LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_) \ AI_LAYER_TYPE_ENTRY(type_) = id_, #include "layers_list.h" } ai_layer_type; #define AI_LAYER_COMMON_FIELDS_DECLARE \ AI_NODE_COMMON_FIELDS_DECLARE #define AI_LAYER_STATEFUL_FIELDS_DECLARE \ AI_NODE_STATEFUL_FIELDS_DECLARE /*! * @typedef void (*layer_func)(struct ai_layer_* layer) * @ingroup layers_common * @brief Callback signatures for all layers forward functions */ typedef node_func layer_func; /*! 
* @struct ai_layer_base * @ingroup layers_common * @brief Structure encoding a base layer in the network * */ typedef ai_node ai_layer_base; /*! * @struct ai_layer_stateful * @ingroup layers_common * @brief Structure encoding a stateful layer in the network * */ typedef ai_node_stateful ai_layer_stateful; /*! * @brief Check the custom network types against the internally compiled ones * Helper function to check if the private APIs where compiled with a different * `datatypes_network.h` than the one provided to the caller. * @ingroup layers_common * @param signatures list of type sizes signatures (first element is the number of types) * @return false if there is a type size mismatch */ AI_INTERNAL_API ai_bool ai_check_custom_types(const ai_custom_type_signature* signatures); /*! * @brief Helper API to retrieve a human readable layer type from enum * @ingroup layers_common * @param type in type of layer * @return string defining the type of the layer */ AI_INTERNAL_API const char* ai_layer_type_name(const ai_layer_type type); /*! * @brief Helper API to check if a node is a valid layer type * @ingroup layers_common * @param type in type of layer * @return true if the layer is one of the ones listed in the enum, * false otherwise */ AI_INTERNAL_API ai_bool ai_layer_type_is_valid(const ai_layer_type type); #ifdef HAS_AI_ASSERT /*! 
* @brief chack scratch size computed with actual scratch buffer size * @ingroup layers * @param layer_type the layer type * @param fmt buffers format * @param filt_width filter width (when relevant) * @param filt_height filter height (when relevant) * @param n_channel_in the number of channels in * @param n_channel_out the number of channels out * @param is_pointwise is pointwise convulation (conv2d) * @param is_rgb is rgb convolution (conv2d) * @param is depthwise is depthwise convolution (conv2d) * @param is_ch_wise has weights per channel * @param is_sssa is signed * @param p_tensor_scratch the scratch tensor * @param p_function_name the name of the function * @param line_nb the the line of the function */ AI_INTERNAL_API ai_size ai_layer_get_scratch_size( ai_layer_type layer_type, ai_array_format fmt, ai_size filt_width, ai_size filt_height, ai_u16 n_channel_in, ai_u16 n_channel_out, ai_bool is_pointwise, ai_bool is_rgb, ai_bool is_depthwise, ai_bool is_ch1st, ai_bool is_ch_wise, ai_bool is_sss); /*! 
* @brief chack scratch size computed with actual scratch buffer size * @ingroup layers * @param layer_type the layer type * @param fmt buffers format * @param filt_width filter width (when relevant) * @param filt_height filter height (when relevant) * @param n_channel_in the number of channels in * @param n_channel_out the number of channels out * @param is_pointwise is pointwise convulation (conv2d) * @param is_rgb is rgb convolution (conv2d) * @param is depthwise is depthwise convolution (conv2d) * @param is_ch_wise has weights per channel * @param is_sssa is signed * @param p_tensor_scratch the scratch tensor * @param p_function_name the name of the function * @param line_nb the the line of the function */ AI_INTERNAL_API void ai_layer_check_scratch_size( ai_layer_type layer_type, ai_array_format fmt, ai_size filt_width, ai_size filt_height, ai_u16 n_channel_in, ai_u16 n_channel_out, ai_bool is_pointwise, ai_bool is_rgb, ai_bool is_depthwise, ai_bool is_ch1st, ai_bool is_ch_wise, ai_bool is_sssa, ai_tensor *p_tensor_scratch, const char *p_function_name, int line_nb); #define CHECK_SCRATCH_BUFFER_SIZE( layer_type, fmt, \ filt_width, filt_height, \ n_channel_in, n_channel_out, \ is_pointwise, is_rgb, \ is_depthwise, is_ch1st, is_ch_wise, \ is_sssa_ch, p_tensor_scratch) \ ai_layer_check_scratch_size(layer_type, fmt, \ filt_width, filt_height, \ n_channel_in, n_channel_out, \ is_pointwise, is_rgb, \ is_depthwise, is_ch1st, is_ch_wise, \ is_sssa_ch, p_tensor_scratch,\ __FUNCTION__, __LINE__); #endif AI_API_DECLARE_END #endif /*LAYERS_COMMON_H*/
10,739
C
34.681063
99
0.607133
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/formats_list.h
/**
  ******************************************************************************
  * @file    format_list.h
  * @author  AST Embedded Analytics Research Platform
  * @brief   Definitions of AI platform public APIs types
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2019 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  @verbatim
  @endverbatim
  ******************************************************************************
  */

/* X-macro table: the including site defines FMT_ENTRY(...) before including
 * this file; there is intentionally no include guard so the list can be
 * expanded multiple times (FMT_ENTRY is #undef'ed at the end of the file).
 *
 * FMT_ENTRY( exp_(0/1 only), name_, type_id_,
 *            sign_bit_, float_bit_, pmask_, bits_, fbits_, ldiv_bits_)
 * Specifications (in order of the bit fields, little endian):
 * - name_ : the enum used to define both the ai_array_format and
 *   ai_buffer_format.
 * - exp_ (1 bit) : boolean flag (0 or 1) indicating whether the format is
 *   available as a public API ai_buffer format; in this case name_ also
 *   indicates the enum name of the ai_buffer format.
 * - (7 bits) : reserved for flags
 * - sign_bit_ (1 bit) : codes whether or not the format is of a signed type
 * - float_bit_ (1 bit) : codes if the format is float
 * - ldiv_bits_ (2 bits) : right shift value for computing the byte size of
 *   the format
 * - type_id_ (4 bits) : defines the "family" of the format: see @ref AI_FMT_Q
 *   as an example. Currently supported types are: AI_FMT_Q (fixed point
 *   types), AI_FMT_FLOAT (floating point values), AI_FMT_LUT4 or AI_FMT_LUT8
 *   (compressed formats).
 * - pmask_ (3 bits) : padding mask bits for the format
 * - bits_ (7 bits) : size in bits of the format (NB: integer + fractional bits)
 * - fbits_ (7 bits) : number of fractional bits for the format
 *   (for AI_FMT_Q only)
 */

/* Format none entry */
FMT_ENTRY(1, NONE, AI_FMT_NONE, 0, 0, 0x0, 0, 0, 0)

/* Floating point formats */
FMT_ENTRY(1, FLOAT, AI_FMT_FLOAT, 1, 1, 0x0, 32, 0, 0)
FMT_ENTRY(0, FLOAT64, AI_FMT_FLOAT, 1, 1, 0x0, 64, 0, 0)
FMT_ENTRY(0, FLOAT16, AI_FMT_FLOAT, 1, 1, 0x0, 16, 0, 0)

/* Integer formats (i.e. fractional bits = 0!) */
FMT_ENTRY(1, U8, AI_FMT_Q, 0, 0, 0x0, 8, 0, 0)
FMT_ENTRY(1, U16, AI_FMT_Q, 0, 0, 0x0, 16, 0, 0)
FMT_ENTRY(1, U32, AI_FMT_Q, 0, 0, 0x0, 32, 0, 0)
FMT_ENTRY(0, U64, AI_FMT_Q, 0, 0, 0x0, 64, 0, 0)
FMT_ENTRY(1, U1, AI_FMT_Q, 0, 0, 0x0, 1, 0, 0)
FMT_ENTRY(0, U4, AI_FMT_Q, 0, 0, 0x0, 4, 0, 0)

FMT_ENTRY(1, S8, AI_FMT_Q, 1, 0, 0x0, 8, 0, 0)
FMT_ENTRY(1, S16, AI_FMT_Q, 1, 0, 0x0, 16, 0, 0)
FMT_ENTRY(1, S32, AI_FMT_Q, 1, 0, 0x0, 32, 0, 0)
FMT_ENTRY(0, S64, AI_FMT_Q, 1, 0, 0x0, 64, 0, 0)
FMT_ENTRY(1, S1, AI_FMT_Q, 1, 0, 0x0, 1, 0, 0)
FMT_ENTRY(0, S4, AI_FMT_Q, 1, 0, 0x0, 4, 0, 0)

/* Fixed-point formats including ARM CMSIS Q7, Q15, Q31 ones */
FMT_ENTRY(1, Q, AI_FMT_Q, 1, 0, 0x0, 0, 0, 0)
FMT_ENTRY(1, Q7, AI_FMT_Q, 1, 0, 0x0, 8, 7, 0)
FMT_ENTRY(1, Q15, AI_FMT_Q, 1, 0, 0x0, 16, 15, 0)
FMT_ENTRY(0, Q31, AI_FMT_Q, 1, 0, 0x0, 32, 31, 0)

FMT_ENTRY(1, UQ, AI_FMT_Q, 0, 0, 0x0, 0, 0, 0)
FMT_ENTRY(1, UQ7, AI_FMT_Q, 0, 0, 0x0, 8, 7, 0)
FMT_ENTRY(1, UQ15, AI_FMT_Q, 0, 0, 0x0, 16, 15, 0)
FMT_ENTRY(0, UQ31, AI_FMT_Q, 0, 0, 0x0, 32, 31, 0)

/* Compressed formats */
FMT_ENTRY(0, LUT4_FLOAT, AI_FMT_LUT4, 1, 1, 0x0, 32, 0, 3)
FMT_ENTRY(0, LUT8_FLOAT, AI_FMT_LUT8, 1, 1, 0x0, 32, 0, 2)
FMT_ENTRY(0, LUT4_Q15, AI_FMT_LUT4, 1, 0, 0x0, 16, 15, 2)
FMT_ENTRY(0, LUT8_Q15, AI_FMT_LUT8, 1, 0, 0x0, 16, 15, 1)
FMT_ENTRY(0, LUT4_UQ15, AI_FMT_LUT4, 0, 0, 0x0, 16, 15, 2)
FMT_ENTRY(0, LUT8_UQ15, AI_FMT_LUT8, 0, 0, 0x0, 16, 15, 1)

/* Boolean format */
FMT_ENTRY(1, BOOL, AI_FMT_BOOL, 0, 0, 0x0, 8, 0, 0)

#undef FMT_ENTRY
3,930
C
41.72826
80
0.564885
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/core_convert.h
/** ****************************************************************************** * @file core_convert.h * @author AST Embedded Analytics Research Platform * @brief header file of core utils routines ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef CORE_CONVERT_H #define CORE_CONVERT_H #pragma once #include "ai_platform.h" #include "ai_platform_interface.h" #include "core_common.h" AI_API_DECLARE_BEGIN /*! * @defgroup core_convert Core Convert Routines * @brief Implementation of core node format convertion routines * (Q7 to float, ... etc.) */ /*! * @brief Convert tensors from float to quantized or viceversa * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert(ai_node *pNode); /*! * @brief Convert integer tensors between QM.N formats (8/16 bits) * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_fixed(ai_node *pNode); /*! * @brief Convert integer tensors between signed and usigned (int8/uint8) formats * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_integer(ai_node *pNode); /*! * @brief Convert float tensor to binary * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_if32os1(ai_node *pNode); /*! 
* @brief Convert binary tensor to float * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_is8os1(ai_node *pNode); /*! * @brief Convert binary tensor to signed int 8 bit * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_is1os8(ai_node *pNode); /*! * @brief Convert binary tensor to signed int 16 bit * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_is1os16(ai_node *pNode); /*! * @brief Convert binary tensor to float * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_is1of32(ai_node *pNode); /*! * @brief Convert signed int 16 bit tensor to float * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_is16of32(ai_node *pNode); /*! * @brief Convert unsigned int 16 bit tensor to float * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_iu16of32(ai_node *pNode); /*! * @brief Convert float tensor to signed int 16 bit * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_if32os16(ai_node *pNode); /*! * @brief Convert float tensor to unsigned int 16 bit * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_if32ou16(ai_node *pNode); /*! * @brief Convert signed int 16 bit tensor to unsigned int 16 bit * @ingroup core_convert * @param[in] pNode in a handler to node (layer or operator) */ AI_INTERNAL_API void node_convert_is16ou16(ai_node *pNode); /*! 
* @brief Convert a shape struct into a stride struct * @ingroup core_convert * @param[in] in a pointer to a shape to convert * @return a condverted stride datastruct */ AI_INTERNAL_API void core_shape_to_stride(ai_stride* out, const ai_shape* in); #endif /*CORE_CONVERT_H*/
4,123
C
23.993939
81
0.65171
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_conv2d.h
/** ****************************************************************************** * @file layers_conv2d.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform conv2d layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_CONV2D_H #define LAYERS_CONV2D_H #pragma once #include "layers_nl.h" #include "layers_pool.h" #define AI_LAYER_CONV2D_FIELDS_DECLARE \ AI_LAYER_COMMON_FIELDS_DECLARE \ ai_u32 groups; /*!< groups for separable convolution */ \ AI_CONST ai_array* nl_params; /*!< array pointer to non linear parameters */ \ func_nl nl_func; /*!< function pointer to non linear transform */ \ ai_shape_2d filter_stride; /*!< filter stride, how much the filter moves */ \ ai_shape_2d dilation; /*!< dilation value along axis of the filter */ \ ai_shape filter_pad; /*!< filter pad 4d */ \ ai_layer_format_type in_ch_format; /*!< Input format (Channel 1st vs Channel last */ \ ai_layer_format_type out_ch_format; /*!< Output format (Channel 1st vs Channel last */ /*! * @defgroup layers_conv2d Convolutive Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_dense * @ingroup layers_conv2d * @brief Dense (fully connected) layer */ typedef ai_layer_base ai_layer_dense; /*! 
* @struct ai_layer_gemm * @ingroup layers_conv2d * @brief layer for General Matrix Multiplication * * Layer for General Matrix Multiplication (GEMM): * \f{equation}{ Y = \alpha A \cdot B + \beta C \f} * \f$\alpha\f$ and \f$\beta\f$ are paramaters, A and B are matrices, * C is a matrix or an array. Size checks for A, B, C, and Y are performed and * broadcast is applied on C if necessary. * This is a sequential layer (see @ref ai_layer). */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gemm_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_float alpha; /*!< alpha coefficient */ ai_float beta; /*!< beta coefficient */ ai_u8 tA; /*!< transpose A flag */ ai_u8 tB; /*!< transpose B flag */ } ai_layer_gemm; /*! * @struct ai_layer_conv2d * @ingroup layers_conv2d * @brief 2D convolutional layer with strides and pads */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_ { AI_LAYER_CONV2D_FIELDS_DECLARE } ai_layer_conv2d; /*! * @struct ai_layer_conv2d_nl_pool * @ingroup layers_conv2d * @brief 2D convolutional layer + nl + pooling with strides and pads */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_nl_pool_ { AI_LAYER_CONV2D_FIELDS_DECLARE ai_shape_2d pool_size; /*!< pooling size */ ai_shape_2d pool_stride; /*!< pooling stride */ ai_shape pool_pad; /*!< pooling pad */ ai_handle pool_func; /*!< function pointer to pooling transform */ } ai_layer_conv2d_nl_pool; AI_INTERNAL_API void ai_dict8_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut, const ai_float* data1, const ai_size data_size); AI_INTERNAL_API void ai_dict4_dot_array_f32(ai_handle out, ai_ptr_const data0, ai_ptr_const lut, const ai_float* data1, const ai_size data_size); /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Computes the activations of a floating point 32 2D convolutional layer. 
* @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_if32of32wf32(ai_layer* layer); /*! * @brief Computes the activations of a floating point 32 2D dw layer. * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_if32of32wf32(ai_layer* layer); /*! * @brief Computes the activations of a floating point 32 2D convolutional group layer. * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_if32of32wf32_group(ai_layer* layer); /*! * @brief Computes the activations of a 2D floating point 32 pool fused convolutional layer. * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_if32of32wf32_pool(ai_layer* layer); /*! * @brief Computes the activations of a 2D floating point 32 pool fused dw layer. * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_if32of32wf32_pool(ai_layer* layer); /*! * @brief Computes the activations of a 2D floating point 32 pool fused convolutional group layer. * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_if32of32wf32_group_pool(ai_layer* layer); /*! * @brief Computes the activations of a GEMM layer. * @ingroup layers * @param layer the layer including output and input tensors */ AI_INTERNAL_API void forward_gemm(ai_layer* layer); /*! * @brief Computes matmul layer, intended as numpy.matmul(A,B). * @ingroup layers * @param layer the layer including output and input tensors */ AI_INTERNAL_API void forward_matmul(ai_layer* layer); /*! * @brief Computes the activations of a dense (fully connected) layer. * @ingroup layers_conv2d * @param layer the dense layer */ AI_INTERNAL_API void forward_dense(ai_layer* layer); /*! * @brief Computes the activations of a fixed point 2D convolutional layer. 
* @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of a fixed point @ref ai_layer_conv2d_nl_pool * layer. * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer. * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_integer(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer * for SSSA per layer quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_integer_SSSA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer * for SSSA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is8os8ws8_sssa_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme, with 3x3 kernels * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_3x3_sssa8_ch(ai_layer *pLayer); /*! 
* @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme, with 3x3 kernels and input are * channel first * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_3x3_ch1st_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme with depth multiplier > 1 * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_dm_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of int8 quantized DW layers. * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_all_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized PW layer * for SSSA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_pw_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized dilated Conv2d layer * for SSSA per channel quantized scheme (valid padding) * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_dilated_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 non dilated Conv2d layer * for SSSA per channel quantized scheme (valid padding) * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_deep_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 non dilated Conv2d layer * for SSSA per channel quantized scheme (valid padding) * number of output channel is greater than 8 * Kernels shall be 3x3 and stride is (1,1) * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_deep_3x3_sssa8_ch(ai_layer *pLayer); /*! 
* @brief Computes the activations of a int8 non dilated Conv2d layer * for SSSA per channel quantized scheme (valid or same padding) * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized Conv2d layer * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_all_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized RGB Conv2d layer * for SSSA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_rgb_sssa8_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme with pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme, with 3x3 kernels, * with pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_3x3_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme, with 3x3 kernels, * with pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_3x3_ch1st_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized DW layer * for SSSA per channel quantized scheme with depth multiplier > 1 * with pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_dm_sssa8_ch_nl_pool(ai_layer *pLayer); /*! 
* @brief Computes the activations of int8 quantized DW layers, with pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_all_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized PW layer, * with pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_pw_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized dilated Conv2d layer * for SSSA per channel quantized scheme (valid padding) and pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_dilated_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized non dilated Conv2d layer * for SSSA per channel quantized scheme (valid padding) and pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_deep_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 non dilated Conv2d layer * for SSSA per channel quantized scheme (valid padding) and pooling fused * number of output channel is greater than 8 * Kernels shall be 3x3 and stride is (1,1) * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_deep_3x3_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a int8 quantized non dilated Conv2d layer * for SSSA per channel quantized scheme (valid or same padding) and pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_sssa8_ch_nl_pool(ai_layer *pLayer); /*! 
* @brief Computes the activations of a int8 quantized Conv2d layer and pooling fused * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_all_sssa8_ch_nl_pool(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer * for SSUA per layer quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_integer_SSUA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer * for SSUA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_integer_SSUA_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer * for UAUA per layer quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_integer_UAUA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer * for UAUA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_integer_UAUA_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer. * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_integer(ai_layer *pLayer); /*! 
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer * for SSSA per layer quantized scheme * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_integer_SSSA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer * for SSSA per channel quantized scheme * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_integer_SSSA_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer * for SSUA per layer quantized scheme * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_integer_SSUA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer * for SSUA per channel quantized scheme * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_integer_SSUA_ch(ai_layer *pLayer); /*! 
* @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer * for UAUA per layer quantized scheme * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_integer_UAUA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer @ref ai_layer_conv2d_nl_pool layer * for UAUA per channel quantized scheme * The @ref ai_layer_conv2d_nl_pool is a fused conv2D + optional nonlinear * layer + optional pooling / nonlinearity (average, max) * @ingroup layers_conv2d * @param layer see @ai_layer_conv2d_nl_pool */ AI_INTERNAL_API void forward_conv2d_nl_pool_integer_UAUA_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a integer dense (fully connected) layer. * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_integer(ai_layer *pLayer); /*! * @brief Computes the activations of a integer dense (fully connected) layer * for SSSA per layer quantized scheme * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_integer_SSSA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer dense (fully connected) layer * for SSSA per channel quantized scheme * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_integer_SSSA_ch(ai_layer *pLayer); /*! * @brief Computes the activations of a integer dense (fully connected) layer * for SSUA per layer quantized scheme * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_integer_SSUA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer dense (fully connected) layer * for SSUA per channel quantized scheme * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_integer_SSUA_ch(ai_layer *pLayer); /*! 
* @brief Computes the activations of a integer dense (fully connected) layer * for UAUA per layer quantized scheme * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_integer_UAUA(ai_layer *pLayer); /*! * @brief Computes the activations of a integer dense (fully connected) layer * for UAUA per channel quantized scheme * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_integer_UAUA_ch(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_CONV2D_H*/
19,921
C
30.8752
98
0.69359
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_lite_math_helpers.h
#ifndef AI_LITE_MATH_HELPERS_H #define AI_LITE_MATH_HELPERS_H #include <math.h> #include "ai_platform.h" #include "ai_platform_interface.h" #include "ai_datatypes_defines.h" #define AI_FLOAT_TOLERANCE (6.19209290e-5F) /* Used for small calculation noise issues */ #define AI_FLOAT_EPSILON (1.19209290e-7F) #define AI_I8_EPSILON (0.00787401F) /* 1/(2^7 - 1) */ #define AI_I16_EPSILON (3.051851e-5F) /* 1/(2^15 - 1) */ #define AI_FLT_MAX (3.40282346638528859812e+38f) #define AI_MIN(x,y) ( ((x)<(y)) ? (x) : (y) ) #define AI_MAX(x,y) ( ((x)>(y)) ? (x) : (y) ) #define AI_SIGN(x) (((x)>0) ? 1 : -1) #define AI_CLAMP(x, min, max) AI_MIN(AI_MAX(x,min), max) #define AI_ABS(x) fabsf(x) #define AI_ABS_DIFF(x, y) ( ((x)>(y)) ? ((x)-(y)) : ((y)-(x)) ) #define AI_NEG(x) ( -1 * (x) ) #define AI_NOT(x) ( ((x)==true) ? false : true) #define AI_RECIPROCAL(x) ( 1.0f / (x) ) #define AI_CEIL(x) ceilf(x) #define AI_FLOOR(x) floorf(x) #define AI_FLOOR_DIV(x, y) AI_FLOOR((x)/(y)) /* floor division: x // y */ #define AI_FLOOR_MOD(x, y) fmodf(x, y) #define AI_ROUND(x) roundf(x) #define AI_POW(x,y) powf(x, y) #define AI_SQUARED_DIFF(x, y) (((x)-(y)) * ((x)-(y))) #define AI_FLOAT_NEGATIVE_HALF (-0.5f + AI_FLOAT_EPSILON) #define AI_FLOAT_POSITIVE_HALF (0.5f) #define AI_MATH_ACOS(x) acosf(x) #define AI_MATH_ACOSH(x) acoshf(x) #define AI_MATH_ASIN(x) asinf(x) #define AI_MATH_ASINH(x) asinhf(x) #define AI_MATH_ATAN(x) atanf(x) #define AI_MATH_ATANH(x) atanhf(x) #define AI_MATH_COS(x) cosf(x) #define AI_MATH_COSH(x) coshf(x) #define AI_MATH_ERF(x) erff(x) #define AI_MATH_EXP(x) expf(x) #define AI_MATH_LOG(x) logf(x) #define AI_MATH_POW(x, e) powf((x), (e)) #define AI_MATH_RSQRT(x) (1.0f / AI_MATH_SQRT(x)) #define AI_MATH_SIN(x) sinf(x) #define AI_MATH_SINH(x) sinhf(x) #define AI_MATH_SQRT(x) ai_math_sqrt(x) #define AI_MATH_TAN(x) tanf(x) #define AI_MATH_TANH(x) tanhf(x) #define AI_MATH_SQUARE(x) AI_MATH_POW(x, 2.0f) #define AI_MATH_ACOS(x) acosf(x) #define AI_MATH_ACOSH(x) acoshf(x) #define 
AI_MATH_ASIN(x) asinf(x) #define AI_MATH_ASINH(x) asinhf(x) #define AI_MATH_ATAN(x) atanf(x) #define AI_MATH_ATANH(x) atanhf(x) #define AI_MATH_COS(x) cosf(x) #define AI_MATH_COSH(x) coshf(x) #define AI_MATH_ERF(x) erff(x) #define AI_MATH_EXP(x) expf(x) #define AI_MATH_LOG(x) logf(x) #define AI_MATH_POW(x, e) powf((x), (e)) #define AI_MATH_RSQRT(x) (1.0f / AI_MATH_SQRT(x)) #define AI_MATH_SIN(x) sinf(x) #define AI_MATH_SINH(x) sinhf(x) #define AI_MATH_SQRT(x) ai_math_sqrt(x) #define AI_MATH_TAN(x) tanf(x) #define AI_MATH_TANH(x) tanhf(x) #define AI_MATH_SQUARE(x) AI_MATH_POW(x, 2.0f) #define AI_MATH_RELU_TEST(x, thr, min, max) \ (((x)<=(thr)) ? (min) : (max)) #define AI_MATH_CLIP_LINEAR_REMAP(x, alpha, beta) \ (AI_MAX(0, AI_MIN(1, ((x) * (alpha) + (beta))))) #define AI_MATH_RELU_GENERIC(x, thr, alpha, max) \ AI_MATH_RELU_TEST(x, max, AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha), max) #define AI_MATH_RELU_GENERIC_NO_MAX(x, thr, alpha) \ AI_MATH_RELU_TEST(x, thr, ((alpha)*((x)-(thr))), x) #define AI_MATH_RELU_THRESHOLDED(x, thr) \ AI_MATH_RELU_TEST(x, thr, 0, (x)) #define AI_MATH_LEAKY_RELU(x, neg_slope, pos_slope) \ AI_MATH_RELU_TEST(x, 0, (x)*(neg_slope), (x)*(pos_slope)) // ( ((x)>0) ? (x)*(pos_slope) : (x)*(neg_slope) ) #define AI_MATH_PRELU(x, slope) \ AI_MATH_RELU_TEST(x, 0, (x)*(slope), (x)) // AI_MATH_LEAKY_RELU(x, slope, 1) #define AI_MATH_RELU(x) \ AI_MATH_RELU_TEST(x, 0, 0, x) // AI_MAX(x, 0) #define AI_MATH_ELU(x, alpha) \ (AI_MAX(0.0f, (x)) + AI_MIN(0.0f, (alpha) * (AI_MATH_EXP(x)-1.0f))) #define AI_MATH_SELU(x, alpha, scale) \ ((scale)*AI_MATH_ELU(x, alpha)) #define AI_MATH_SCALED_TANH(x, alpha, beta) \ ((alpha)*AI_MATH_TANH((beta)*(x))) #define AI_MATH_SIGMOID(x) \ (1.0f / (1.0f + AI_MATH_EXP(-(x)))) #define AI_MATH_LOGISTIC(x)\ (x < 0) ? 
(1.0f -(1.0f / (1.0f + AI_MATH_EXP(-AI_ABS(x))))) :\ (1.0f / (1.0f + AI_MATH_EXP(-AI_ABS(x)))) #define AI_MATH_HARD_SIGMOID(x, alpha, beta) \ AI_MATH_CLIP_LINEAR_REMAP(x, alpha, beta) /* Formula with higher accuracy */ #define AI_MATH_SWISH(x) \ ((x) * AI_MATH_SIGMOID(x)) #define AI_MATH_HARD_SWISH(x) \ ((x) * AI_MATH_CLIP_LINEAR_REMAP(x, 1.0f/6, 0.5f)) #define AI_MATH_SOFT_PLUS(x) \ AI_MATH_LOG(1.0f + AI_MATH_EXP(x)) #define AI_MATH_SOFT_SIGN(x) \ ((x) / (1.0f + AI_ABS(x))) AI_API_DECLARE_BEGIN /*! * @brief platform optimized square root on a float value * @ingroup math_helpers * @param x input value * @return square root of the value */ AI_INTERFACE_ENTRY ai_float ai_math_sqrt(const ai_float x); AI_API_DECLARE_END #endif /* AI_LITE_MATH_HELPERS_H */
5,197
C
33.197368
79
0.556667
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_pad_dqnn.h
/** ****************************************************************************** * @file lite_pad_dqnn.h * @author AIS * @brief header file of AI platform lite padding kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_PADDING_DQNN_H #define LITE_PADDING_DQNN_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles padding with binary input and binary output - Lite I/F * @ingroup lite_padding_dqnn */ LITE_API_ENTRY void forward_lite_pad_is1os1(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_i32 width_in, const ai_i32 width_out, const ai_i32 height_in, const ai_i32 height_out, const ai_u32 n_channel_out, const ai_i32 mode, const ai_u16 pads_x, const ai_u16 pads_y, const ai_u16 pads_x_r, const ai_u16 pads_y_b, const ai_u32 pad_value); #endif /*LITE_PADDING_DQNN_H*/
2,016
C
37.788461
80
0.381448
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_conv2d_dqnn.h
/** ****************************************************************************** * @file lite_conv2d_dqnn.h * @author AIS * @brief header file of AI platform lite conv kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_CONV2D_DQNN_H #define LITE_CONV2D_DQNN_H #pragma once #include "ai_lite_interface.h" # define AI_16_OVERFLOW_CHECK(val_) (val_ <= 32767) /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ AI_API_DECLARE_BEGIN /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os1ws1_bn_pad0(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold); /*! 
* @brief Handles 2D convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Lite I/F * - Optimized thanks to Optim0 assumptions * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os1ws1_bn_pad0_optim0(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold); /*! * @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os8ws1_bn_pad0(const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale, const ai_float *pOffset); /*! 
* @brief Handles 2D convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os1ws1_bn_pad1(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i32 pad_value); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * - Optimized thanks to Optim2 assumptions * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os1ws1_bn_pad1_optim2(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i32 pad_value); /*! 
* @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os8ws1_bn_pad1(const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale, const ai_float *pOffset, const ai_i32 pad_value); /*! * @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * - Optimized thanks to Optim1 assumptions * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os8ws1_bn_pad1_optim1(const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale, const ai_float *pOffset, const ai_i32 pad_value); /** * @brief Handles 2D convolution with binary input, fixed point 16-bits output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os16ws1_bn_pad0_fxp(const ai_u32 *pDataIn_init, ai_i16 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 
filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os16ws1_bn_pad1_fxp(const ai_u32 *pDataIn_init, ai_i16 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init, const ai_i32 pad_value); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * - Optimized thanks to Optim1 assumptions * * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1os16ws1_bn_pad1_optim1_fxp(const ai_u32 *pDataIn_init, ai_i16 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init, const ai_i32 pad_value); /** * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and * binary weights - with 0 padding (QKeras like) - Lite I/F * * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1ou16ws1_bn_pad1_fxp(const ai_u32 *pDataIn_init, ai_u16 
*pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init, const ai_i32 pad_value); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1ou16ws1_bn_pad0_fxp(const ai_u32 *pDataIn_init, ai_u16 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F. * - Optimized thanks to Optim1 assumptions * * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is1ou16ws1_bn_pad1_optim1_fxp(const ai_u32 *pDataIn_init, ai_u16 *pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_float *pScale_init, const ai_float *pOffset_init, const ai_i32 pad_value); /*! 
* @brief Handles 2D convolution with 8-bits quantized Input and weights and * binary output - Lite I/F * @ingroup lite_conv2d_dqnn * @param layer conv2d_dqnn layer */ LITE_API_ENTRY void forward_lite_conv2d_is8os1ws8(const ai_i8 *pDataIn_init, ai_u32 *pDataOut_init, const ai_i8 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i8 in_zeropoint); /*! * @brief Handles 2D convolution with 8-bits quantized Input and weights and * binary output - Lite I/F - Optimized thanks to Optim2 assumptions * @ingroup lite_conv2d_dqnn * @param layer conv2d_dqnn layer */ LITE_API_ENTRY void forward_lite_conv2d_is8os1ws8_optim2(const ai_i8 *pDataIn_init, ai_u32 *pDataOut_init, const ai_i8 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i8 in_zeropoint); /*! 
* @brief Handles 2D convolution with 8-bits quantized Input and weights and * binary output - quantized with DoReFa SotA quantizer, lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_dorefa_is8os1ws8(const ai_i8 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u8 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i8 in_zeropoint); /*! * @brief Handles 2D convolution with 8-bits quantized input, output and weights * - quantized with with different quantization for channel * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is8os8ws8_sssa_ch(const ai_i8 *pData_in, ai_i8 *pData_out, const ai_i8 *pWeights, const ai_i32 *pBias, ai_u16 *pBuffer_a, const ai_size width_in, const ai_size height_in, const ai_size width_out, const ai_size height_out, const ai_u16 n_channel_in, const ai_u16 n_channel_out, const ai_size filt_width, const ai_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_i32 scratch_size); /*! * @brief Handles 2D convolution with 16-bits quantized inputs, binary outputs and binary weights - Lite I/F. * Vanilla version. 
* @ingroup lite_conv2d_dqnn * @param layer conv2d_dqnn layer */ LITE_API_ENTRY void forward_lite_conv2d_is16os1ws1_bn_fxp(const ai_i16 *pIn, ai_u32 *pOut_32, const ai_u32 *pWeights, const ai_i32 *pThreshold, ai_i8 *pBufferA, const ai_i32 dim_kernel, const ai_i16 dim_im_in_x, const ai_i16 dim_im_in_y, const ai_i16 dim_im_out_x, const ai_i16 dim_im_out_y, const ai_i16 ch_im_in, const ai_i16 ch_im_out, const ai_i16 dim_kernel_x, const ai_i16 dim_kernel_y, const ai_i16 padding_x, const ai_i16 padding_y, const ai_i16 stride_x, const ai_i16 stride_y, const ai_i16 dilation_x, const ai_i16 dilation_y, const ai_i16 in_zeropoint); /** * @brief Handles 2D convolution with 16-bits quantized inputs, 16-bits quantized outputs and binary weights - Lite I/F * * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_conv2d_is16os16ws1_fxp(const ai_i16 *pIn, ai_i16 *pOut, const ai_u32 *pWeights, ai_i8 *pBufferA, const ai_i16 dim_im_in_x, const ai_i16 dim_im_in_y, const ai_i16 dim_im_out_x, const ai_i16 dim_im_out_y, const ai_i16 ch_im_in, const ai_i16 ch_im_out, const ai_u32 dim_kernel, const ai_i16 dim_kernel_x, const ai_i16 dim_kernel_y, const ai_i16 padding_x, const ai_i16 padding_y, const ai_i16 stride_x, const ai_i16 stride_y, const ai_i16 dilation_x, const ai_i16 dilation_y, const ai_i16 in_zeropoint); AI_API_DECLARE_END #endif /*LITE_CONV2D_DQNN_H*/
30,870
C
55.333942
119
0.351085
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/core_private.h
/** ****************************************************************************** * @file core_private.h * @author AST Embedded Analytics Research Platform * @brief private header file of common private core module defines ****************************************************************************** * @attention * * Copyright (c) 2019 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef CORE_PRIVATE_H #define CORE_PRIVATE_H #pragma once #include "ai_math_helpers.h" #include "ai_datatypes_internal.h" #include "core_log.h" /*! * @defgroup core_private Core Library Private macros and datatypes * @brief Common macros, datatypes and routines for core private rounites * @details This module contains the definitons and implementations of some * internal routines and datatypes that are supposed to not be exposed as * public headers. 
So usually this file should be include only on .c files or * headers that are private as well */ /*** Foreground Colors ****************************************************/ #define CORE_COLOR_BLACK "\x1b[30m" #define CORE_COLOR_RED "\x1b[31m" #define CORE_COLOR_GREEN "\x1b[32m" #define CORE_COLOR_YELLOW "\x1b[33m" #define CORE_COLOR_BLUE "\x1b[94m" #define CORE_COLOR_MAGENTA "\x1b[35m" #define CORE_COLOR_CYAN "\x1b[36m" #define CORE_COLOR_WHYTE "\x1b[37m" #define CORE_COLOR_DEFAULT "\x1b[39m" #define CORE_COLOR_LGRAY "\x1b[90m" #define CORE_COLOR_LRED "\x1b[91m" #define CORE_COLOR_LGREEN "\x1b[92m" #define CORE_COLOR_LYELLOW "\x1b[93m" #define CORE_COLOR_LBLUE "\x1b[94m" #define CORE_COLOR_LMAGENTA "\x1b[95m" #define CORE_COLOR_LCYAN "\x1b[96m" #define CORE_COLOR_LWHITE "\x1b[97m" /*** Text Attributes Colors *********************************************/ #define CORE_COLOR_OFF "\x1b[0m" #define CORE_COLOR_BOLD "\x1b[1m" #define CORE_COLOR_UNDERLINE "\x1b[4m" #define CORE_COLOR_BLINK "\x1b[5m" #define CORE_COLOR_BOLD_OFF "\x1b[21m" #define CORE_COLOR_UNDERLINE_OFF "\x1b[24m" #define CORE_COLOR_BLINK_OFF "\x1b[25m" /*** Background Colors ****************************************************/ #define CORE_COLOR_BG_BLACK "\x1b[40m" #define CORE_COLOR_BG_RED "\x1b[41m" #define CORE_COLOR_BG_GREEN "\x1b[42m" #define CORE_COLOR_BG_YELLOW "\x1b[43m" #define CORE_COLOR_BG_BLUE "\x1b[44m" #define CORE_COLOR_BG_MAGENTA "\x1b[45m" #define CORE_COLOR_BG_CYAN "\x1b[46m" #define CORE_COLOR_BG_WHITE "\x1b[47m" #define CORE_COLOR_BG_DEFAULT "\x1b[49m" #define CORE_COLOR_BG_LGRAY "\x1b[100m" #define CORE_COLOR_BG_LRED "\x1b[101m" #define CORE_COLOR_BG_LGREEN "\x1b[102m" #define CORE_COLOR_BG_LYELLOW "\x1b[103m" #define CORE_COLOR_BG_LBLUE "\x1b[104m" #define CORE_COLOR_BG_LMAGENTA "\x1b[105m" #define CORE_COLOR_BG_LCYAN "\x1b[106m" #define CORE_COLOR_BG_LWHITE "\x1b[107m" /*****************************************************************************/ #define 
CORE_ADDRESS_RANGE_INIT(start_, end_) \ core_address_range_init(start_, end_) #define CORE_GET_BUFFER_META_INFO(meta_info_, tensor_ptr_) \ core_get_buffer_meta_info(meta_info_, tensor_ptr_) #define CORE_ADDRESS_RANGE_END(range_) \ ( (ai_ptr)(((range_)->start)+((range_)->size)) ) #define CORE_ADDRESS_RANGE_OVERLAP(overlap_) \ ( ((overlap_)->start) && (((overlap_)->size)>0) ) #define CORE_ADDRESS_RANGE_OVERLAP_PARTIAL(overlap_, ref_) \ ( ((overlap_)->start) && (((overlap_)->size)<((ref_)->size)) ) #define CORE_MEMORY_OVERLAP_INIT(partial_, range_, chain_id_, tensor_id_) { \ .partial = (partial_), .range = AI_PACK(range_), \ .chain_id = (chain_id_), .tensor_id = (tensor_id_) \ } #define CORE_OFFSET(offset_, max_) \ ((ai_i32)(((offset_)<0) ? AI_MAX((max_) - (offset_), 0) : AI_MIN(offset_, max_))) /*****************************************************************************/ /** Network Context Handlers **/ /*****************************************************************************/ /*****************************************************************************/ /** Network Tensors Handlers **/ /*****************************************************************************/ #define AI_TENSOR_HAS_INTQ_INFO \ AI_BUFFER_META_HAS_INTQ_INFO #define CORE_TENSOR_GET_SHAPE_SIZE(tensor_) \ ai_shape_get_size(AI_TENSOR_SHAPE(tensor_)) #define CORE_ASSERT_SHAPE_MATCH(x, y) \ do { \ AI_ASSERT(AI_SHAPE_H(y) == 1 || AI_SHAPE_H(x)==1 || AI_SHAPE_H(y)==AI_SHAPE_H(x)) \ AI_ASSERT(AI_SHAPE_W(y) == 1 || AI_SHAPE_W(x)==1 || AI_SHAPE_W(y)==AI_SHAPE_W(x)) \ AI_ASSERT(AI_SHAPE_D(y) == 1 || AI_SHAPE_D(x)==1 || AI_SHAPE_D(y)==AI_SHAPE_D(x)) \ AI_ASSERT(AI_SHAPE_E(y) == 1 || AI_SHAPE_E(x)==1 || AI_SHAPE_E(y)==AI_SHAPE_E(x)) \ AI_ASSERT(AI_SHAPE_CH(y) == 1 || AI_SHAPE_CH(x)==1|| AI_SHAPE_CH(y)==AI_SHAPE_CH(x)) \ AI_ASSERT(AI_SHAPE_IN_CH(y) == 1 || AI_SHAPE_IN_CH(x)==1|| AI_SHAPE_IN_CH(y)==AI_SHAPE_IN_CH(x)) \ } while(0); #define AI_TENSOR_ARRAY_BYTE_SIZE(t_) \ 
AI_ARRAY_OBJ_BYTE_SIZE(AI_ARRAY_OBJ(t_->data)) #define AI_TENSOR_ARRAY_GET_DATA_ADDR(t_) \ AI_HANDLE_PTR(AI_ARRAY_OBJ_DATA_START(t_->data, void)) #define AI_TENSOR_ARRAY_UPDATE_DATA_ADDR(t_, addr_) \ { ai_array *arr_ = AI_ARRAY_OBJ(t_->data); \ const uintptr_t off_ = (uintptr_t)arr_->data - (uintptr_t)arr_->data_start; \ arr_->data_start = AI_PTR(addr_); \ arr_->data = AI_PTR((uintptr_t)addr_ + off_); \ } #define AI_TENSOR_INTEGER_GET_SIZE(t_) \ ((t_->klass) ? (AI_KLASS_GET_INTQ_INFO_LIST(t_))->size : 0) #define AI_TENSOR_INTEGER_GET_SCALE(t_, idx_) \ AI_INTQ_INFO_LIST_SCALE(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_float, idx_) #define AI_TENSOR_INTEGER_GET_ZEROPOINT_I8(t_, idx_) \ AI_INTQ_INFO_LIST_ZEROPOINT(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_i8, idx_) #define AI_TENSOR_INTEGER_GET_ZEROPOINT_U8(t_, idx_) \ AI_INTQ_INFO_LIST_ZEROPOINT(AI_KLASS_GET_INTQ_INFO_LIST(t_), ai_u8, idx_) #define AI_TENSOR_FMT_GET_SIGN(t_) \ AI_BUFFER_FMT_GET_SIGN(AI_ARRAY_OBJ(t_->data)->format) #define AI_TENSOR_FMT_GET_BITS(t_) \ AI_BUFFER_FMT_GET_BITS(AI_ARRAY_OBJ(t_->data)->format) #define AI_TENSOR_FMT_GET_FBITS(t_) \ AI_BUFFER_FMT_GET_FBITS(AI_ARRAY_OBJ(t_->data)->format) #define AI_TENSOR_FMT_GET_TYPE(t_) \ AI_BUFFER_FMT_GET_TYPE(AI_ARRAY_OBJ(t_->data)->format) #define AI_TENSOR_GET_FMT(t_) \ (AI_ARRAY_OBJ(t_->data)->format) /*****************************************************************************/ /** Network Buffers Handlers **/ /*****************************************************************************/ #define AI_FOR_EACH_BUFFER_ARRAY_ITEM(buffer_ptr_, buffer_array_ptr_, start_pos_, end_pos_) \ ai_buffer* buffer_ptr_ = AI_BUFFER_ARRAY_ITEM(buffer_array_ptr_, \ CORE_OFFSET(end_pos_, AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_))); \ for ( ; buffer_ptr_ && AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_) && \ (buffer_ptr_>=AI_BUFFER_ARRAY_ITEM(buffer_array_ptr_, \ CORE_OFFSET(start_pos_, AI_BUFFER_ARRAY_SIZE(buffer_array_ptr_)))); buffer_ptr_--) 
/*****************************************************************************/ /** Network Arrays Handlers **/ /*****************************************************************************/ #define AI_ARRAY_OBJ_FMT(array_) \ AI_CAST(ai_array_format, AI_ARRAY_OBJ(array_)->format) #define AI_ARRAY_OBJ_SIZE(array_) \ (AI_ARRAY_OBJ(array_)->size) #define AI_ARRAY_OBJ_BYTE_SIZE(array_) \ AI_SIZE(AI_ARRAY_GET_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \ AI_ARRAY_OBJ_SIZE(array_))) #define AI_ARRAY_OBJ_DATA_SIZE(array_) \ AI_ARRAY_GET_DATA_BYTE_SIZE(AI_ARRAY_OBJ_FMT(array_), \ AI_ARRAY_OBJ_SIZE(array_)) #define AI_ARRAY_OBJ_DATA(array_, type_) \ AI_CAST(type_*, AI_ARRAY_OBJ(array_)->data) #define AI_ARRAY_OBJ_DATA_START(array_, type_) \ AI_CAST(type_*, AI_ARRAY_OBJ(array_)->data_start) #define AI_ARRAY_OBJ_ELEM(array_, type_, pos_) \ AI_ARRAY_OBJ_DATA(array_, type_)[(pos_)] /*****************************************************************************/ /** Network Tensors Chains / Lists Handlers **/ /*****************************************************************************/ #define SET_TENSOR_IN(chain_, pos_) \ (GET_TENSOR_LIST_IN(chain_)->tensor[(pos_)]) #define SET_TENSOR_OUT(chain_, pos_) \ (GET_TENSOR_LIST_OUT(chain_)->tensor[(pos_)]) #define AI_NODE_IO_GET(node_, in_, out_) \ ASSERT_NODE_SANITY(node_) \ ai_tensor* in_ = GET_TENSOR_IN((node_)->tensors, 0); \ ai_tensor* out_ = GET_TENSOR_OUT((node_)->tensors, 0); \ ASSERT_TENSOR_SANITY(in_) \ ASSERT_TENSOR_SANITY(out_) /*****************************************************************************/ #define AI_BITS_TO_BYTES(bits_) \ (((bits_)+0x7) >> 3) #define AI_BYTES_TO_BITS(bytes_) \ ((bytes_) << 3) /*****************************************************************************/ /** Network Nodes Handlers **/ /*****************************************************************************/ #define AI_NODE_IS_FIRST(node) \ (AI_NODE_OBJ(node)==AI_NODE_OBJ(AI_NODE_OBJ(node)->network->input_node)) #define 
AI_NODE_IS_LAST(node_) \ ((AI_NODE_OBJ(node_)==AI_NODE_OBJ(node_)->next) || \ (AI_NODE_OBJ(node_)->next==NULL)) #define AI_FOR_EACH_NODE_DO(node_, nodes_) \ for (ai_node* node_ = AI_NODE_OBJ(nodes_); (node_); \ node_ = ((AI_NODE_IS_LAST(node_)) ? NULL : (node_)->next)) /*****************************************************************************/ typedef struct { ai_ptr start; ai_size size; } ai_address_range; typedef struct { ai_address_range range; ai_u16 chain_id; ai_u16 tensor_id; ai_bool partial; } ai_memory_overlap; /*****************************************************************************/ AI_DECLARE_STATIC ai_address_range core_address_range_init( const ai_handle start, const ai_handle end) { ai_address_range r; r.start = (start<end) ? start : end; r.size = (ai_size) ((start<end) ? ((ai_uptr)end-(ai_uptr)start) : ((ai_uptr)start-(ai_uptr)end)); return r; } AI_DECLARE_STATIC ai_buffer_meta_info* core_get_buffer_meta_info( ai_buffer_meta_info* meta, const ai_tensor* t) { if (!meta) return NULL; AI_ASSERT(t && t->data) ai_bool ok; meta->flags = 0x0; meta->intq_info = AI_KLASS_GET_INTQ_INFO_LIST(t); ok = (meta->intq_info && (meta->intq_info->size>0)); meta->flags |= (ok) ? AI_BUFFER_META_HAS_INTQ_INFO : 0x0; return (ok) ? meta : NULL; } #if 0 #include <stdio.h> #include <stdarg.h> AI_DECLARE_STATIC void _dump_file_print( const char* fname, const char* fmt, ...) 
{ static FILE* fp = NULL; if (fname) { if (!fp) { fp = fopen(fname, "a"); } } if (fp) { va_list args; va_start(args, fmt); vfprintf(fp, fmt, args); va_end(args); fflush(fp); } } AI_DECLARE_STATIC void _dump_bytearray( const char* fname, const ai_handle src, const ai_size src_size, const ai_u8 src_id, const char* name) { static FILE* fp = NULL; if (fname && src && (src_size>0)) { if (!fp) { fp = fopen(fname, "a"); } } if (fp) { switch (src_id) { case 1: { const ai_float* src_value = (const ai_float*)src; fprintf(fp, "ai_float %s[%u] = {%f", name, src_size, src_value[0]); for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %f", src_value[i]); } } break; case 2: { const ai_i8* src_value = (const ai_i8*)src; fprintf(fp, "ai_i8 %s[%u] = {%d", name, src_size, src_value[0]); for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %d", src_value[i]); } } break; case 3: { const ai_u8* src_value = (const ai_u8*)src; fprintf(fp, "ai_u8 %s[%u] = {%u", name, src_size, src_value[0]); for (ai_size i=1; i<src_size; i++) { fprintf(fp, ", %u", src_value[i]); } } break; default: fprintf(fp, "format not supported: %u {", src_id); break; } fprintf(fp, "};\n"); fflush(fp); } } #endif #endif /* CORE_PRIVATE_H */
13,110
C
34.822404
114
0.510297
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_operators.h
#ifndef LITE_OPERATORS_H #define LITE_OPERATORS_H #pragma once #include "lite_bn_f32.h" #include "lite_bn_integer.h" #include "lite_conv2d.h" #include "lite_conv2d_dqnn.h" #include "lite_convert_dqnn.h" #include "lite_dense_if32.h" #include "lite_dense_is1.h" #include "lite_dense_is1ws1.h" #include "lite_dense_ws1.h" #include "lite_gru_f32.h" #include "lite_dw_dqnn.h" #include "lite_pw_dqnn.h" #include "lite_dense_is8os8ws8.h" #include "lite_generic_float.h" #include "lite_pool_f32.h" #include "lite_maxpool_dqnn.h" #include "lite_nl_generic_float.h" #include "lite_nl_generic_integer.h" #include "lite_pad_generic.h" #include "lite_pad_dqnn.h" #include "lite_upsample_generic.h" #endif /* LITE_OPERATORS_H */
718
C
23.793103
36
0.727019
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_norm.h
/** ****************************************************************************** * @file layers_norm.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform normalization layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_NORM_H #define LAYERS_NORM_H #pragma once #include "layers_common.h" /*! * @defgroup layers_norm Normalization Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_bn * @ingroup layers_norm * @brief Batch normalization (scale with bias) layer */ typedef ai_layer_base ai_layer_bn; /*! * @struct ai_layer_lrn * @ingroup layers_norm * @brief Local Response Normalization layer * * Divides each element by a scale factor computed */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lrn_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_u32 local_size; /*!< size of the normalization window */ ai_float k; /*!< bias term */ ai_float alpha; /*!< input scale */ ai_float beta; /*!< scale exponent */ } ai_layer_lrn; /*! * @enum ai_norm_type_e * @ingroup layers_norm * @brief store the type of normalization algorithm to apply */ typedef enum ai_norm_type_ { NONE = 0, L1 = 1, L2 = 2, MAX = 3, } ai_norm_type_e; /*! * @struct ai_layer_norm * @ingroup layers_norm * @brief Lp Normalization layer * * Normalizes the tensor along the 'axis' direction using the Lp norm. * Optionally divides the result by the number of the elements. 
*/ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_norm_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_idx axis; /*! normalization axis */ ai_float exponent; /*!< normalization exponent p */ ai_bool scale; /*!< multiplies by the pth root of the number of elements */ ai_norm_type_e norm_type; } ai_layer_norm; /*! * @brief Local response normalization computed on a float array * @ingroup layers_norm * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param pad amount of padding for the channels */ AI_INTERNAL_API void func_lrn_array_f32(ai_handle out, const ai_handle in, const ai_size in_size, const ai_size channel_size, const ai_i32 pad, const ai_float k, const ai_float alpha, const ai_float beta); /*! * @brief Lp normalization computed on a float array * @ingroup layers_norm * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param exponent p exponent for the Lp normalization * @param axis_stride stride (in array elements) of the normalization axis * @param axis_size size of the normalization axis * @param outer_size number of tensor slices (including the normalization axis) * on which compute the normalization */ AI_INTERNAL_API void func_norm_array_f32(ai_handle out, const ai_handle in, const ai_float exponent, const ai_float norm, const ai_size axis_stride, const ai_size axis_size, const ai_size outer_size); /*! * @brief Max normalization computed on float array * @ingroup layers_norm * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param axis_stride stride (in array elements) of the normalization axis * @param axis_size size of the normalization axis * @param outer_size number of tensor slices (including the normalization axis) */ AI_INTERNAL_API void func_norm_max_array_f32(ai_handle out, const ai_handle in, const ai_float norm, const ai_size axis_size, const ai_size n_el); /*! 
* @brief Fast L2 normalization computed on a float array * @ingroup layers_norm * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param axis_size size of the normalization axis * @param n_el total number of elements in the tensor */ AI_INTERNAL_API void func_norm_l2_fast_array_f32(ai_handle out, const ai_handle in, const ai_float norm, const ai_size axis_size, const ai_size outer_size); /*! * @brief Fast L1 normalization computed on a float array * @ingroup layers_norm * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param axis_size size of the normalization axis * @param n_el total number of elements in the tensor */ AI_INTERNAL_API void func_norm_l1_fast_array_f32(ai_handle out, const ai_handle in, const ai_float norm, const ai_size axis_size, const ai_size n_el); /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Computes the activations of a batchnorm (scale + bias) layer. * @ingroup layers_norm * @param layer the batch normalization (bn) layer */ AI_INTERNAL_API void forward_bn(ai_layer* layer); /*! * @brief Computes the activations of a batchnorm (scale + bias) layer with * integer format * @ingroup layers_norm * @param layer the batch normalization (bn) layer */ AI_INTERNAL_API void forward_bn_integer(ai_layer* layer); /*! * @brief Computes the activations of a Local Response Normalization Layer. * @ingroup layers_norm * @param layer the local response normalization (lrn) layer */ AI_INTERNAL_API void forward_lrn(ai_layer* layer); /*! * @brief Computes the activations of a normalization layer. * @ingroup layers_norm * @param layer the normalization (norm) layer */ AI_INTERNAL_API void forward_norm(ai_layer* layer); /*! 
* @brief Batch Normalization with 16-bit input, 16-bit threshold and binary output. * It is implemented using a threshold, and this is possible because the output is binary. * @param layer the batch normalization layer */ AI_INTERNAL_API void forward_bn_is16os1ws16(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_NORM_H*/
6,910
C
31.909524
97
0.612156
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_conv2d_dqnn.h
/** ****************************************************************************** * @file layers_conv2d_dqnn.h * @author AIS * @brief header file of AI platform DQNN conv datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_CONV2D_DQNN_H #define LAYERS_CONV2D_DQNN_H #pragma once #include "layers_common.h" #include "layers_conv2d.h" /*! * @defgroup layers_conv2d_dqnn Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN #define AI_DQNN_PAD_1_KEY (1) #define AI_DQNN_PAD_M1_KEY (-1) #define AI_DQNN_PAD_0_KEY (0) #define AI_DQNN_PAD_1_VALUE (0x0) #define AI_DQNN_PAD_M1_VALUE (0xFFFFFFFF) #define AI_DQNN_PAD_0_VALUE (0x2) /*! * @struct ai_layer_conv2d_dqnn * @ingroup layers_conv2d_dqnn * @brief conv2d_dqnn layer * * @ref forward_conv2d_is1os1ws1 */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_conv2d_dqnn_ { AI_LAYER_CONV2D_FIELDS_DECLARE ai_i32 pad_value; } ai_layer_conv2d_dqnn; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles point wise convolution with binary input, binary output and * binary weights * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_pw_is1os1ws1_bn(ai_layer *pLayer); /*! 
* @brief Handles point wise convolution with binary input, binary output and * binary weights - Optimized thanks to Optim2 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_pw_is1os1ws1_bn_optim2(ai_layer *pLayer); /*! * @brief Handles point wise convolution with binary input, 8-bits output and * binary weights * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_pw_is1os8ws1_bn(ai_layer *pLayer); /*! * @brief Handles point wise convolution with binary input, 8-bits output and * binary weights - Optimized thanks to Optim1 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_pw_is1os8ws1_bn_optim1(ai_layer *pLayer); /*! * @brief Handles point-wise convolution with binary input, float32 output * and binary weights * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_pw_is1of32ws1_bn(ai_layer *pLayer); /*! * @brief Handles point-wise convolution with binary input, float32 output * and binary weights - Optimized thanks to Optim1 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_pw_is1of32ws1_bn_optim1(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os1ws1_bn(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - Optimized thanks to Optim2 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os1ws1_bn_optim2(ai_layer *pLayer); /*! 
* @brief Handles 2D convolution with binary input, 8-bits output and * binary weights * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os8ws1_bn(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - Optimized thanks to Optim1 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os8ws1_bn_optim1(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os1ws1_bn_pad0(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Optimized thanks to * Optim0 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os1ws1_bn_pad0_optim0(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - with 0 padding (QKeras like) * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os8ws1_bn_pad0(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os1ws1_bn_pad1(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) - Optimized thanks * to Optim2 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os1ws1_bn_pad1_optim2(ai_layer *pLayer); /*! 
* @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - with +1/-1 padding (Larq like) * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os8ws1_bn_pad1(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - with +1/-1 padding (Larq like) - Optimized thanks * to Optim1 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is1os8ws1_bn_pad1_optim1(ai_layer *pLayer); /*! * @brief Handles 2D convolution with 8-bits quantized Input and weights and * binary output * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is8os1ws8(ai_layer *pLayer); /*! * @brief Handles 2D convolution with 8-bits quantized Input and weights and * binary output - Optimized thanks to Optim2 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is8os1ws8_optim2(ai_layer *pLayer); /*! * @brief Handles 2D convolution with 8-bits quantized Input and weights and * binary output - quantized with DoReFa SotA quantizer * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_dorefa_is8os1ws8(ai_layer *pLayer); /*! * @brief Handles 2D convolution with 16-bits quantized input, binary weights and binary output * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is16os1ws1_bn_fxp(ai_layer *pLayer); /*! * @brief Handles 2D convolution with 16-bits quantized input, binary weights and 16-bits quantized output * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is16os16ws1_fxp(ai_layer *pLayer); /*! 
* @brief Handles depth-wise convolution with binary input, binary output and * binary weights * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_dw_is1os1ws1_bn(ai_layer *pLayer); /*! * @brief Handles depth-wise convolution with binary input, binary output and * binary weights - Optimized thanks to Optim3 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_dw_is1os1ws1_bn_optim3(ai_layer *pLayer); /*! * @brief Handles depth-wise convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_dw_is1os1ws1_bn_pad0(ai_layer *pLayer); /*! * @brief Handles depth-wise convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Optimized thanks to * Optim3 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_dw_is1os1ws1_bn_pad0_optim3(ai_layer *pLayer); /*! * @brief Handles depth-wise convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_dw_is1os1ws1_bn_pad1(ai_layer *pLayer); /*! * @brief Handles depth-wise convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) - Optimized thanks to * Optim3 assumptions * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_dw_is1os1ws1_bn_pad1_optim3(ai_layer *pLayer); /*! 
* @brief Handles 2D convolution with 8-bits quantized Input and output and * binary weights * @ingroup layers_conv2d_dqnn * @param layer conv2d_dqnn layer */ AI_INTERNAL_API void forward_conv2d_is8os8ws1(ai_layer *pLayer); /** * @brief Handles 2D convolution with binary input, fixed point 16-bits output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is1os16ws1_bn_pad0_fxp(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is1os16ws1_bn_pad1_fxp(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * - Optimized thanks to Optim1 assumptions * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is1os16ws1_bn_pad1_optim1_fxp(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is1ou16ws1_bn_pad0_fxp(ai_layer *pLayer); /*! * @brief Handles 2D convolution with binary input, fixed point 16-bits unsigned output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * @ingroup lite_conv2d_dqnn * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is1ou16ws1_bn_pad1_fxp(ai_layer *pLayer); /*! 
* @brief Handles 2D convolution with binary input, fixed point 16-bits unsiged output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * - Optimized thanks to Optim1 assumptions * @ingroup lite_conv2d_dqnn * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is1ou16ws1_bn_pad1_optim1_fxp(ai_layer *pLayer); /*! * @brief Computes the activations of a integer quantized 2D convolutional layer * for SSSA per channel quantized RGB scheme using n_channel_in = 3 * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_conv2d_is8os8ws8_sssa_ch_rgb(const ai_i8 *pData_in, ai_i8 *pData_out, const ai_i8 *pWeights, const ai_i32 *pBias, ai_u16 *pBuffer_a, const ai_size width_in, const ai_size height_in, const ai_size width_out, const ai_size height_out, const ai_u16 n_channel_in, const ai_u16 n_channel_out, const ai_size filt_width, const ai_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_bool out_ch_format, ai_i16 *p_out_r_shift, ai_i32 *p_out_factor); /*! 
* @brief Computes the activations of a point-wise integer quantized convolution for SSSA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_pw_is8os8ws8_sssa_ch(const ai_i8 *pData_in, ai_i8 *pData_out, const ai_i8 *pWeights, const ai_i32 *pBias, ai_u16 *pBuffer_a, const ai_size width_in, const ai_size height_in, const ai_size width_out, const ai_size height_out, const ai_u16 n_channel_in, const ai_u16 n_channel_out, const ai_size filt_width, const ai_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, ai_i16 *p_out_r_shift, ai_i32 *p_out_factor, ai_i32 AI_PWOverlay, ai_i16 *bufferA, ai_i32 scratch_size); // st_nn_context_t context); /*! * @brief Computes the activations of a depth-wise integer quantized convolution for SSSA per channel quantized scheme * @ingroup layers_conv2d * @param layer the convolutional (conv) layer */ AI_INTERNAL_API void forward_dw_is8os8ws8_sssa_ch(const ai_i8 *pData_in, ai_i8 *pData_out, const ai_i8 *pWeights, const ai_i32 *pBias, ai_u16 *pBuffer_a, const ai_size width_in, const ai_size height_in, const ai_size width_out, const ai_size height_out, const ai_u16 n_channel_in, const ai_u16 n_channel_out, const ai_size filt_width, const ai_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, ai_i16 *p_out_r_shift, ai_i32 *p_out_factor); AI_API_DECLARE_END #endif /*LAYERS_CONV2D_DQNN_H*/
17,573
C
34.647059
91
0.579981
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_upsample_generic.h
/** ****************************************************************************** * @file layers_upsample_generic.h * @author Cyril Enault * @brief header file of AI platform padding generic datatypes ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_UPSAMPLE_H #define LAYERS_UPSAMPLE_H #pragma once #include "layers_generic.h" /*! * @defgroup layers_pad_generic Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles generic upsmapling in nearest mode * @ingroup layers_generic * @param layer upsample layer */ AI_INTERNAL_API void forward_upsample_nearest(ai_layer *pLayer); /*! * @brief Handles generic upsmapling in zeros mode * @ingroup layers_generic * @param layer upsample layer */ AI_INTERNAL_API void forward_upsample_zeros(ai_layer *pLayer); /*! * @brief Handles generic upsmapling in bilinear mode * @ingroup layers_generic * @param layer upsample layer */ AI_INTERNAL_API void forward_upsample_bilinear(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_PAD_GENERIC_H*/
1,845
C
26.552238
80
0.509485
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_bn_f32.h
#ifndef LITE_BN_F32_H #define LITE_BN_F32_H #pragma once #include "ai_lite_interface.h" /*! * @brief Forward function for a batch normalization (BN) layer with * signed float input, signed float output, and float parameters. * @ingroup lite_bn_f32 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param scale The pointer to BN scale param. * @param bias The pointer to bias. * @param n_elements The number of elements in the input tensor. * @param n_channel_in The number of channel in the input tensor. */ LITE_API_ENTRY void forward_lite_bn_if32of32wf32( ai_float* output, const ai_float* input, const ai_float* scale, const ai_float* bias, const ai_u32 n_elements, const ai_u32 n_channel_in); #endif /* LITE_BN_F32_H */
791
C
29.461537
69
0.718078
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dense_is1ws1.h
#ifndef _LITE_DENSE_IS1WS1_H #define _LITE_DENSE_IS1WS1_H #pragma once #include "ai_lite_interface.h" /*! * @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed binary weights. * @ingroup lite_dense_is1ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias (NULL if not available). * @param scratch The pointer to the scratch buffer. * @param n_channel_in The number of channels of the input. * @param n_channel_ouy The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is1os1ws1( ai_pbits *output, const ai_pbits *input, const ai_pbits *weights, const ai_pbits *bias, ai_i32 *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out ); /*! * @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup lite_dense_is1ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param scale The pointer to scale. * @param offset The pointer to offset. * @param scratch The pointer to the scratch buffer. * @param n_channel_in The number of channels of the input. * @param n_channel_ouy The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is1os1ws1_bn( ai_pbits *output, const ai_pbits *input, const ai_pbits *weights, const ai_float *scale, const ai_float *offset, ai_i32 *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out ); /*! 
* @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed 16bit weights. * @ingroup lite_dense_is1ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias (NULL if not available). * @param scratch The pointer to the scratch buffer (signed 32bit). * @param n_channel_in The number of channels of the input. * @param n_channel_ouy The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is1os16ws1( ai_i16 *output, const ai_pbits *input, const ai_pbits *weights, const ai_pbits *bias, ai_i32 *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); /*! * @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed 16bit weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup lite_dense_is1ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias (NULL if not available). * @param scratch The pointer to the scratch buffer (signed 32bit). * @param n_channel_in The number of channels of the input. * @param n_channel_ouy The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is1os16ws1_bn( ai_i16 *output, const ai_pbits *input, const ai_pbits *weights, const ai_float *scale, const ai_float *offset, ai_i32 *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); /*! 
* @brief Forward function for a dense layer with signed binary input, * signed float output, and signed binary weights. * @ingroup lite_dense_is1ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias (NULL if not available). * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_ouy The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is1of32ws1( ai_float *output, const ai_pbits *input, const ai_pbits *weights, const ai_pbits *bias, ai_i32 *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out ); /*! * @brief Forward function for a dense layer with signed binary input, * signed float output, and signed binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup lite_dense_is1ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param scale The pointer to scale. * @param offset The pointer to offset. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is1of32ws1_bn( ai_float *output, const ai_pbits *input, const ai_pbits *weights, const ai_float *scale, const ai_float *offset, ai_i32 *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out ); #endif /*_LITE_DENSE_IS1WS1_H*/
5,999
C
40.666666
80
0.724287
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_custom.h
/** ****************************************************************************** * @file layers_custom.h * @author Marco Lattuada * @brief header file of AI platform custom layers datatype ****************************************************************************** * @attention * * Copyright (c) 2020 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_CUSTOM_H #define LAYERS_CUSTOM_H #pragma once #include "layers_common.h" /*! * @defgroup layers_custom Custom layer definitions * @brief Definition of structures custom layers */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_custom * @ingroup layers_custom * @brief Custom layer wrapper * * The custom layer wrapper */ typedef ai_layer_stateful ai_layer_custom; AI_API_DECLARE_END #endif /*LAYERS_CUSTOM_H*/
1,217
C
24.914893
80
0.518488
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_conv2d.h
/** ****************************************************************************** * @file lite_conv2d.h * @author AIS * @brief header file of AI platform lite conv2d kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_CONV2D_H #define LITE_CONV2D_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles 2D convolution with float input, float output and * float weights * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_if32of32wf32(const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_ptr_const pWeights_init, const ai_ptr_const pBias_init, const ai_size n_channel_in, const ai_size n_channel_out, const ai_size width_in, const ai_size height_in, const ai_size width_out, const ai_size height_out, const ai_size filt_width, const ai_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_size filt_height_dilated, const ai_size filt_width_dilated, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_size n_groups); /*! 
* @brief Handles 2D depthwise convolution with float input, float output and * float weights * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_dw_if32of32wf32(const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_ptr_const pWeights_init, const ai_ptr_const pBias_init, const ai_size n_channel_in, const ai_size n_channel_out, const ai_size width_in, const ai_size height_in, const ai_size width_out, const ai_size height_out, const ai_size filt_width, const ai_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_size filt_height_dilated, const ai_size filt_width_dilated, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_size n_groups); /*! * @brief Handles 2D grouped convolution with float input, float output and * float weights * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_if32of32wf32_group(const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_ptr_const pWeights_init, const ai_ptr_const pBias_init, const ai_size n_channel_in, const ai_size n_channel_out, const ai_size width_in, const ai_size height_in, const ai_size width_out, const ai_size height_out, const ai_size filt_width, const ai_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_size filt_height_dilated, const ai_size filt_width_dilated, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_size n_groups); /*! 
* @brief Handles dilated conv2d convolutions (valid padding) * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_dilated_sssa8_ch(const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_layer_format_type out_ch_format, ai_i8 *pData_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_u32 height_loop_cnt, const ai_u16 weights_prefetch_enabled, ai_i32 scratch_size, ai_i16 *pBuffer_a); /*! * @brief Handles conv2d convolutions (valid padding) with number of channels >= 8 * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_deep_sssa8_ch(const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_layer_format_type out_ch_format, ai_i8 *pData_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_u32 height_loop_cnt, const ai_u16 weights_prefetch_enabled, ai_i32 scratch_size, ai_i16 *pBuffer_a); /*! 
* @brief Handles conv2d convolutions (valid padding) with number of channels >= 8 * Special forward function for 3x3 kernels and Stride = 1 * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_deep_3x3_sssa8_ch(const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_layer_format_type out_ch_format, ai_i8 *pData_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_u32 height_loop_cnt, ai_i32 scratch_size, ai_i16 *pBuffer_a); /*! * @brief Handles conv2d convolutions with same padding or with number of channels < 8 * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_sssa8_ch(const ai_i8 *pData_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 n_channel_in, const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_layer_format_type out_ch_format, ai_i8 *pData_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_u16 weights_prefetch_enabled, ai_i32 scratch_size, ai_i16 *pBuffer_a); /*! * @brief Handles rgb conv2d convolutions * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_rgb_sssa8_ch(const ai_i8 *pData_in, const ai_u16 dim_im_in, const ai_i8 *pWeights, const ai_u16 n_channel_out, const ai_u16 dim_kernel, const ai_u16 padding, const ai_u16 stride, const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_layer_format_type out_ch_format, ai_i8 *pData_out, const ai_u16 dim_im_out, ai_i32 scratch_size, ai_i16 *pBuffer_a); /*! 
* @brief Handles 2D convolution with float input, float output and * float weights with pool fused * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_if32of32wf32_pool(const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_float * pWeights_init, const ai_float *pBias_init, ai_float *pScratch_init, const ai_short_size n_channel_in, const ai_short_size n_channel_out, const ai_short_size width_in, const ai_short_size height_in, const ai_short_size width_out, const ai_short_size height_out, const ai_short_size filt_width, const ai_short_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_short_size filt_height_dilated, const ai_short_size filt_width_dilated, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_short_size n_groups, const ai_short_size width_conv_out, const ai_short_size height_conv_out, ai_handle pool_func, const ai_short_size pool_width, const ai_short_size pool_height, const ai_short_size pool_stride_x, const ai_short_size pool_stride_y, const ai_short_size pool_pad_x, const ai_short_size pool_pad_y); /*! 
* @brief Handles 2D depthwise convolution with float input, float output and * float weights with pool fused * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_dw_if32of32wf32_pool(const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_float *pWeights_init, const ai_float *pBias_init, ai_float *pScratch_init, const ai_short_size n_channel_in, const ai_short_size n_channel_out, const ai_short_size width_in, const ai_short_size height_in, const ai_short_size width_out, const ai_short_size height_out, const ai_short_size filt_width, const ai_short_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_short_size filt_height_dilated, const ai_short_size filt_width_dilated, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_short_size n_groups, const ai_short_size width_conv_out, const ai_short_size height_conv_out, ai_handle pool_func, const ai_short_size pool_width, const ai_short_size pool_height, const ai_short_size pool_stride_x, const ai_short_size pool_stride_y, const ai_short_size pool_pad_x, const ai_short_size pool_pad_y); /*! 
* @brief Handles 2D grouped convolution with float input, float output and * float weights with pool fused * @ingroup lite_conv2d */ LITE_API_ENTRY void forward_lite_conv2d_if32of32wf32_group_pool(const ai_float *pDataIn_init, ai_float *pDataOut_init, const ai_float *pWeights_init, const ai_float *pBias_init, ai_float *pScratch_init, const ai_short_size n_channel_in, const ai_short_size n_channel_out, const ai_short_size width_in, const ai_short_size height_in, const ai_short_size width_out, const ai_short_size height_out, const ai_short_size filt_width, const ai_short_size filt_height, const ai_u16 filt_pad_x, const ai_u16 filt_pad_y, const ai_u16 filt_stride_x, const ai_u16 filt_stride_y, const ai_short_size filt_height_dilated, const ai_short_size filt_width_dilated, const ai_u16 dilation_x, const ai_u16 dilation_y, const ai_short_size n_groups, const ai_short_size width_conv_out, const ai_short_size height_conv_out, ai_handle pool_func, const ai_short_size pool_width, const ai_short_size pool_height, const ai_short_size pool_stride_x, const ai_short_size pool_stride_y, const ai_short_size pool_pad_x, const ai_short_size pool_pad_y); #endif /*LITE_CONV2D_H*/
18,200
C
49.418282
86
0.398407
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_formats_converters.h
/** ****************************************************************************** * @file layers_formats_converters.h * @author AST Embedded Analytics Research Platform * @brief header file of formats converters layers ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_FORMATS_CONVERTERS_H #define LAYERS_FORMATS_CONVERTERS_H #pragma once #include "layers_common.h" /*! * @defgroup layers_formats_converters Formats Converters Layers Definition * @brief this group implements formats converter layers (cast, etc.) * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_cast * @ingroup layers_formats_converters * @brief C Implementation of cast layer */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_cast_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_array_format to_format; /*!< cast output format */ } ai_layer_cast; /*****************************************************************************/ /* Forward Functions Section */ /*****************************************************************************/ /*! * @brief forward function for cast layer. * @ingroup layers_ * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_cast(ai_layer* layer); AI_API_DECLARE_END #endif /*LAYERS_FORMATS_CONVERTERS_H*/
1,862
C
29.048387
80
0.50913
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_pool.h
/** ****************************************************************************** * @file layers_pool.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform pooling layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_POOL_H #define LAYERS_POOL_H #pragma once #include "layers_common.h" #include "lite_maxpool_dqnn.h" #include "lite_pool_f32.h" /*! * @defgroup layers_pool Pooling Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_pool * @ingroup layers_pool * @brief Pooling layer * * The type of pooling function is handled by the specific forward function * @ref forward_pool */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pool_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_2d pool_size; /*!< pooling size */ ai_shape_2d pool_stride; /*!< pooling stride */ ai_shape pool_pad; /*!< pooling pad, y,x border sizes */ ai_u8 count_include_pad; /*!< include pad flag */ } ai_layer_pool; /*! 
* @brief Max Pooling on a 8/16 bits fixed point data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to output data */ AI_INTERNAL_API void pool_func_mp_array_fixed(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /*! 
* @brief Max Pooling on a 8-bits integer quantized data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to output data */ AI_INTERNAL_API void pool_func_mp_array_integer(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /*! 
* @brief Max Pooling on a signed 8-bits integer quantized data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to output data */ AI_INTERNAL_API void pool_func_mp_array_integer_INT8(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /*! 
* @brief Max Pooling on a unsigned 8-bits integer quantized data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to output data */ AI_INTERNAL_API void pool_func_mp_array_integer_UINT8(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /*! 
* @brief Average Pooling on a 8/16 bits fixed point data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to scratch memory */ AI_INTERNAL_API void pool_func_ap_array_fixed(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /*! 
* @brief Average Pooling on a 8-bits integer quantized data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to scratch memory */ AI_INTERNAL_API void pool_func_ap_array_integer(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /*! 
* @brief Average Pooling on a signed 8-bits integer quantized data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to scratch memory */ AI_INTERNAL_API void pool_func_ap_array_integer_INT8(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /*! 
* @brief Average Pooling on a unsigned 8-bits integer quantized data array * @ingroup layers_pool * @param in opaque handler to input data to process * @param dim_im_in_x input feature map width * @param dim_im_in_y input feature map height * @param ch_im_in number of input channels * @param dim_kernel_x kernel width * @param dim_kernel_y kernel height * @param padding_x right padding value * @param padding_y top padding value * @param stride_x stride value on x dimension * @param stride_y stride value on y dimension * @param dim_im_out_x output feature map width * @param dim_im_out_y output feature map height * @param out opaque handler to scratch memory */ AI_INTERNAL_API void pool_func_ap_array_integer_UINT8(ai_handle in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, ai_handle out); /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Computes the activations of a max pooling layer. * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_mp(ai_layer* layer); /*! * @brief Computes the activations of a fixed point max pooling layer. * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_mp_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of an integer-quantized max pooling layer. * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_mp_integer(ai_layer *pLayer); /*! 
* @brief Computes the activations of an integer-quantized max pooling layer * with int8 I/O * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_mp_integer_INT8(ai_layer *pLayer); /*! * @brief Computes the activations of an integer-quantized max pooling layer * with uint8 I/O * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_mp_integer_UINT8(ai_layer *pLayer); /*! * @brief Computes the activations of an integer-quantized max pooling layer * with int16 I/O * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_mp_integer_INT16(ai_layer *pLayer); /*! * @brief Computes the activations of an integer-quantized max pooling layer * with uint16 I/O * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_mp_integer_UINT16(ai_layer *pLayer); /*! * @brief Computes the activations of an average pooling layer. * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_ap(ai_layer* layer); /*! * @brief Computes the activations of a fixed point average pooling layer. * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_ap_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of an integer-quantized average pooling layer. * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_ap_integer(ai_layer *pLayer); /*! * @brief Computes the activations of an integer-quantized average pooling layer * with int8 I/O * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_ap_integer_INT8(ai_layer *pLayer); /*! 
* @brief Computes the activations of an integer-quantized average pooling layer * with uint8 I/O * @ingroup layers_pool * @param layer the pooling (pool) layer */ AI_INTERNAL_API void forward_ap_integer_UINT8(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_POOL_H*/
14,171
C
36.294737
81
0.624656
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_pad_generic.h
/** ****************************************************************************** * @file layers_pad_generic.h * @author Marco Forleo * @brief header file of AI platform padding generic datatypes ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_PADDING_DQNN_H #define LAYERS_PADDING_DQNN_H #pragma once #include "layers_generic.h" /*! * @defgroup layers_pad_generic Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles generic padding in constant mode * @ingroup layers_generic_dqnn * @param layer pad layer */ AI_INTERNAL_API void forward_pad_constant(ai_layer *pLayer); /*! * @brief Handles generic padding in edge mode * @ingroup layers_generic_dqnn * @param layer pad layer */ AI_INTERNAL_API void forward_pad_edge(ai_layer *pLayer); /*! * @brief Handles generic padding in reflect mode * @ingroup layers_generic_dqnn * @param layer pad layer */ AI_INTERNAL_API void forward_pad_reflect(ai_layer *pLayer); /*! * @brief Handles generic padding in constant mode Channel 1st 8bit * @ingroup layers_generic_dqnn * @param layer pad layer */ AI_INTERNAL_API void forward_pad_8bit_ch1st_3x3_constant(ai_layer* pLayer); AI_API_DECLARE_END #endif /*LAYERS_PAD_GENERIC_H*/
2,034
C
25.776315
80
0.525074
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_ml_treeensembleclassifier.h
/** ****************************************************************************** * @file layers_ml_treeensembleclassifier.h * @author AIS * @brief header file of AI platform TreeEnsembleClassifier datatypes ****************************************************************************** * @attention * * Copyright (c) 2021-2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_TREE_ENSEMBLE_CLASSIFIER_H #define LAYERS_TREE_ENSEMBLE_CLASSIFIER_H #pragma once #include "layers_common.h" #include "layers_nl.h" /*! * @defgroup layers_ml_treensembleclassifier Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /* Error return codes */ #define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_NO 0 #define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_WRONG_IDX_FMT -1 #define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNFOUND_LEAF -2 #define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNSUPPORTED_BRANCH -3 #define AI_TREE_ENSEMBLE_CLASSIFIER_ERROR_UNSUPPORTED_FEATURE -4 #define AI_TREE_ENSEMBLE_CLASSIFIER_DEPTH_MAX 10000 /* Type of condition in the TreeEnsembleClassifier*/ typedef enum { AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_LT_IDX = 0, AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_LEQ_IDX, AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_EQ_IDX, AI_TREE_ENSEMBLE_CLASSIFIER_BRANCH_END, } ai_tree_ensenble_classifier_branch_e; typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tree_ensemble_classifier_ { AI_LAYER_COMMON_FIELDS_DECLARE func_nl nl_func; uint8_t all_weights_are_positive; ai_float nodes_values_scale; ai_float nodes_values_offset; ai_float class_weights_scale; ai_float class_weights_offset; } ai_layer_tree_ensemble_classifier; 
/******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Decodes the TreeEnsembleClassifier ML operator. * @ingroup layers_svmreg * @param layer tree ensemble classifier layer */ AI_INTERNAL_API void forward_tree_ensemble_classifier(ai_layer *pLayer); AI_INTERNAL_API ai_i32 decodeEstimator_LEQ_8Bits(const ai_float *pDataIn, ai_float *pOutDataScores, const ai_u8 *pFeatureIdxForEstimator, const ai_float *pValuesForEstimator, const ai_u8 *pTrueIdxForEstimator, const ai_u8 *pFalseIdxForEstimator, const ai_handle pClassWeightsForEstimator, const ai_array_format classWeightsFormat, const ai_u8 *pClassNodeIdsForEstimator, const ai_u16 nbClassWithCurrentEstimator, const ai_u8 *pClassIdsForEstimator); AI_INTERNAL_API ai_i32 decodeEstimator_LEQ_16Bits(const ai_float *pDataIn, ai_float *pOutDataScores, const ai_u8 *pFeatureIdxForEstimator, const ai_float *pValuesForEstimator, const ai_u16 *pTrueIdxForEstimator, const ai_u16 *pFalseIdxForEstimator, ai_handle pClassWeightsForEstimator, const ai_array_format classWeightsFormat, const ai_u16 *pClassNodeIdsForEstimator, const ai_u16 nbClassWithCurrentEstimator, const ai_u16 *pClassIdsForEstimator); AI_API_DECLARE_END #endif /*LAYERS_TREE_ENSEMBLE_CLASSIFIER_H*/
4,238
C
36.513274
80
0.542237
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dense_is8os8ws8.h
/** ****************************************************************************** * @file lite_dense_is8os8ws8.h * @author Marco Forleo * @brief header file of AI platform lite dense kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_DENSE_IS8OS8WS8_H #define LITE_DENSE_IS8OS8WS8_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Forward function for a dense layer with signed input, * signed output and signed weights all at 8 bits. * @ingroup lite_dense_is8os8ws8 * @param input The pointer to input buffer. * @param output The pointer to output buffer. * @param weights The pointer to weights. * @param bias The pointer to bias (NULL if not available). * @param in_zeropoint The value of the zero point of the input. * @param out_zeropoint TThe value of the zero point of the output. * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. * @param n_pixels Total number of pixels. 
*/ LITE_API_ENTRY void forward_lite_dense_is8os8ws8(ai_i8 * pDataOut, const ai_i8 *pDataIn, const ai_i8 *pWeights, const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_u16 n_channel_in, const ai_u16 n_channel_out, const ai_size n_pixels, const ai_float in_scale, const ai_float out_scale, const ai_float Wt_scale, ai_i16 *pBuffer_a); void forward_lite_dense_is8os8ws8_ch(ai_i8 * pDataOut, const ai_i8 *pDataIn, const ai_i8 *pWeights, const ai_i32 *pBias, const ai_i8 in_zeropoint, const ai_i8 out_zeropoint, const ai_u16 n_channel_in, const ai_u16 n_channel_out, const ai_size n_pixels, const ai_float in_scale, const ai_float out_scale, const ai_float *pWt_scale, ai_i16 *pBuffer_a); #endif /*LITE_DENSE_IS8OS8WS8_H*/
3,465
C
44.605263
80
0.43088
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_platform_interface.h
/** ****************************************************************************** * @file ai_platform_interface.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform interface APIs types ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_PLATFORM_INTERFACE_H #define AI_PLATFORM_INTERFACE_H #pragma once #include "ai_platform.h" #include "datatypes_network.h" #include "ai_datatypes.h" #include "ai_datatypes_format.h" /*! * @defgroup datatypes_interface Interface Datatypes * @brief Data structures and defines used to implement neural networks */ /******************************************************************************/ #define AI_ERROR_TRAP(net_, type_, code_) \ ai_platform_network_set_error((net_), AI_CONCAT(AI_ERROR_,type_), \ AI_CONCAT(AI_ERROR_CODE_,code_)) /*! AI_PTR HANDLERS SECTION ************************************/ #define AI_PTR(ptr_) AI_CAST(ai_ptr, ptr_) #define AI_PTR_CONST(ptr_) AI_CAST(ai_ptr_const, ptr_) /*! STATIC ARRAYS ALLOCATOR SECTION ************************************/ #define AI_PACK_STORAGE_ARRAY(type_, dim_, ...) \ (type_[dim_]) { AI_PACK(__VA_ARGS__) } /*! AI_STORAGE_KLASS SECTION ************************************/ #define AI_STORAGE_KLASS_PACK(type_, dim_, ...) \ AI_PACK_STORAGE_ARRAY(type_, dim_, __VA_ARGS__) #define AI_STORAGE_KLASS_INIT(type_, size_, data_) \ { \ .type = (type_), \ .size = (size_), \ .data = (ai_handle)(data_), \ } /*! 
* @enum ai_storage_klass_type * @ingroup ai_platform_interface * @brief @ref ai_storage_class types enum */ typedef enum { AI_STORAGE_KLASS_NONE = 0x00, AI_STORAGE_KLASS_SHAPE = 0x01, AI_STORAGE_KLASS_STRIDE = 0x02, } ai_storage_klass_type; /*! * @struct ai_storage_klass * @ingroup ai_platform_interface * @brief Generic "Template" klass for generic storage arrays containers * from this klass several typed containers are derived (see e.g. @ref ai_shape) */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_storage_klass_s { ai_u32 type : 8; ai_u32 size : 24; ai_handle data; } ai_storage_klass; AI_PACKED_STRUCT_END /*! AI_SHAPES SECTION ************************************/ #define AI_SHAPE_MAX_DIMENSION (6) #define AI_SHAPE_2D_INIT(w_, h_) \ { .data = { (w_), (h_) } } #define AI_SHAPE_INIT(dim_, ...) \ AI_STORAGE_KLASS_INIT( \ AI_STORAGE_KLASS_SHAPE, \ dim_, \ AI_STORAGE_KLASS_PACK(ai_shape_dimension, dim_, ## __VA_ARGS__)) #define AI_SHAPE_INIT_FROM_BUFFER(dim_, buffer_) \ AI_STORAGE_KLASS_INIT( \ AI_STORAGE_KLASS_SHAPE, \ dim_, \ buffer_) #define AI_SHAPE_ALLOCATE_STATIC(num_dim_) \ AI_SHAPE_INIT((num_dim_), 0) typedef ai_u8 ai_shape_idx; /*! * @struct ai_shape * @ingroup ai_platform_interface * @brief Dimensions for generic 4D tensors */ typedef ai_storage_klass ai_shape; /*! AI_STRIDES HANDLERS SECTION ************************************/ #define AI_STRIDE_INIT(dim_, ...) \ AI_STORAGE_KLASS_INIT( \ AI_STORAGE_KLASS_STRIDE, \ dim_, \ AI_STORAGE_KLASS_PACK(ai_stride_dimension, dim_, ## __VA_ARGS__)) #define AI_STRIDE_INIT_FROM_BUFFER(dim_, buffer_) \ AI_STORAGE_KLASS_INIT( \ AI_STORAGE_KLASS_STRIDE, \ dim_, \ buffer_) #define AI_STRIDE_ALLOCATE_STATIC(num_dims_) \ AI_STRIDE_INIT((num_dims_), 0) /*! * @struct ai_stride * @ingroup ai_platform_interface * @brief Stride dimensions for generic 4D tensors (in number of elements) */ typedef ai_storage_klass ai_stride; /*! 
BASIC_TYPES HANDLERS SECTION ************************************/ #define AI_SIZE(value_) \ AI_CAST(ai_size, value_) /*! AI_KLASS_OBJ HANDLERS SECTION ************************************/ #define AI_KLASS_OBJ(obj_) \ AI_CAST(ai_klass_obj, obj_) /*! GENERIC HANDLERS SECTION ************************************/ #define AI_OBJ_DATA(obj_, type_) \ AI_CAST(type_, (obj_)->data) /*! AI_BUFFER HANDLERS SECTION ************************************/ #define AI_BUFFER_OBJ(ptr_) \ AI_CAST(ai_buffer*, ptr_) /*! AI_ARRAY HANDLERS SECTION ************************************/ #define AI_ARRAY_OBJ(ptr_) \ AI_CAST(ai_array*, ptr_) #define AI_ARRAY_OBJ_INIT_STATIC(type_, format_, size_, ...) { \ .format = AI_FMT_OBJ(format_), \ .size = (ai_array_size)(size_), \ .data = (ai_ptr)((type_[]){ __VA_ARGS__ }), \ .data_start = AI_PTR(0), \ } #define AI_ARRAY_OBJ_INIT(format_, data_, data_start_, size_) { \ .format = AI_FMT_OBJ(format_), \ .size = AI_CAST(ai_array_size, size_), \ .data = AI_PTR(data_), \ .data_start = AI_PTR(data_start_) } #define AI_ARRAY_OBJ_DECLARE_STATIC(name_, type_, format_, attr_, size_, ...) \ AI_ALIGNED(4) \ attr_ ai_array name_ = AI_ARRAY_OBJ_INIT_STATIC(type_, format_, size_, __VA_ARGS__); #define AI_ARRAY_OBJ_DECLARE(name_, format_, data_, data_start_, size_, attr_) \ AI_ALIGNED(4) \ attr_ ai_array name_ = AI_ARRAY_OBJ_INIT(format_, data_, data_start_, size_); /********************************* ai_array macros ***************************/ #define AI_PACK_ARRAYS(...) \ (ai_array[]) { AI_PACK(__VA_ARGS__) } #define AI_ARRAY_LIST_OBJ_INIT(arrays_ptr_) \ ((ai_array*)(arrays_ptr_)) #define AI_ARRAY_LIST_FLAGS(list_) \ ((list_) ? (list_)->flags : 0x0) #define AI_ARRAY_LIST_SIZE(list_) \ ((list_) ? (list_)->size : 0) #define AI_ARRAY_LIST_DATA(list_, pos_) \ ((list_) ? 
&((list_)->data[pos_]) : NULL) /********************************* ai_tensor macros **************************/ #define AI_TENSOR_OBJ(obj_) \ AI_CAST(ai_tensor*, obj_) #define AI_TENSOR_INFO_OBJ_INIT(id_, flags_, data_size_) { \ .id = (id_), \ .flags = (flags_), \ .data_size = (data_size_) \ } #define AI_TENSOR_OBJ_INIT(id_, flags_, shape_, stride_, arrays_size_, arrays_ptr_, klass_obj_) { \ .klass = (ai_klass_obj)(klass_obj_), \ .info = AI_TENSOR_INFO_OBJ_INIT(id_, flags_, arrays_size_), \ .shape = shape_, \ .stride = stride_, \ .data = AI_ARRAY_LIST_OBJ_INIT(AI_PACK(arrays_ptr_)), \ } #define AI_TENSOR_OBJ_DECLARE(name_, attr_, id_, flags_, shape_, stride_, \ arrays_size_, arrays_ptr_, klass_obj_) \ AI_ALIGNED(4) \ attr_ ai_tensor name_ = AI_TENSOR_OBJ_INIT(id_, flags_, AI_PACK(shape_), AI_PACK(stride_), \ arrays_size_, AI_PACK(arrays_ptr_), AI_PACK(klass_obj_)); /********************************* TENSOR STATE MACROS ***********************/ #define AI_TENSOR_STATE_OBJ_INIT(end_ptr_ , curr_ptr_, stride_, size_) \ { (end_ptr_), (curr_ptr_), (stride_), (size_) } /********************************* TENSOR LIST MACROS ************************/ #if (AI_TOOLS_API_VERSION <= AI_TOOLS_API_VERSION_1_3) #pragma message ("Including deprecated AI_TENSOR_LIST_ENTRY, AI_TENSOR_LIST_EMPTY, AI_TENSOR_LIST_IO_ENTRY") AI_DEPRECATED #define AI_TENSOR_LIST_EMPTY \ AI_TENSOR_LIST_OBJ_EMPTY AI_DEPRECATED #define AI_TENSOR_LIST_ENTRY(...) \ AI_TENSOR_LIST_OBJ_INIT(AI_FLAG_NONE, AI_NUMARGS(__VA_ARGS__), __VA_ARGS__) AI_DEPRECATED #define AI_TENSOR_LIST_IO_ENTRY(flags_, size_, ...) \ AI_TENSOR_LIST_IO_OBJ_INIT(flags_, size_, __VA_ARGS__) #endif /* AI_TOOLS_API_VERSION_1_3 */ #define AI_TENSOR_LIST_OBJ_INIT(flags_, size_, ...) 
\ { .size = (size_), .flags = (flags_), \ .tensor = (ai_tensor*[]) { __VA_ARGS__ }, .info = NULL \ } #define AI_TENSOR_LIST_OBJ_EMPTY \ { .size = 0, .flags = AI_FLAG_NONE, \ .tensor = (ai_tensor*[]) { NULL }, .info = NULL \ } #define AI_TENSOR_LIST_OBJ_DECLARE(name_, attr_, flags_, size_, ...) \ AI_ALIGNED(4) \ attr_ ai_tensor_list name_ = AI_TENSOR_LIST_OBJ_INIT( \ flags_, size_, __VA_ARGS__); /********************************* TENSOR LIST I/O MACROS ********************/ #define AI_TENSOR_LIST_IO_OBJ_INIT(flags_, size_, ...) \ { .size = (size_), .flags = (flags_), \ .tensor = (ai_tensor*[]) { __VA_ARGS__ }, \ .info = (ai_tensor_list_info[1]) { { \ .buffer = (ai_buffer[size_]){AI_STRUCT_INIT}, \ .state = (ai_tensor_state[size_]){AI_STRUCT_INIT}, \ .meta = (ai_buffer_meta_info[size_]){AI_STRUCT_INIT} \ } } \ } /********************************* TENSOR CHAIN MACROS ***********************/ #define AI_TENSOR_CHAIN_OBJ_INIT(flags_, size_, ...) \ { .size = (size_), .flags = (flags_), \ .chain = (ai_tensor_list[]){ __VA_ARGS__ } } #define AI_TENSOR_CHAIN_OBJ_DECLARE(name_, attr_, size_, ...) 
\ AI_ALIGNED(4) \ attr_ ai_tensor_chain name_ = \ AI_TENSOR_CHAIN_OBJ_INIT(AI_FLAG_NONE, size_, __VA_ARGS__); /********************************* TENSOR CHAIN I/O MACROS *******************/ #define AI_TENSOR_CHAIN_IO_OBJ_INIT(flags_, in_tensor_list_, out_tensor_list_) \ { .chain = (ai_tensor_list[]){ in_tensor_list_, out_tensor_list_ }, \ .size = 2, .flags = (flags_) } #define AI_TENSOR_CHAIN_IO_OBJ_DECLARE( \ name_, attr_, flags_, in_tensor_list_, out_tensor_list_) \ AI_ALIGNED(4) \ attr_ ai_tensor_chain_io name_ = \ AI_TENSOR_CHAIN_IO_OBJ_INIT(flags_, in_tensor_list_, out_tensor_list_); /******************************* NETWORK SECTION ****************************/ #define AI_NETWORK_OBJ(obj_) \ ((ai_network*)(obj_)) #if (AI_TOOLS_API_VERSION < AI_TOOLS_API_VERSION_1_5) AI_DEPRECATED #define AI_NETWORK_OBJ_INIT( \ weights_buffer_, activations_buffer_, \ in_tensor_list_ptr_, out_tensor_list_ptr_, \ in_node_ptr_, signature_, klass_obj_) { \ .magic = 0x0, \ .signature = signature_, \ .klass = AI_KLASS_OBJ(klass_obj_), \ .flags = AI_FLAG_NONE, \ .error = AI_ERROR_INIT(NONE, NONE), \ .n_batches = 0, \ .batch_id = 0, \ .buffers = AI_NETWORK_BUFFERS_INIT( \ AI_BUFFER_ARRAY_OBJ_INIT_STATIC(AI_FLAG_NONE, 1, AI_PACK(weights_buffer_)), \ AI_BUFFER_ARRAY_OBJ_INIT_STATIC(AI_FLAG_NONE, 1, AI_PACK(activations_buffer_))), \ .tensors = AI_TENSOR_CHAIN_IO_OBJ_INIT(AI_FLAG_NONE, \ AI_PACK(in_tensor_list_ptr_), \ AI_PACK(out_tensor_list_ptr_)), \ .input_node = AI_NODE_OBJ(in_node_ptr_), \ .current_node = AI_NODE_OBJ(NULL), \ .on_node_exec = NULL, \ .data_exec = NULL, \ .lite_cb = NULL, \ } #else #define AI_NETWORK_OBJ_INIT( \ weights_buffer_, activations_buffer_, \ in_tensor_list_ptr_, out_tensor_list_ptr_, \ in_node_ptr_, signature_, klass_obj_) { \ .magic = 0x0, \ .signature = signature_, \ .klass = AI_KLASS_OBJ(klass_obj_), \ .flags = AI_FLAG_NONE, \ .error = AI_ERROR_INIT(NONE, NONE), \ .n_batches = 0, \ .batch_id = 0, \ .buffers = 
AI_NETWORK_BUFFERS_INIT(AI_PACK(weights_buffer_), \ AI_PACK(activations_buffer_)), \ .tensors = AI_TENSOR_CHAIN_IO_OBJ_INIT(AI_FLAG_NONE, \ AI_PACK(in_tensor_list_ptr_), \ AI_PACK(out_tensor_list_ptr_)), \ .input_node = AI_NODE_OBJ(in_node_ptr_), \ .current_node = AI_NODE_OBJ(NULL), \ .on_node_exec = NULL, \ .data_exec = NULL, \ .lite_cb = NULL, \ } #endif // AI_TOOLS_API_VERSION #define AI_NETWORK_OBJ_DECLARE( \ name_, attr_, \ weights_buffer_, activations_buffer_, \ in_tensor_list_ptr_, out_tensor_list_ptr_, \ in_node_ptr_, signature_, klass_obj_) \ AI_ALIGNED(4) \ attr_ ai_network name_ = AI_NETWORK_OBJ_INIT( \ AI_PACK(weights_buffer_), \ AI_PACK(activations_buffer_), \ AI_PACK(in_tensor_list_ptr_), \ AI_PACK(out_tensor_list_ptr_), \ (in_node_ptr_), (signature_), (klass_obj_)); #define AI_NETWORK_ACQUIRE_CTX(handle_) \ AI_NETWORK_OBJ(ai_platform_context_acquire(handle_)) /******************************************************************************/ AI_API_DECLARE_BEGIN /*! * @typedef ai_version * @ingroup ai_platform_interface * @brief Packed representation for @ref ai_platform_version */ typedef uint32_t ai_version; /*! * @typedef ai_klass_obj * @ingroup ai_platform_interface * @brief handler to (private) generic subclass derivatives implementation */ typedef void* ai_klass_obj; /*! * @typedef ai_ptr * @ingroup ai_platform_interface * @brief Byte pointer data addressing */ typedef uint8_t* ai_ptr; /*! * @typedef ai_ptr_const * @ingroup ai_platform_interface * @brief Constant byte pointer data addressing */ typedef const uint8_t* ai_ptr_const; /*! * @typedef ai_ptr_offset * @ingroup ai_platform_interface * @brief byte offset for computing strides */ typedef int32_t ai_ptr_offset; /*! * @typedef ai_magic * @ingroup ai_platform_interface * @brief magic field to mark internal datatstructures */ typedef uint32_t ai_magic; /*! 
* @typedef ai_any_ptr * @ingroup ai_platform_interface * @brief union for defining any pointer */ typedef union { ai_handle handle; ai_ptr ptr; ai_float* float32; ai_double* float64; ai_u8* u8; ai_i8* s8; ai_u16* u16; ai_i16* s16; ai_u32* u32; ai_i32* s32; ai_u64* u64; ai_i64* s64; } ai_any_ptr; #define AI_ANY_PTR_INIT(ptr_) \ { .handle = (ai_handle)(ptr_) } #define AI_CONTEXT_FIELDS \ ai_magic magic; /*!< magic word to mark valid contexts datastructs*/ \ ai_signature signature; /*!< 32bit signature for network consistency checks */ #define AI_CONTEXT_OBJ(obj) ((ai_context*)(obj)) /*! * @typedef ai_context * @ingroup ai_platform_interface * @brief Abstract internal context header exposed to codegen interface */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_context_ { AI_CONTEXT_FIELDS } ai_context; AI_PACKED_STRUCT_END /*! * @enum ai_shape_2d_type * @ingroup ai_platform_interface * @brief Codes for the 2D tensor dimensions */ typedef enum { AI_SHAPE_2D_MAX_DIMENSION = 0x2, AI_SHAPE_2D_HEIGHT = 0x1, AI_SHAPE_2D_WIDTH = 0x0, } ai_shape_2d_type; /*! * @struct ai_shape_2d * @ingroup ai_platform_interface * @brief Dimensions for generic 2D tensors */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_shape_2d_s { ai_shape_dimension data[AI_SHAPE_2D_MAX_DIMENSION]; /*!< 2D tensor dimensions */ } ai_shape_2d; AI_PACKED_STRUCT_END /*! * @struct ai_array * @ingroup ai_platform_interface * @brief Generic flattened array with size * and (byte) stride of each item */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_array_s { ai_array_format format; /*!< array format (see @ref ai_array_format) */ ai_array_size size; /*!< number of elements in the array (NOT number of bytes!). The size of the array could be determine using @ref AI_ARRAY_GET_BYTE_SIZE macro */ ai_ptr data; /*!< pointer to data */ ai_ptr data_start; /*!< pointer to parent's data start address */ } ai_array; AI_PACKED_STRUCT_END /*! 
* @struct ai_tensor_info * @ingroup ai_platform_interface * @brief ai_tensor_info info structure for storing size of the array list, * tensor dimensionality, etc. * */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_info_s { ai_u16 id; ai_u8 flags; ai_u8 data_size; } ai_tensor_info; AI_PACKED_STRUCT_END /*! * @struct ai_tensor * @ingroup ai_platform_interface * @brief Generic tensor structure for storing parameters and activations * * The data is stored in a flattened array with an implicit order given by the * reverse order in @ref ai_shape_dimension: * in_channels, channels, width, height. */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_s { ai_klass_obj klass; /*!< opaque pointer to klass context */ ai_tensor_info info; /*!< tensor info metadata see @ref ai_tensor_info)*/ ai_shape shape; /*!< tensor shape see @ref ai_shape */ ai_stride stride; /*!< tensor stride see @ref ai_stride */ ai_array* data; /*!< flattened array pointer to tensor data */ } ai_tensor; AI_PACKED_STRUCT_END /*! * @struct ai_tensor_state * @ingroup ai_platform_interface * @brief state context for tensor management (used for I/O network tensors) */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_state_s { ai_ptr end_ptr; /*!< end address of the I/O tensor data buffer */ ai_ptr curr_ptr; /*!< current address of the I/O tensor data buffer (for batching) */ ai_ptr_offset stride; /*!< single batch buffer size (in bytes) */ ai_size size; /*!< total size in bytes of the I/O tensor buffer */ } ai_tensor_state; AI_PACKED_STRUCT_END /*! 
* @struct ai_tensor_list_info * @ingroup ai_platform_interface * @brief info metadata for tensor list management (used for I/O network tensors) */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_list_info_s { ai_tensor_state* state; /*!< I/O buffer internal pointers state */ ai_buffer* buffer; /*!< I/O buffer pointer */ ai_buffer_meta_info* meta; /*!< I/O buffer meta informations */ } ai_tensor_list_info; AI_PACKED_STRUCT_END /********************************* INTEGER QUANTIZATION DATATYPES ************/ #define AI_INTQ_INFO_OBJ_INIT(flags_, scale_ , zeropoint_) { \ .scale = (scale_), \ .zeropoint = (ai_handle)(zeropoint_), \ .flags = (flags_), \ } #define AI_PACK_INTQ_INFO_LIST(...) \ (ai_intq_info_list[]) { AI_PACK(__VA_ARGS__) } #define AI_PACK_INTQ_INFO(scale_, zp_) \ (INTQ_CONST ai_intq_info[1]) { { \ .scale = (INTQ_CONST ai_float*) AI_PACK(scale_), \ .zeropoint = (ai_handle) AI_PACK(zp_) \ } } #define AI_PACK_INTQ_SCALE(...) \ (INTQ_CONST ai_float[]) { AI_PACK(__VA_ARGS__) } #define AI_PACK_INTQ_ZP(...) \ (INTQ_CONST ai_i8[]) { AI_PACK(__VA_ARGS__) } #define AI_PACK_UINTQ_ZP(...) \ (INTQ_CONST ai_u8[]) { AI_PACK(__VA_ARGS__) } #define AI_PACK_INTQ_ZP16(...) \ (INTQ_CONST ai_i16[]) { AI_PACK(__VA_ARGS__) } #define AI_PACK_UINTQ_ZP16(...) \ (INTQ_CONST ai_u16[]) { AI_PACK(__VA_ARGS__) } #define AI_INTQ_INFO_LIST_OBJ_INIT(flags_, size_, info_) \ { \ .flags = (flags_), \ .size = (size_), \ .info = (info_), \ } #define AI_INTQ_INFO_LIST_OBJ_EMPTY { 0 } #define AI_INTQ_INFO_LIST_OBJ_DECLARE(name_, attr_, flags_, size_, info_) \ AI_ALIGNED(4) \ attr_ ai_intq_info_list name_ = \ AI_INTQ_INFO_LIST_OBJ_INIT(flags_, size_, AI_PACK(info_)); #define AI_INTQ_INFO_LIST_OBJ_DECLARE_EMPTY(name_, attr_) \ AI_ALIGNED(4) \ attr_ ai_intq_info_list name_ = AI_INTQ_INFO_LIST_OBJ_EMPTY; /********************************* TENSOR CHAINS DATATYPES *******************/ /*! 
* @enum ai_tensor_chain_type * @ingroup ai_platform_interface * @brief Enum for the different tensor chains supported in the library */ typedef enum { AI_TENSOR_CHAIN_INPUT = 0x0, AI_TENSOR_CHAIN_OUTPUT = 0x1, AI_TENSOR_CHAIN_WEIGHTS = 0x2, AI_TENSOR_CHAIN_SCRATCH = 0x3, AI_TENSOR_CHAIN_SIZE } ai_tensor_chain_type; /*! * @struct ai_tensor_list * @ingroup ai_platform_interface * @brief list (in form of arrays) of internal nodes tensor pointers */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_list_s { ai_u16 size; /*!< number of elements in the the tensor list */ ai_u16 flags; /*!< optional flags to store tensor list attributes */ ai_tensor** tensor; /*!< array of linked tensor pointer */ ai_tensor_list_info* info; /*!< pointer to an array of metainfo associated to the tensors */ } ai_tensor_list; AI_PACKED_STRUCT_END /*! * @struct ai_tensor_chain * @ingroup ai_platform_interface * @brief tensor chain datastruct for internal network nodes */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_tensor_chain_s { ai_u16 size; ai_u16 flags; ai_tensor_list* chain; /*!< pointer to a 4 sized array see @ref ai_tensor_chain_type */ } ai_tensor_chain; AI_PACKED_STRUCT_END /************************************** LAYER DATATYPES *******************/ /*! * @struct ai_layer * @ingroup ai_platform_interface * @brief Structure encoding a generic opaque layer in the network * */ typedef void ai_layer; /************************************** OBSERVER DATATYPES *******************/ /* forward function */ struct ai_node_s; /*! 
* @struct ai_observer_node * @ingroup ai_observer_interface * @brief observer node data struct for internal network nodes */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_observer_node_s { ai_u16 c_idx; /*!< node index (position in the execution list) */ ai_u16 type; /*!< node type info @see ai_node datastruct */ ai_u16 id; /*!< node id assigned by codegen tool to identify the model layer*/ ai_u16 unused; /*!< unused field for alignment */ const ai_tensor_chain* inner_tensors; /*!< pointer to the inner tensor if available */ const ai_tensor_chain* tensors; /*!< pointer to a 4 sized array see @ref ai_tensor_chain_type */ } ai_observer_node; AI_PACKED_STRUCT_END #define AI_OBSERVER_NONE_EVT (0) /*!< No event */ #define AI_OBSERVER_INIT_EVT (1 << 0) /*!< called at the end of the init function */ #define AI_OBSERVER_PRE_EVT (1 << 1) /*!< before c-node execution */ #define AI_OBSERVER_POST_EVT (1 << 2) /*!< after c-node execution */ #define AI_OBSERVER_FIRST_EVT (1 << 8) /*!< indicate the first c-node */ #define AI_OBSERVER_LAST_EVT (1 << 9) /*!< indicate the last c-node */ #define AI_OBSERVER_REGISTERED (1 << 24) /*!< internal flag */ #define AI_OBSERVER_MASK_EVT (0xFF) /*!< mask for requested user event */ /* Client callback definition */ typedef ai_u32 (*ai_observer_node_cb)( const ai_handle cookie, const ai_u32 flags, const ai_observer_node *node); AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_observer_exec_ctx_s { ai_observer_node_cb on_node; /*!< registered user observer call-back function */ ai_handle cookie; /*!< reference of the user context */ ai_u32 flags; /*!< flags definition */ ai_u16 c_idx; /*!< store/indicate the index of the current c_node */ ai_u16 n_nodes; /*!< total number of c_node */ struct ai_node_s *cur; /*!< pointer of the current node (pre or post) */ } ai_observer_exec_ctx; AI_PACKED_STRUCT_END typedef enum { AI_NODE_EXEC_INIT = 0x0, AI_NODE_EXEC_START = 0x1, AI_NODE_EXEC_PRE = 0x2, 
AI_NODE_EXEC_POST = 0x3, } ai_node_exec_state; /* Internal/private definition of node execution callback */ typedef ai_u32 (*ai_node_exec_cb)( const ai_node_exec_state state, struct ai_node_s *cur, const ai_handle ctx); /********************************* NETWORK DATATYPES *************************/ /*! * @struct ai_network * @ingroup layers * @brief Structure encoding a sequential neural network */ AI_PACKED_STRUCT_START typedef AI_ALIGNED_TYPE(struct, 4) AI_PACKED ai_network_s { AI_CONTEXT_FIELDS ai_klass_obj klass; /*!< opaque handler to specific network implementations */ ai_flags flags; /*!< bitflags mask to track some network state info */ ai_error error; /*!< track 1st error code in the network */ ai_u16 n_batches; /*!< number of batches to process */ ai_u16 batch_id; /*!< current batch to to process btw [0, n_batches)*/ // New 6.1 context storing explicitly network buffers. This allow also management of network persistent state now ai_network_buffers buffers; /*!< network buffers datastruct */ ai_tensor_chain tensors; /*!< I/O tensor chain list see @ref ai_tensor_list */ struct ai_node_s* input_node; /*!< first node to execute */ struct ai_node_s* current_node; /*!< current node to execute */ ai_node_exec_cb on_node_exec; /*!< registered call-back function called when a node/operator is scheduled */ ai_handle data_exec; /*!< private reference for the runtime context */ ai_handle lite_cb; /*!< registered opaque call-back handler for lite APIs */ ai_version tool_api_version; /*! Tools Codegen API version */ } ai_network; AI_PACKED_STRUCT_END /*! * @brief Get platform runtime lib revision version as string. * @ingroup ai_platform_interface * @return a string containing the revision of the runtime library */ AI_INTERFACE_TYPE const char* ai_platform_runtime_get_revision(void); /*! * @brief Get platform runtime lib version as datastruct. 
* @ingroup ai_platform_interface * @return a datastruct containing the version of the runtime library */ AI_INTERFACE_TYPE ai_platform_version ai_platform_runtime_get_version(void); /*! * @brief Get platform public APIs version as datastruct. * @ingroup ai_platform_interface * @return a datastruct containing the version of the public APIs */ AI_INTERFACE_TYPE ai_platform_version ai_platform_api_get_version(void); /*! * @brief Get platform interface private APIs version as datastruct. * @ingroup ai_platform_interface * @return a datastruct containing the version of the interface private APIs */ AI_INTERFACE_TYPE ai_platform_version ai_platform_interface_api_get_version(void); /**************************************************************************** ** Context APIs ****************************************************************************/ /*! * @brief Get platform context. * @ingroup ai_platform_interface * @return a valid context handle or NULL otherwise */ AI_INTERFACE_TYPE ai_context* ai_platform_context_acquire(const ai_handle handle); /*! * @brief Release platform context. * @ingroup ai_platform_interface * @return an opaque handle to the released object */ AI_INTERFACE_TYPE ai_handle ai_platform_context_release(ai_context* ctx); /**************************************************************************** ** Platform Network Params APIs ****************************************************************************/ /*! * @brief get the weights map from user provided network params info * @ingroup ai_platform_interface * @param params a pointer to ai_network_params struct * @param map table pointer to the table map to initialize * @param map_size the number of entries of the table to initialize * @return true if initialization succeeded, false otherwise */ AI_INTERFACE_TYPE ai_bool ai_platform_get_weights_map( ai_ptr* map, const ai_size map_size, const ai_network_params* params); /*! 
* @brief get the activations map from user provided network params info * @ingroup ai_platform_interface * @param params a pointer to ai_network_params struct * @param map table pointer to the table map to initialize * @param map_size the number of entries of the table to initialize * @return true if initialization succeeded, false otherwise */ AI_INTERFACE_TYPE ai_bool ai_platform_get_activations_map( ai_ptr* map, const ai_size map_size, const ai_network_params* params); /*! * @brief bind code generated weights and activations map arrays to ai_netwoek_params * @ingroup ai_platform_interface * @param[out] params the network params struct reporting binded params * @param[in] map_weights pointer to the codegened weights map array to be bound * @param[in] map_activations pointer to the codegened activation map array to be bound * @return true if network parameters binding succeed, false otherwise */ AI_INTERFACE_TYPE ai_bool ai_platform_bind_network_params( ai_network_params* params, const ai_buffer_array* map_weights, const ai_buffer_array* map_activations); /**************************************************************************** ** Platform Network APIs ****************************************************************************/ /*! * @brief get **first** error tracked when using the network * @ingroup ai_platform_interface * @param network an opaque handler to the network context * @return ai_error the FIRST error generated during network processing */ AI_INTERFACE_TYPE ai_error ai_platform_network_get_error(ai_handle network); /*! * @brief Set specific error code of the network. 
if an error is already present * keep it * @ingroup ai_platform_interface * @param net_ctx a pointer to the network context * @param type error type as defined in @ref ai_error_type * @param code error code as defined in @ref ai_error_code * @return true if no previous errors where recorded, false if a previous error * is present or context is invalid */ AI_INTERFACE_TYPE ai_bool ai_platform_network_set_error( ai_network* net_ctx, const ai_error_type type, const ai_error_code code); /*! * @brief Finalize network report datastruct with I/O buffer infos * @ingroup ai_platform_interface * @return bool if the report has been finalized correctly. false otherwise */ AI_INTERFACE_TYPE ai_bool ai_platform_api_get_network_report( ai_handle network, ai_network_report* r); /*! * @brief Get network inputs array pointer as a ai_buffer array pointer. * @ingroup network * @param network an opaque handler to the network context * @param n_buffer optional parameter to return the number of inputs * @return a ai_buffer pointer to the inputs arrays */ AI_INTERFACE_TYPE ai_buffer* ai_platform_inputs_get(ai_handle network, ai_u16 *n_buffer); /*! * @brief Get network outputs array pointer as a ai_buffer array pointer. * @ingroup network * @param network an opaque handler to the network context * @param n_buffer optional parameter to return the number of outputs * @return a ai_buffer pointer to the inputs arrays */ AI_INTERFACE_TYPE ai_buffer* ai_platform_outputs_get(ai_handle network, ai_u16 *n_buffer); /*! 
* @brief create a network context with some error check * @ingroup ai_platform_interface * @param a pointer to an opaque handle of the network context * @param an (optional) pointer to the network config buffer info * @param net_ctx a pointer to the network context structure to initialize * @param tool_major major version id of the tool used to generate the network * @param tool_minor minor version id of the tool used to generate the network * @param tool_micro micro version id of the tool used to generate the network * @return the error during network creation or error none if ok */ AI_INTERFACE_TYPE ai_error ai_platform_network_create( ai_handle* network, const ai_buffer* network_config, ai_network* net_ctx, const ai_u8 tool_major, const ai_u8 tool_minor, const ai_u8 tool_micro); /*! * @brief destroy a network context * @ingroup ai_platform_interface * @param network a pointer to an opaque handle of the network context * @return AI_HANDLE_NULL if deallocation OK, same network handle if failed */ AI_INTERFACE_TYPE ai_handle ai_platform_network_destroy(ai_handle network); /*! * @brief initialize the network context * @ingroup ai_platform_interface * @param network a pointer to an opaque handle of the network context * @return a valid network context, NULL if initialization failed */ AI_INTERFACE_TYPE ai_network* ai_platform_network_init( ai_handle network, const ai_network_params* params); /*! * @brief post-initialize of the network context. * @ingroup ai_platform_interface * @param network a pointer to an opaque handle of the network context * @return a valid network context, NULL if initialization failed */ AI_INTERFACE_TYPE ai_bool ai_platform_network_post_init(ai_handle network); /*! 
* @brief main platform runtime execute of a network * @ingroup ai_platform_interface * @param network an opaque handler to the network context * @param input a pointer to the input buffer data to process * @param output a pointer to the output buffer * @return the number of batches processed from the input. A result <=0 in case * of error */ AI_INTERFACE_TYPE ai_i32 ai_platform_network_process( ai_handle network, const ai_buffer* input, ai_buffer* output); /**************************************************************************** ** Observer APIs ****************************************************************************/ /*! * @brief Return the info of a requested c-node (defined by the * c_idx field). Should be called after the initialization phase. * @ingroup ai_platform_observer * @param network a pointer to an opaque handle of the network context * @param node_info a pointer to a reference of the node description * @return true if the node_info->c_idx designates a valid index else * false (network error is updated). */ AI_INTERFACE_TYPE ai_bool ai_platform_observer_node_info( ai_handle network, ai_observer_node *node_info); /*! * @brief Register an observer context. Allows to register a client CB which * will be called before or/and after the execution of a c-node with * the references of the used tensors (see @ref ai_observer_node). * @ingroup ai_platform_observer * @param network a pointer to an opaque handle of the network context * @param cb reference of the user callback function * @param cookie reference of a user object/ctx * @param flags indicate expected events (see AI_OBSERVER_XX_EVT flag definition) * @return false if the registration has failed (network error is updated) else true * of error. */ AI_INTERFACE_TYPE ai_bool ai_platform_observer_register( ai_handle network, ai_observer_node_cb cb, ai_handle cookie, ai_u32 flags); AI_INTERFACE_TYPE ai_bool ai_platform_observer_register_s(ai_handle network, ai_observer_exec_ctx *ctx); /*! 
* @brief un-register the observer context. * @ingroup ai_platform_observer * @param network a pointer to an opaque handle of the network context * @param ctx a pointer to a reference of the registered platform observer context * @param cb reference of the registered user callback function * @param cookie reference of the registered user object/ctx * @return false if the un-registration has failed (network error is updated) else true * of error. */ AI_INTERFACE_TYPE ai_bool ai_platform_observer_unregister(ai_handle network, ai_observer_node_cb cb, ai_handle cookie); AI_INTERFACE_TYPE ai_bool ai_platform_observer_unregister_s(ai_handle network, ai_observer_exec_ctx *ctx); AI_API_DECLARE_END #endif /*AI_PLATFORM_INTERFACE_H*/
35,318
C
33.091699
115
0.615012
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/core_common.h
/** ****************************************************************************** * @file core_common.h * @author AST Embedded Analytics Research Platform * @brief header file of common core datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef CORE_COMMON_H #define CORE_COMMON_H #pragma once #include "ai_platform.h" #include "ai_platform_interface.h" #include "core_datatypes.h" // #include "core_log.h" /*! * @defgroup core_common Common Core Library Routines * @brief Common macros, datatypes and routines of core common module * @details This module contains the definitons and handling of the @ref ai_node * datastructures. An ai_node is a generic abstraction for a network node that * could be either a fixed function layer or an operator. Ideally the platform * interface defined in api module should handle an process generic nodes in the * network, not relying on the fact that they are layers or operators datastructs * Specific implementative details should be kept inside layers and operators * modules. The core module implements additionally common routines used in the * layers and operators modules. 
*/ /******************************************************************************/ #ifdef HAS_AI_ASSERT #define ASSERT_ARRAY_SANITY(a_) \ AI_ASSERT((a_) && (a_)->size>0) #define ASSERT_ARRAY_DATA_SANITY(a_) \ ASSERT_ARRAY_SANITY(a_) \ AI_ASSERT((a_)->data && (a_)->data_start) #define ASSERT_TENSOR_SANITY(t_) \ AI_ASSERT((t_) && (t_)->data) \ AI_ASSERT(CORE_TENSOR_GET_SHAPE_SIZE(t_)>0) \ ASSERT_ARRAY_SANITY((t_)->data) #define ASSERT_TENSOR_LIST_SANITY(tlist_) \ AI_ASSERT((tlist_) && (GET_TENSOR_LIST_SIZE(tlist_)>0)) \ #define ASSERT_TENSOR_DATA_SANITY(t_) \ ASSERT_TENSOR_SANITY(t_) \ ASSERT_ARRAY_DATA_SANITY((t_)->data) #define ASSERT_NODE_SANITY(node_) \ do { \ AI_ASSERT(AI_NODE_OBJ(node_)->tensors && AI_NODE_OBJ(node_)->tensors->chain) \ ASSERT_TENSOR_SANITY(GET_TENSOR_IN(AI_NODE_OBJ(node_)->tensors, 0)) \ ASSERT_TENSOR_SANITY(GET_TENSOR_OUT(AI_NODE_OBJ(node_)->tensors, 0)) \ } while (0); #else #define ASSERT_ARRAY_SANITY(a_) /* ASSERT_ARRAY_SANITY */ #define ASSERT_ARRAY_DATA_SANITY(a_) /* ASSERT_ARRAY_DATA_SANITY */ #define ASSERT_TENSOR_SANITY(t_) /* ASSERT_TENSOR_SANITY */ #define ASSERT_TENSOR_LIST_SANITY(tlist_) /* ASSERT_TENSOR_LIST_SANITY */ #define ASSERT_TENSOR_DATA_SANITY(t_) /* ASSERT_TENSOR_DATA_SANITY */ #define ASSERT_NODE_SANITY(node_) /* ASSERT_NODE_SANITY */ #endif /*HAS_AI_ASSERT*/ #if defined(__GNUC__) || defined(__clang__) /* Suppress unused function warnings */ #define AI_UNUSED_FUNCTION __attribute__((unused)) /* Manage false positives in address sanitizer */ #define AI_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) #else #define AI_UNUSED_FUNCTION /* AI_UNUSED_FUNCTION */ #define AI_NO_SANITIZE_ADDRESS /* AI_NO_SANITIZE_ADDRESS */ #endif /******************************************************************************/ #define AI_NODE_TYPE(type_) \ ((ai_node_type)((ai_u32)(type_)&0xFFFF)) #define AI_NODE_OBJ(obj_) \ ((ai_node*)(obj_)) #define AI_NODE_FUNC(func_) \ ((node_func)(func_)) #define AI_NODE_COMMON_FIELDS_DECLARE \ 
ai_node_type type; /*!< node type id (see @ref ai_node_type) */ \ ai_id_obj id; /*!< node object instance id (see @ref ai_id_obj) */ \ ai_flags flags; /*!< node object flags */ \ ai_klass_obj klass; /*!< opaque handler to specific layer implementations */ \ struct ai_network_s* network; /*!< handle to global network context */ \ struct ai_node_s* next; /*!< the next node object in the sequence */ \ node_func forward; /*!< forward function for the node */ \ AI_CONST ai_tensor_chain* tensors; /*!< pointer to node tensor chain */ #define AI_NODE_STATEFUL_FIELDS_DECLARE \ AI_NODE_COMMON_FIELDS_DECLARE \ ai_handle state; \ node_func init; \ node_func update; \ node_func destroy; #define AI_NODE_COMMON_INIT(type_, id_, flags_, klass_, network_, next_, forward_) \ .type = AI_NODE_TYPE(type_), \ .id = AI_ID_OBJ(id_), \ .flags = (flags_), \ .klass = AI_KLASS_OBJ(klass_), \ .network = AI_NETWORK_OBJ(network_), \ .next = AI_NODE_OBJ(next_), \ .forward = AI_NODE_FUNC(forward_) /*****************************************************************************/ /** Network Tensors Chains / Lists Handlers **/ /*****************************************************************************/ #define AI_FOR_EACH_TENSOR_CHAIN_DO(tlist_ptr_, chain_) \ ai_tensor_list* tlist_ptr_ = (chain_)->chain; \ for (; tlist_ptr_<(((chain_)->chain)+((chain_)->size)); tlist_ptr_++) #define AI_FOR_EACH_TENSOR_LIST_DO(idx_, t_ptr_, tlist_ptr_) \ ai_tensor* t_ptr_ = NULL; \ for (ai_size idx_ = 0; (idx_ < GET_TENSOR_LIST_SIZE(tlist_ptr_)) && \ ((t_ptr_ = GET_TENSOR_LIST_ITEM(tlist_ptr_, idx_)) != NULL); ++idx_) #define GET_TENSOR_LIST_INFO(list_) \ ((list_)->info) #define GET_TENSOR_LIST_META(list_, pos_) \ (&(GET_TENSOR_LIST_INFO(list_)->meta[pos_])) #define GET_TENSOR_LIST_STATE(list_, pos_) \ (&(GET_TENSOR_LIST_INFO(list_)->state[pos_])) #define GET_TENSOR_LIST_BUFFER(list_, pos_) \ (&(GET_TENSOR_LIST_INFO(list_)->buffer[pos_])) #define GET_TENSOR_LIST_ITEM(list_, pos_) \ 
((NULL!=GET_TENSOR_LIST_ITEMS(list_)) \ ? GET_TENSOR_LIST_ITEMS(list_)[(pos_)] : NULL) #define GET_TENSOR_LIST_ITEMS(list_) \ ((list_)->tensor) #define GET_TENSOR_LIST_SIZE(list_) \ ((NULL!=(list_)) ? (list_)->size : 0) #define GET_TENSOR_CHAIN_SIZE(chain_) \ ((NULL!=(chain_)) ? (chain_)->size : 0) #define GET_TENSOR_LIST(chain_, type_) \ ((AI_CONCAT(AI_TENSOR_CHAIN_, type_)<(chain_)->size) \ ? &(chain_)->chain[AI_CONCAT(AI_TENSOR_CHAIN_, type_)] : NULL) #define GET_TENSOR_LIST_IN(chain_) \ (GET_TENSOR_LIST(chain_, INPUT)) #define GET_TENSOR_LIST_OUT(chain_) \ (GET_TENSOR_LIST(chain_, OUTPUT)) #define GET_TENSOR_LIST_WEIGTHS(chain_) \ (GET_TENSOR_LIST(chain_, WEIGHTS)) #define GET_TENSOR_LIST_SCRATCH(chain_) \ (GET_TENSOR_LIST(chain_, SCRATCH)) #define GET_TENSOR_IN(chain_, pos_) \ (GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_IN(chain_), (pos_))) #define GET_TENSOR_OUT(chain_, pos_) \ (GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_OUT(chain_), (pos_))) #define GET_TENSOR_WEIGHTS(chain_, pos_) \ (GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_WEIGTHS(chain_), (pos_))) #define GET_TENSOR_SCRATCH(chain_, pos_) \ (GET_TENSOR_LIST_ITEM(GET_TENSOR_LIST_SCRATCH(chain_), (pos_))) /******************************************************************************/ #if 1 #define SECTION_SERIAL(expr) expr #define SECTION_PARALLEL(expr) #else #define SECTION_SERIAL(expr) #define SECTION_PARALLEL(expr) expr #endif AI_API_DECLARE_BEGIN /*! * @struct ai_node_type * @ingroup core_common * @brief generic network node numeric type ID * */ typedef uint16_t ai_node_type; /*! * @typedef void (*node_func)(struct ai_node_s* node) * @ingroup core_common * @brief Callback signatures for all forward functions */ typedef void (*node_func)(struct ai_node_s* node); /*! * @typedef ai_float (*func_nl_el)(const ai_float x) * @ingroup core_common * @brief Fuction pointer for generic elementwise transforms * * This function pointer abstracts a generic nonlinear function applied to a * single element. 
See @ref ai_math_sqrt in @ref math_helpers as examples. */ typedef ai_float (*func_nl_el)(const ai_float x); /*! * @struct ai_node * @ingroup core_common * @brief Structure encoding a generic node of the network * * The node struct includes information about the network it belong to, the * next node in a sequential network and the forward function. The forward * functions are implemented in the @ref layers module. */ typedef AI_ALIGNED_TYPE(struct, 4) ai_node_s { AI_NODE_COMMON_FIELDS_DECLARE } ai_node; /*! * @struct ai_node_stateful * @ingroup core_common * @brief Structure encoding a stateful node of the network * * The node struct includes information about the network it belong to, the * next node in a sequential network and the init, update and forward functions. * The node functions are implemented in the @ref layers module. */ typedef AI_ALIGNED_TYPE(struct, 4) ai_node_stateful_s { AI_NODE_STATEFUL_FIELDS_DECLARE } ai_node_stateful; /*! * @brief initialize core module * @ingroup core_common * @return false if initialization fails, false otherwise */ AI_INTERNAL_API ai_bool core_init(void); /*! * @brief get 1st error raised during processing * @ingroup core_common * @param[out] error the @ref ai_error recorded during processing * @return the 1st error generated during processing. If no errors AI_ERROR_NONE */ AI_INTERNAL_API ai_error core_get_error(ai_error* error); /*! * @brief set error recorded during processing * @ingroup core_common * @param[out] error the @ref ai_error to set * @param[in] type the specific error type to set * @param[in] code the specific error code to set * @return true if the error is set, false in case a precedent error was already */ AI_INTERNAL_API ai_bool core_set_error( ai_error* error, const ai_error_type type, const ai_error_code code); AI_API_DECLARE_END #endif /*CORE_COMMON_H*/
9,995
C
33.588235
92
0.615408
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers.h
/** ****************************************************************************** * @file layers.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_H #define LAYERS_H #pragma once #include "layers_common.h" #include "layers_conv2d.h" #include "layers_custom.h" #include "layers_dense.h" #include "layers_formats_converters.h" #include "layers_generic.h" #include "layers_lite_graph.h" #include "layers_nl.h" #include "layers_norm.h" #include "layers_pad_dqnn.h" #include "layers_pad_generic.h" #include "layers_pool.h" #include "layers_rnn.h" #include "layers_upsample_generic.h" #include "layers_sm.h" #include "layers_ml.h" #include "layers_ml_iforest.h" #include "layers_ml_svc.h" #include "layers_ml.h" #include "layers_ml_linearclassifier.h" #include "layers_ml_treeensembleclassifier.h" #include "layers_ml_treeensembleregressor.h" #include "layers_ml_svmregressor.h" #include "layers_conv2d_dqnn.h" #include "layers_dense_dqnn.h" #include "layers_pool_dqnn.h" #include "layers_generic_dqnn.h" #include "layers_upsample_generic.h" // #include "layers_template.h" AI_API_DECLARE_BEGIN /*! 
* @struct ai_any_layer_ptr * @ingroup layers * @brief Generic union for typed layers pointers */ typedef struct { ai_layer_type type; /*!< layer type id (see @ref ai_layer_type) */ union { #define LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_) \ AI_CONCAT(ai_layer_, struct_)* struct_; #include "layers_list.h" }; } ai_any_layer_ptr; AI_API_DECLARE_END #endif /*LAYERS_H*/
2,190
C
27.089743
84
0.603653
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/core_net_inspect_interface.h
/** ****************************************************************************** * @file core_net_inspect_interface.h * @author AST Embedded Analytics Research Platform * @brief header file of core network inspection interface APIs ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef __CORE_NET_INSPECT_INTERFACE_H_ #define __CORE_NET_INSPECT_INTERFACE_H_ #pragma once #include "ai_platform.h" AI_API_DECLARE_BEGIN /*! * @defgroup core_validation Validation Core * @brief Implementation of the validation network interface headers */ /*! * @struct ai_inspect_node_info * @brief network node inspection context: there is one of this datastruct * for each node of the network */ typedef struct ai_inspect_node_info_s { ai_u16 type; /*!< node type info @see ai_node datastruct */ ai_u16 id; /*!< node id assigned by codegen tool to identify the specific node instance */ ai_u16 batch_id; /*!< current node batch processed */ ai_u16 n_batches; /*!< total number of node batches to process */ ai_float elapsed_ms; /*!< node performance analysys: time in milliseconds to execute the node forward function */ ai_u16 in_size; /*!< number of node's input activation buffers */ ai_u16 out_size; /*!< number of node's output activation buffers */ ai_buffer* in; /*!< input node activation buffer see @ref ai_buffer */ ai_buffer* out; /*!< output node activation buffer see @ref ai_buffer */ } ai_inspect_node_info; /*! 
* @struct ai_inspect_net_report * @brief network inspection report context */ typedef struct ai_inspect_net_report_s { ai_u32 id; /*!< id of the report */ ai_signature signature; /*!< network identification checksum */ ai_u32 num_inferences; /*!< total number of inferences processed during the inspection */ ai_u32 n_nodes; /*!< number of nodes in the network */ ai_float elapsed_ms; /*!< network total time (in ms) for processing num_inferences inferences */ ai_inspect_node_info* node; /*!< pointer to the array of size n_nodes where a single node report is reported. see @ref ai_inspect_node_info datastruct */ } ai_inspect_net_report; /*! * @enum net inspector inspection mode * @brief configuration flags to set net inspection mode */ typedef enum { VALIDATION_INSPECT = (0x1<<0), /**< Network validation inspection mode */ STORE_ALL_IO_ACTIVATIONS = (0x1<<7), /**< Store all I/O activations on snapshot datastruct */ } ai_inspect_mode; typedef enum { AI_NODE_EXEC_PRE_FORWARD_STAGE = 0x0, AI_NODE_EXEC_POST_FORWARD_STAGE = 0x1, } ai_node_exec_stage; /*! * @brief function pointer to callback report */ typedef void (*ai_inspect_report_cb_func)( const ai_handle cookie, const ai_inspect_net_report* report); /*! * @brief function pointer to node execute */ typedef void (*ai_inspect_exec_node_cb_func)( const ai_handle cookie, const ai_inspect_node_info* node_info, const ai_node_exec_stage stage); /*! 
* @struct ai_inspect_config * @brief inspection config datastruct */ typedef struct ai_inspect_config_s { ai_u8 validation_mode; /*!< validation mode flags see @ref ai_inspect_mode */ ai_u8 log_level; /*!< log class level see @ref LOG_SUDO */ ai_bool log_quiet; /*!< log class quiet mode */ ai_inspect_report_cb_func on_report_destroy; /*!< callback function called when a report datastruct is released from memory */ ai_inspect_exec_node_cb_func on_exec_node; /*!< callback function called when a node is executed (pre & post) */ ai_handle cookie; } ai_inspect_config; AI_API_DECLARE_END #endif /*__CORE_NET_INSPECT_INTERFACE_H_*/
4,734
C
37.495935
98
0.562315
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_rnn.h
/** ****************************************************************************** * @file layers_rnn.h * @author AST Embedded Analytics Research Platform * @brief header file of RNN layers ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_RNN_H #define LAYERS_RNN_H #pragma once #include "layers_common.h" #include "layers_nl.h" AI_API_DECLARE_BEGIN /*! * @struct ai_layer_lstm * @ingroup layers * @brief LSTM layer with generic nonlinearities and peephole connections */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lstm_ { AI_LAYER_STATEFUL_FIELDS_DECLARE ai_size n_units; /**< size of the hidden RNN state */ func_nl activation_nl; /**< activation nonlinearity (input to cell) */ func_nl recurrent_nl; /**< recurrent nonlinearity (hidden to cell) */ func_nl out_nl; /**< output nonlinearity (cell to hidden) */ ai_bool go_backwards; /**< process reversed input */ ai_bool return_state; /**< return state */ ai_bool reverse_seq; /**< reverse output sequence */ ai_float cell_clip; /**< cell clip value */ } ai_layer_lstm; /*! 
* @struct ai_layer_gru * @ingroup layers * @brief Gated Recurrent Unit (GRU) layer with generic nonlinearities */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gru_ { AI_LAYER_STATEFUL_FIELDS_DECLARE ai_size n_units; /**< size of the hidden RNN state */ func_nl activation_nl; /**< activation nonlinearity (input to cell) */ func_nl recurrent_nl; /**< recurrent nonlinearity (hidden to cell) */ ai_bool reset_after; ai_bool return_state; ai_bool go_backwards; /**< process reversed input */ ai_bool reverse_seq; /**< reverse output sequence */ } ai_layer_gru; /*! * @struct ai_layer_rnn * @ingroup layers * @brief Simple Recurrent Neural Network (RNN) layer */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_rnn_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_size n_units; /**< size of the hidden RNN state */ func_nl activation_nl; /**< activation nonlinearity (input to hidden) */ ai_bool go_backwards; /**< process reversed input */ ai_bool reverse_seq; /**< reverse output sequence */ ai_bool return_state; } ai_layer_rnn; /*! * @brief Initialize a Long-Short Term Memory (LSTM) layer. * @ingroup layers * * Function used to initialize lstm internal state */ AI_INTERNAL_API void init_lstm(ai_layer * layer); /*! * @brief Destroy a Long-Short Term Memory (LSTM) layer state. * @ingroup layers * * Function used to destroy lstm internal state */ AI_INTERNAL_API void destroy_lstm(ai_layer * layer); /*! * @brief Computes the activations of a Long-Short Term Memory (LSTM) layer. 
* @ingroup layers * * Implements a Long-Short Term Layer with peephole connections: * \f{eqnarray*}{ * i_t &=& \sigma_a(x_t W_{xi} + h_{t-1} W_{hi} * + w_{ci} \odot c_{t-1} + b_i)\\ * f_t &=& \sigma_a(x_t W_{xf} + h_{t-1} W_{hf} * + w_{cf} \odot c_{t-1} + b_f)\\ * c_t &=& f_t \odot c_{t - 1} * + i_t \odot \sigma_r(x_t W_{xc} + h_{t-1} W_{hc} + b_c)\\ * o_t &=& \sigma_a(x_t W_{xo} + h_{t-1} W_{ho} + w_{co} \odot c_t + b_o)\\ * h_t &=& o_t \odot \sigma_o(c_t) * \f} * where \f$\sigma_a\f$ is the activation nonlinearity, \f$\sigma_r\f$ is the * recurrent nonlinearity and \f$\sigma_o\f$ is the out nonlinearity. The * \f$W_x\f$, \f$W_h\f$ and \f$W_c\f$ weights are sliced from the kernel, * recurrent and peephole weights. * * @param layer the LSTM layer */ AI_INTERNAL_API void forward_lstm(ai_layer * layer); /*! * @brief Initialize a Gated Recurrent Unit (GRU) layer. * @ingroup layers * * Function used to initialize gru internal state */ AI_INTERNAL_API void init_gru(ai_layer * layer); /*! * @brief Destroy a Gated Recurrent Unit (GRU) layer state. * @ingroup layers * * Function used to destroy gru internal state */ AI_INTERNAL_API void destroy_gru(ai_layer * layer); /*! * @brief Computes the activations of a Gated Recurrent Unit (GRU) layer. * @ingroup layers * * Implements a Gated Recurrent Unit with the formula: * \f{eqnarray*}{ * r_t &=& \sigma_a(x_t W_{xr} + h_{t - 1} W_{hr} + b_r) \\ * z_t &=& \sigma_a(x_t W_{xz} + h_{t - 1} W_{hz} + b_z) \\ * c_t &=& \sigma_r(x_t W_{xc} + r_t \odot (h_{t - 1} W_{hc} + b_{hc}) + b_c) * \qquad \textnormal{when reset after is true} \\ * c_t &=& \sigma_r(x_t W_{xc} + (r_t \odot h_{t - 1}) W_{hc} + b_{hc} + b_c) * \qquad \textnormal{when reset after is false (default)} \\ * h_t &=& (1 - z_t) \odot h_{t - 1} + z_t \odot c_t * \f} * where \f$\sigma_a\f$ is the activation nonlinearity and \f$\sigma_r\f$ is * the recurrent nonlinearity. The weights are sliced from the kernel and * recurrent weights. 
* * @param layer the GRU layer */ AI_INTERNAL_API void forward_gru(ai_layer * layer); /*! * @brief Computes the activations of a Recurrent Neural Network (RNN) layer. * @ingroup layers * * Implements a recurrent layer with the formula: * \f{eqnarray*}{ * h_t &=& \sigma_a(x_t W_{xr} + h_{t - 1} W_{hr} + b_r) * \f} * where \f$\sigma_a\f$ is the activation nonlinearity. The weights are sliced * from the kernel and recurrent weights. * * @param layer the RNN layer */ AI_INTERNAL_API void forward_rnn(ai_layer * layer); AI_API_DECLARE_END #endif /* LAYERS_RNN_H */
5,866
C
30.713513
80
0.596147
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_lite_graph.h
/** ****************************************************************************** * @file layers_lite_graph.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform lite graph layers wrapper interface ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_LITE_GRAPH_H #define LAYERS_LITE_GRAPH_H #pragma once #include "core_common.h" /*! * @defgroup layers_lite_graph Lite Graph Wrapper Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_lite_graph * @ingroup layers_lite_graph * @brief Generic Lite Graph Layer Wrapper * * The type of lite graph is handled by the specific forward lite graph function. */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_lite_graph_ { AI_NODE_COMMON_FIELDS_DECLARE ai_handle* activations_map; /*!< array of pointers to shared activations memory pools */ ai_handle* weights_map; /*!< array of pointers to shared weights memory pools */ } ai_layer_lite_graph; AI_API_DECLARE_END #endif /*LAYERS_LITE_GRAPH_H*/
1,598
C
29.169811
98
0.558824
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/core_datatypes.h
/** ****************************************************************************** * @file core_datatypes.h * @author AST Embedded Analytics Research Platform * @brief header file of core module private defines and datatypes * to public nor codegen tool ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_CORE_DATATYPES_H #define AI_CORE_DATATYPES_H #pragma once #include <stdint.h> /*! * @defgroup Core Module Datatypes * @brief Data structures and defines used by core module */ /*! * @brief platform runtime core library version */ #define AI_PLATFORM_RUNTIME_MAJOR 8 #define AI_PLATFORM_RUNTIME_MINOR 1 #define AI_PLATFORM_RUNTIME_MICRO 0 #define AI_PLATFORM_RUNTIME_BUILD A1-SNAPSHOT #define AI_MAGIC_CONTEXT_TOKEN (0xA1C00100) /*!< AI Cool! Magic Token */ #define AI_MAGIC_INSPECTOR_TOKEN (0xA1C00101) /*!< AI Cool! Magic Token */ #define AI_ID_OBJ(id) \ ((ai_id_obj)(id)) #define AI_C_ARRAY_COUNT(array_) \ ( sizeof(array_) / sizeof((array_)[0]) ) #define AI_C_ARRAY_BYTE_SIZE(array_) \ ( sizeof(array_) ) /*! * @typedef ai_id_obj * @ingroup core_datatypes * @brief numeric identifier for generic object instances (e.g. layers, * operators, etc.) It is used by codegen tool to keep tracks of specific * instances created */ typedef uint16_t ai_id_obj; #endif /*AI_CORE_DATATYPES_H*/
1,901
C
27.818181
80
0.570752
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_ml_svmregressor.h
/** ****************************************************************************** * @file layers_svmregressor.h * @author AIS * @brief header file of AI platform SVM Regressor datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_SVMREGRESSOR_H #define LAYERS_SVMREGRESSOR_H #pragma once #include "layers_common.h" /*! * @defgroup layers_svmreg Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /* SVM regressor kernel types */ typedef enum ai_svm_kernel_e_ { AI_SVMREG_KERNEL_LINEAR = 0, AI_SVMREG_KERNEL_POLYNOMIAL, AI_SVMREG_KERNEL_RBF, AI_SVMREG_KERNEL_SIGMOID, AI_SVMREG_KERNEL_UNSUPPORTED, } ai_svm_kernel_e; /*! * @struct ai_layer_svmreg * @ingroup layers_svmreg * @brief SVM Regressor layer * * The type of svmreg function is handled by the specific forward function * @ref forward_svm_regressor */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_svmreg_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_float intercept; /*!< constant used in the decision function */ ai_float gamma; /*!< kernel coefficient for rbf, polynomial and sigmoid functions */ ai_float coef0; /*!< term in polynomial and sigmoid functions */ ai_u32 degree; /*!< polynomial function degree */ ai_svm_kernel_e kernel_type; /*!< kernel type : see ai_svm_kernel_e */ } ai_layer_svmreg; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! 
* @brief Decodes the SVM Regressor ML operator. * @ingroup layers_svmreg * @param layer svm regressor layer */ AI_INTERNAL_API void forward_svm_regressor(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_SVMREGRESSOR_H*/
2,397
C
27.891566
96
0.53567
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_bn_integer.h
#ifndef LITE_BN_INTEGER_H #define LITE_BN_INTEGER_H #pragma once #include "ai_lite_interface.h" /** * @brief Batch Normalization with 16-bit input, 16-bit threshold and binary output. * It is implemented using a threshold, and this is possible because the output is binary. * * @param[in] pIn Input data pointer * @param[out] pOut_32 Output data pointer * @param[in] pThreshold Thresholds pointer (one per channel) * @param[in] dim_x X dimension * @param[in] dim_y Y dimension * @param[in] channels_num Channels number */ LITE_API_ENTRY void forward_lite_bn_is16os1ws16(const ai_i16 *pIn, ai_u32 *pOut_32, const ai_i16 *pThreshold, const ai_i16 dim_x, const ai_i16 dim_y, const ai_i16 channels_num); #endif /* LITE_BN_INTEGER_H */
913
C
34.153845
90
0.591457
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_datatypes_internal.h
/** ****************************************************************************** * @file ai_datatypes_internal.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform private APIs types ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_DATATYPES_INTERNAL_H #define AI_DATATYPES_INTERNAL_H #pragma once #include "ai_datatypes.h" #include "ai_datatypes_defines.h" /*! * @defgroup datatypes_internal Internal Datatypes * @brief Data structures used internally to implement neural networks * * The layers are defined as structs; a generic layer type defines the basic * layer parameters and type-specific parameters are handled by specializations * implemented as a C union. The layers keep also a pointer to the parent * network and the next layer in the network. * The input, output and parameters are tensor with an hard-coded maximum * dimension of 4. Tensors are floating point arrays with a notion of size. * The network is a linked list of layers, and thus it stores only the pointer * to the first layer. */ /*! * @section Offsets * @ingroup datatypes_internal * Macros to handle (byte) stride addressing on tensors. The `AI_PTR` macro * is used to always cast a pointer to byte array. The macros `AI_OFFSET_X` are * used to compute (byte) offsets of respectively adjacents row elements, col * elements, channel elements and `channel_in` elements. * @{ */ /*! 
AI_STORAGE_KLASS SECTION ************************************/ #define AI_STORAGE_KLASS_TYPE(s_) \ ( (s_)->type ) #define AI_STORAGE_KLASS_SIZE(s_) \ ( (s_)->size ) #define AI_STORAGE_KLASS_DATA(s_, type_) \ ( (type_*)((s_)->data) ) #define AI_STORAGE_KLASS_COPY(dst_, dst_type_, src_, src_type_) \ { \ AI_ASSERT(AI_STORAGE_KLASS_SIZE(src_)>=AI_STORAGE_KLASS_SIZE(dst_)) \ AI_STORAGE_KLASS_SIZE(dst_) = AI_STORAGE_KLASS_SIZE(src_); \ for (ai_size i=0; i<AI_STORAGE_KLASS_SIZE(dst_); i++ ) { \ AI_STORAGE_KLASS_DATA(dst_, dst_type_)[i] = \ AI_STORAGE_KLASS_DATA(src_, src_type_)[i]; \ } \ } #define AI_STORAGE_KLASS_DUMP(s_, pfx_, post_, fmt_, type_) \ { \ AI_ASSERT(s_) \ AI_DEBUG_PRINT(pfx_, AI_STORAGE_KLASS_SIZE(s_)) \ for ( ai_u32 i=0; i<AI_STORAGE_KLASS_SIZE(s_); i++ ) { \ if ( (i % 8)==0 ) { AI_DEBUG_PRINT("\n ") } \ AI_DEBUG_PRINT(fmt_, AI_STORAGE_KLASS_DATA(s_, type_)[i]) \ } \ AI_DEBUG_PRINT(post_) \ } /*! AI_SHAPES SECTION ************************************/ #define AI_SHAPE_2D_H(shape_) \ AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_HEIGHT) #define AI_SHAPE_2D_W(shape_) \ AI_SHAPE_ELEM(shape_, AI_SHAPE_2D_WIDTH) #define AI_SHAPE_ELEM(shape_, pos_) \ AI_STORAGE_KLASS_DATA(shape_, ai_shape_dimension)[pos_] #define AI_SHAPE_GET_ELEM(shape_, pos_) \ (((pos_) < AI_SHAPE_SIZE(shape_)) ? 
AI_SHAPE_ELEM(shape_, pos_) : 1) #define AI_SHAPE_SET_ELEM(shape_, pos_, val_) \ if ((pos_) < AI_SHAPE_SIZE(shape_)) { AI_SHAPE_ELEM(shape_, pos_) = (val_); } #define AI_SHAPE_TYPE(shape_) \ AI_STORAGE_KLASS_TYPE(shape_) #define AI_SHAPE_SIZE(shape_) \ AI_STORAGE_KLASS_SIZE(shape_) #define AI_SHAPE_CLONE(dst_, src_) \ AI_STORAGE_KLASS_COPY(dst_, ai_shape_dimension, src_, ai_shape_dimension) #define AI_SHAPE_BCAST_CLONE(dst_, src_) \ { \ for (ai_size i = 0; i < AI_SHAPE_SIZE(dst_); i++) { \ AI_SHAPE_SET_ELEM(dst_, i, AI_SHAPE_GET_ELEM(src_, i)); \ } \ } //#define AI_SHAPE_BATCH(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_BATCH_CHANNEL) #define AI_SHAPE_H(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_HEIGHT) #define AI_SHAPE_W(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_WIDTH) #define AI_SHAPE_CH(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_CHANNEL) #define AI_SHAPE_IN_CH(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_IN_CHANNEL) #define AI_SHAPE_D(shape_) ((AI_SHAPE_SIZE((shape_)) > AI_SHAPE_DEPTH) \ ? AI_SHAPE_ELEM((shape_), AI_SHAPE_DEPTH) : 1) #define AI_SHAPE_E(shape_) ((AI_SHAPE_SIZE((shape_)) > AI_SHAPE_EXTENSION) \ ? AI_SHAPE_ELEM((shape_), AI_SHAPE_EXTENSION) : 1) #define AI_SHAPE_T(shape_) AI_SHAPE_ELEM((shape_), AI_SHAPE_TIME) #define AI_CONV_SHAPE_H AI_SHAPE_W #define AI_CONV_SHAPE_W AI_SHAPE_CH #define AI_CONV_SHAPE_CH AI_SHAPE_H #define AI_CONV_SHAPE_IN_CH AI_SHAPE_IN_CH /*! AI_STRIDES SECTION ***********************************/ #define AI_STRIDE_2D_H(stride_) \ AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_HEIGHT) #define AI_STRIDE_2D_W(stride_) \ AI_STRIDE_ELEM((stride_), AI_SHAPE_2D_WIDTH) #define AI_STRIDE_ELEM(stride_, pos_) \ AI_STORAGE_KLASS_DATA(stride_, ai_stride_dimension)[pos_] #define AI_STRIDE_GET_ELEM(stride_, pos_) \ (((pos_) < AI_STRIDE_SIZE(stride_)) ? 
AI_STRIDE_ELEM(stride_, pos_) : 0) #define AI_STRIDE_SET_ELEM(stride_, pos_, val_) \ if ((pos_) < AI_STRIDE_SIZE(stride_)) AI_STRIDE_ELEM(stride_, pos_) = (val_); #define AI_STRIDE_TYPE(stride_) \ AI_STORAGE_KLASS_TYPE(stride_) #define AI_STRIDE_SIZE(stride_) \ AI_STORAGE_KLASS_SIZE(stride_) #define AI_STRIDE_CLONE(dst_, src_) \ AI_STORAGE_KLASS_COPY(dst_, ai_stride_dimension, src_, ai_stride_dimension) #define AI_STRIDE_BCAST_CLONE(dst_, src_) \ { \ for (ai_size i=0; i<AI_STRIDE_SIZE(dst_); i++) { \ AI_STRIDE_SET_ELEM(dst_, i, AI_STRIDE_GET_ELEM(src_, i)); \ } \ } //#define AI_STRIDE_BATCH(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_BATCH_CHANNEL) #define AI_STRIDE_H(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_HEIGHT) #define AI_STRIDE_W(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_WIDTH) #define AI_STRIDE_CH(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_CHANNEL) #define AI_STRIDE_IN_CH(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_IN_CHANNEL) #define AI_STRIDE_D(stride) ((AI_STRIDE_SIZE((stride)) >= 5) ? AI_STRIDE_ELEM((stride), AI_SHAPE_DEPTH) : 0) #define AI_STRIDE_E(stride) ((AI_STRIDE_SIZE((stride)) == 6) ? AI_STRIDE_ELEM((stride), AI_SHAPE_EXTENSION) : 0) #define AI_STRIDE_T(stride) AI_STRIDE_ELEM((stride), AI_SHAPE_TIME) #define AI_STRIDE_SET_H(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_HEIGHT, val) #define AI_STRIDE_SET_W(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_WIDTH, val) #define AI_STRIDE_SET_CH(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_CHANNEL, val) #define AI_STRIDE_SET_IN_CH(stride, val) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_IN_CHANNEL, val) #define AI_STRIDE_SET_D(stride, val) if (AI_STRIDE_SIZE((stride)) >= 5) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_DEPTH, val) #define AI_STRIDE_SET_E(stride, val) if (AI_STRIDE_SIZE((stride)) == 6) AI_STRIDE_SET_ELEM((stride), AI_SHAPE_EXTENSION, val) /*! AI_TENSORS SECTION ***********************************/ #define AI_TENSOR_KLASS(tensor_) \ ((tensor_) ? 
(tensor_)->klass : NULL) #define AI_TENSOR_SHAPE(tensor_) \ (&((tensor_)->shape)) #define AI_TENSOR_STRIDE(tensor_) \ (&((tensor_)->stride)) #define AI_TENSOR_INFO(tensor_) \ (&((tensor_)->info)) #define AI_TENSOR_ARRAY(tensor_) \ ((tensor_) ? (tensor_)->data : NULL) #define AI_TENSOR_ID(tensor_) \ ((tensor_) ? AI_TENSOR_INFO(tensor_)->id : 0) #define AI_TENSOR_FLAGS(tensor_) \ ((tensor_) ? AI_TENSOR_INFO(tensor_)->flags : 0) #define AI_TENSOR_DATA_SIZE(tensor_) \ ((tensor_) ? AI_TENSOR_INFO(tensor_)->data_size : 0) /*! AI_OFFSETS SECTION ***********************************/ //#define AI_OFFSET_BATCH(b, stride) ((ai_ptr_offset)(b) * AI_STRIDE_BATCH(stride)) #define AI_OFFSET_H(y, stride) ((ai_ptr_offset)(y) * AI_STRIDE_H(stride)) #define AI_OFFSET_W(x, stride) ((ai_ptr_offset)(x) * AI_STRIDE_W(stride)) #define AI_OFFSET_CH(ch, stride) ((ai_ptr_offset)(ch) * AI_STRIDE_CH(stride)) #define AI_OFFSET_IN_CH(in_ch, stride) ((ai_ptr_offset)(in_ch) * \ AI_STRIDE_IN_CH(stride)) #define AI_OFFSET_D(d, stride) ((ai_ptr_offset)(d) * AI_STRIDE_D(stride)) #define AI_OFFSET_E(e, stride) ((ai_ptr_offset)(e) * AI_STRIDE_E(stride)) #define AI_OFFSET_5D(y, x, d, e, ch, stride) ( \ AI_OFFSET_H((y), (stride)) + AI_OFFSET_W((x), (stride)) + \ AI_OFFSET_D((d), (stride)) + AI_OFFSET_E((e), (stride)) + \ AI_OFFSET_CH((ch), (stride)) ) #define AI_OFFSET(y, x, ch, z, stride) ( \ AI_OFFSET_H((y), (stride)) + AI_OFFSET_W((x), (stride)) + \ AI_OFFSET_CH((ch), (stride)) + \ ((AI_STRIDE_SIZE((stride)) == 4) ? AI_OFFSET_IN_CH((z), (stride)) : AI_OFFSET_D((z), (stride))) ) /*! 
@} */ #define AI_GET_CONV_OUT_SIZE(in_size, filt_size, pad_l, pad_r, filt_stride) \ ((((in_size) - (filt_size) + (pad_l) + (pad_r)) / (filt_stride)) + 1) /** Tensors datatypes defines handlers ****************************************/ #define AI_TENSOR_SIZE(tensor_) \ get_tensor_size(tensor_, true) #define AI_TENSOR_SIZE_UNPAD(tensor_) \ get_tensor_size(tensor_, false) #define AI_TENSOR_BYTE_SIZE(tensor_) \ get_tensor_byte_size(tensor_) /******************************************************************************/ #define AI_PLATFORM_VERSION_INIT(major_, minor_, micro_) \ { .major = (major_), .minor = (minor_), .micro = (micro_), .reserved = 0x0 } /** Integer tensor info extraction ********************************************/ #define AI_INTQ_INFO_LIST_SCALE_ARRAY(list_, type_) \ ( ((list_) && (list_)->info) \ ? ((type_*)((list_)->info->scale)) : NULL ) #define AI_INTQ_INFO_LIST_ZEROPOINT_ARRAY(list_, type_) \ ( ((list_) && (list_)->info) \ ? ((type_*)((list_)->info->zeropoint)) : NULL ) #define AI_KLASS_GET_INTQ_INFO_LIST(tensor_) \ ((ai_intq_info_list*)((tensor_)->klass)) AI_API_DECLARE_BEGIN /*! * @brief Check whether 2 shapes have identical dimensions. * @ingroup datatypes_internal * @param shape0 the 1st tensor shape to compare * @param shape1 the 2nd tensor shape to compare * @return true if shape0 and shape1 have same dimensions. false otherwise */ AI_DECLARE_STATIC ai_bool ai_shape_is_same( const ai_shape* shape0, const ai_shape* shape1) { AI_ASSERT(shape0 && shape1) if (AI_SHAPE_SIZE(shape0) != AI_SHAPE_SIZE(shape1)) return false; ai_size dim = AI_SHAPE_SIZE(shape0); while ( dim>0 ) { dim--; if ( AI_SHAPE_ELEM(shape0, dim)!=AI_SHAPE_ELEM(shape1, dim) ) return false; } return true; } /*! * @brief Check whether the shapes is 1*1*1... for a scalar value content. 
* @ingroup datatypes_internal * @param shape the tensor shape to evaluate * @return true if shape0 is scalar false otherwise */ AI_DECLARE_STATIC ai_bool ai_shape_is_scalar( const ai_shape* shape0) { ai_size dim = AI_SHAPE_SIZE(shape0); while (dim>0) { dim--; if (AI_SHAPE_ELEM(shape0, dim) != 1) return false; } return true; } /*! * @brief Check if shape0 is a subshape of shape1 * @ingroup datatypes_internal * @param shape0 the 1st tensor shape to compare * @param shape1 the 2nd tensor shape to compare * @return true if shape0 is a subshape of shape1 (all shape0 dimensions are * smallers or equal of the shape1 ones). false otherwise */ AI_DECLARE_STATIC ai_bool ai_shape_is_subshape( const ai_shape* shape0, const ai_shape* shape1) { AI_ASSERT(shape0 && shape1) AI_ASSERT(AI_SHAPE_SIZE(shape0)==AI_SHAPE_SIZE(shape1)) ai_size dim = AI_SHAPE_SIZE(shape0); while (dim) { dim--; if ( AI_SHAPE_ELEM(shape0, dim)>AI_SHAPE_ELEM(shape1, dim) ) return false; } return true; } /*! * @brief Computes the total size of a tensor given its dimensions. * @ingroup datatypes_internal * @param shape the tensor shape */ AI_DECLARE_STATIC ai_size ai_shape_get_size(const ai_shape* shape) { AI_ASSERT(shape) ai_size dim = AI_SHAPE_SIZE(shape); ai_size size = 1; while (dim>0) { dim--; size *= AI_SHAPE_ELEM(shape, dim); } return size; } /*! * @brief Computes the size of the input image discarding the channels. * @ingroup datatypes_internal * @param shape the tensor shape */ AI_DECLARE_STATIC ai_size ai_shape_get_npixels(const ai_shape* shape) { AI_ASSERT(shape) const ai_size npixels = AI_SHAPE_W(shape) * AI_SHAPE_H(shape); return npixels; } /** APIs Section *************************************************************/ /*! * @brief Get packed version from major, minor, micro representaion. 
* @ingroup datatypes_internal * @param major major version value * @param minor minor version value * @param micro micro version value * @return a packed version info obtained serializing input values */ AI_INTERNAL_API ai_version ai_version_get(const ai_u8 major, const ai_u8 minor, const ai_u8 micro); /*! * @brief Get un-packed version from packed version representaion. * @ingroup datatypes_internal * @param version a packed varsion info * @return struct with de-serialized major, minor, micro values */ AI_INTERNAL_API ai_platform_version ai_platform_version_get(const ai_version version); /*! * @brief Map from ai_buffer data struct to ai_array data struct. * @ingroup datatypes_internal * @param buf a pointer to the ai_buffer to be mapped to ai_array * @return an initialized @ref ai_array struct representing same data */ AI_INTERNAL_API ai_array ai_from_buffer_to_array(const ai_buffer* buf); /*! * @brief Map from ai_array data struct to ai_buffer data struct. * @ingroup datatypes_internal * @param array a pointer to the ai_array to be mapped to ai_buffer * @return an initialized @ref ai_buffer struct representing same data */ AI_INTERNAL_API ai_buffer ai_from_array_to_buffer(const ai_array* array); /*! * @brief get the total number of elements of a n-dimensional tensor. * @ingroup datatypes_internal * @param t a pointer to an @ref ai_tensor * @param with_padding when true it considers also padded elements * @return the number of elements of the tensor (with/without padded ones) */ AI_INTERNAL_API ai_size get_tensor_size(const ai_tensor* t, const ai_bool with_padding); /*! * @brief get the total size in bytes of elements of a n-dimensional tensor (excluding padded ones). * @ingroup datatypes_internal * @param t a pointer to an @ref ai_tensor * @return the total size in bytes of elements of the tensor (excluding padded ones) */ AI_INTERNAL_API ai_size get_tensor_byte_size(const ai_tensor* t); AI_API_DECLARE_END #endif /*AI_DATATYPES_INTERNAL_H*/
14,911
C
34.336493
133
0.62303
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dw_dqnn.h
/** ****************************************************************************** * @file lite_dw_dqnn.h * @author AIS * @brief header file of AI platform lite dw kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_DW_DQNN_H #define LITE_DW_DQNN_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles 2D DW convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_dw_is1os1ws1_bn_pad0(const ai_u32 *pDataIn_init, ai_u32 * pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold); /*! 
* @brief Handles 2D DW convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Lite I/F * - Optimized thanks to Optim3 assumptions * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_dw_is1os1ws1_bn_pad0_optim3(const ai_u32 *pDataIn_init, ai_u32 * pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold); /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_dw_is1os1ws1_bn_pad1(const ai_u32 *pDataIn_init, ai_u32 * pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i32 pad_value); /*! 
* @brief Handles 2D convolution with binary input, binary output and * binary weights - with +1/-1 padding (Larq like) - Lite I/F * - Optimized thanks to Optim3 assumptions * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_dw_is1os1ws1_bn_pad1_optim3(const ai_u32 *pDataIn_init, ai_u32 * pDataOut_init, const ai_u32 *pWeights_init, ai_float *pScratch_32, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_in, const ai_i32 height_in, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 filt_width, const ai_i32 filt_height, const ai_i32 filt_pad_x, const ai_i32 filt_pad_y, const ai_i32 filt_stride_x, const ai_i32 filt_stride_y, const ai_i32 *pThreshold, const ai_i32 pad_value); #endif /*LITE_DW_DQNN_H*/
6,834
C
49.629629
80
0.362891
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_upsample_generic.h
/** ****************************************************************************** * @file lite_upsample.h * @author AIS * @brief header file of AI platform lite pw kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_UPSAMPLE_GENERIC_H #define LITE_UPSAMPLE_GENERIC_H #pragma once #include "ai_lite_interface.h" void forward_lite_upsample_generic_nearest(const ai_u8* in_data, ai_u8* out_data, const ai_size width_in, const ai_size width_out, const ai_float width_scale, const ai_size height_out, const ai_float height_scale, const ai_u32 output_tensor_w_stride, const ai_float offset_round_coeff); void forward_lite_upsample_nearest(ai_ptr in_data, ai_ptr out_data, const ai_size width_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_ptr_offset stride_w, const ai_float offset_round_coeff); void forward_lite_upsample_zeros( ai_ptr in_data, ai_ptr out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_ptr_offset stride_ch, const ai_ptr_offset stride_w, const ai_handle p_zero_value); #endif /*LITE_UPSAMPLE_GENERIC_H*/
2,756
C
44.196721
80
0.394049
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_lite_interface.h
/** ****************************************************************************** * @file ai_lite_interface.h * @author AST Embedded Analytics Research Platform * @brief Definitions and implementations of runtime-lite codegen APIs ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_LITE_INTERFACE_H #define AI_LITE_INTERFACE_H #pragma once #include "ai_platform.h" #include "ai_lite.h" /*****************************************************************************/ /* Generic Codegen Section */ // #ifdef HAS_LOG #if 0 #include "core_log.h" #define LITE_GRAPH_START(_graph_name) \ AI_LOG_DEBUG("[LITE GRAPH START] : " _graph_name) #define LITE_GRAPH_END(_graph_name) \ AI_LOG_DEBUG("[LITE GRAPH END] : " _graph_name) #else #define LITE_GRAPH_START(_graph_name) \ /* LITE_GRAPH_START() */ #define LITE_GRAPH_END(_graph_name) \ /* LITE_GRAPH_END() */ #endif /* HAS_LOG */ #ifdef HAS_AI_ASSERT #include <assert.h> #define LITE_ASSERT(_cond) \ { assert(_cond); } #else #define LITE_ASSERT(_cond) \ do { /* LITE_ASSERT() */ } while (0); #endif /*HAS_AI_ASSERT*/ /*****************************************************************************/ #if defined(_MSC_VER) #define LITE_DECLARE_STATIC static __inline #define LITE_HINT_INLINE static __inline #define LITE_FORCE_INLINE static __inline #elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) #define LITE_DECLARE_STATIC static inline #define LITE_HINT_INLINE static inline #define LITE_FORCE_INLINE static inline #elif defined(__GNUC__) 
#define LITE_DECLARE_STATIC static __inline #define LITE_HINT_INLINE static __inline #define LITE_FORCE_INLINE static __inline #else #define LITE_DECLARE_STATIC static __inline #define LITE_HINT_INLINE static __inline #define LITE_FORCE_INLINE static __inline #endif /* _MSC_VER */ #define LITE_API_ENTRY /* LITE_API_ENTRY */ #define LITE_PACK(...) \ __VA_ARGS__ #define LITE_UNUSED(_elem) \ ((void)(_elem)); #define LITE_KERNEL_SECTION(_code_block) \ { LITE_PACK(_code_block) } /*****************************************************************************/ /* Arrays Section */ #define LITE_ARRAY_VALUES(...) \ { LITE_PACK(__VA_ARGS__) } #define LITE_ARRAY_DATA(_array, _type) \ ((_type*)(_array)->data) #define LITE_ARRAY_DATA_START(_array, _type) \ ((_type*)(_array)->data_start) /*****************************************************************************/ /* Tensors Section */ #define LITE_TENSOR_ARRAY(_tensor, _pos) \ (((_tensor)->data) + (_pos)) /*****************************************************************************/ /* Tensors List Section */ #define LITE_TENSOR_LIST(_chain, _pos) \ (&(_chain)->chain[_pos]) #define LITE_TENSOR_IN(_chain, _pos) \ (LITE_TENSOR_LIST(_chain, 0)->tensor[_pos]) #define LITE_TENSOR_OUT(_chain, _pos) \ (LITE_TENSOR_LIST(_chain, 1)->tensor[_pos]) #define LITE_TENSOR_WEIGHTS(_chain, _pos) \ (LITE_TENSOR_LIST(_chain, 2)->tensor[_pos]) #define LITE_TENSOR_SCRATCHS(_chain, _pos) \ (LITE_TENSOR_LIST(_chain, 3)->tensor[_pos]) /*****************************************************************************/ #define LITE_LAYER_ACQUIRE(name_, cast_type_, ptr_) \ LITE_ASSERT(ptr_) \ AI_CONCAT(ai_layer_, cast_type_)* name_ = \ (AI_CONCAT(ai_layer_, cast_type_)*)(ptr_); #define LITE_LAYER_RELEASE(name_, cast_type_) \ /* LITE_LAYER_RELEASE() */ /*****************************************************************************/ AI_API_DECLARE_BEGIN AI_API_DECLARE_END #endif /* AI_LITE_INTERFACE_H */
4,226
C
28.767605
80
0.503076
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_sm.h
/** ****************************************************************************** * @file layers_sm.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform non softmax layer datatype ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_SM_H #define LAYERS_SM_H #pragma once #include "layers_common.h" /*! * @defgroup layers SoftMax Layer Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @brief Softmax normalization computed on an array of fixed point channels * @ingroup layers_sm * @param out opaque handler to output channel array * @param in opaque handler to input channel array * @param in_size total size (number of elements) to process on the input * @param channel_size number of elements of the input channel * @param in_channel_step number of elements to move to next input element * @param out_channel_step number of elements to move to next output element */ AI_INTERNAL_API void sm_func_sm_array_fixed(ai_handle out, const ai_handle in, const ai_size in_size, const ai_size channel_size, const ai_size in_channel_step, const ai_size out_channel_step); /*! * @brief Computes the activations of a fixed point softmax nonlinear layer. * @ingroup layers_sm * @param layer the softmax (sm) layer */ AI_INTERNAL_API void forward_sm_fixed(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_SM_H*/
2,051
C
30.56923
80
0.570453
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/datatypes_network.h
/** ****************************************************************************** * @file datatypes_network.h * @author AST Embedded Analytics Research Platform * @brief Definitions of code generated network types ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef DATATYPES_NETWORK_H #define DATATYPES_NETWORK_H #pragma once /* * Header to be overriden by the generated version * by including with <> the include directories are searched in the order * specified in the compiler * To enable the override, put the generated path before the API path */ #include "ai_platform.h" AI_API_DECLARE_BEGIN #ifdef AI_OVERRIDE_CUSTOM_TYPES #warning "Warning: Custom Types have been already defined!\n" #endif #define AI_CUSTOM_TYPES_COUNT (3) #define AI_CUSTOM_TYPES_SIGNATURE_DECLARE(name) \ const ai_custom_type_signature name[AI_CUSTOM_TYPES_COUNT+1] = { \ AI_CUSTOM_TYPES_COUNT, \ AI_CUSTOM_SIZE(ai_shape_dimension), \ AI_CUSTOM_SIZE(ai_stride_dimension), \ AI_CUSTOM_SIZE(ai_array_size), \ }; typedef ai_i32 ai_stride_dimension; typedef ai_u32 ai_array_size; AI_API_DECLARE_END #endif /*DATATYPES_NETWORK_H*/
1,694
C
27.728813
80
0.579103
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_layer_custom_interface.h
/** ****************************************************************************** * @file ai_layer_custom_interface.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform custom layers interface APIs ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_LAYER_CUSTOM_INTERFACE_H #define AI_LAYER_CUSTOM_INTERFACE_H #pragma once #include "ai_platform.h" #include "ai_platform_interface.h" #include "layers_custom.h" #define INTQ_SCALE_FLOAT (AI_BUFFER_META_FLAG_SCALE_FLOAT) #define INTQ_ZEROPOINT_U8 (AI_BUFFER_META_FLAG_ZEROPOINT_U8) #define INTQ_ZEROPOINT_S8 (AI_BUFFER_META_FLAG_ZEROPOINT_S8) #define INTQ_ZEROPOINT_U16 (AI_BUFFER_META_FLAG_ZEROPOINT_U16) #define INTQ_ZEROPOINT_S16 (AI_BUFFER_META_FLAG_ZEROPOINT_S16) #define AI_TENSOR_HEIGHT (3) #define AI_TENSOR_WIDTH (2) #define AI_TENSOR_CHANNEL (1) #define AI_TENSOR_IN_CHANNEL (0) AI_API_DECLARE_BEGIN typedef enum { TYPE_NONE = 0x0, TYPE_FLOAT, TYPE_BOOL, TYPE_INTEGER, TYPE_SIGNED, TYPE_UNSIGNED, } ai_tensor_type; typedef struct { ai_tensor_type type; ai_i8 bits; ai_i8 fbits; } ai_tensor_format; typedef struct { ai_u16 flags; /*!< optional flags to store intq info attributes */ ai_u16 size; /*!< number of elements in the the intq_info list */ ai_float* scale; /*!< array of scales factors */ union { ai_u8* zeropoint_u8; /*!< array of zeropoints as unsigned */ ai_i8* zeropoint_s8; /*!< array of zeropoints as signed */ }; } ai_tensor_intq_info; 
/**************************************************************************** ** Layer Custom Interface APIs ****************************************************************************/ /*! * @brief acquire the custom layer from its handle * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the custom layer * @return a pointer to ai_layer_custom if found and valid, else NULL */ AI_INTERFACE_TYPE ai_layer_custom* ai_layer_custom_get( ai_layer* layer); /*! * @brief release the custom layer provided its handle * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the custom layer to release */ AI_INTERFACE_TYPE void ai_layer_custom_release( ai_layer* layer); /*! * @brief get the number of inputs tensors of a custom layer * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the custom layer * @return the number of input tensors of the layer. 0 if no input tensors or error */ AI_INTERFACE_TYPE ai_size ai_layer_get_tensor_in_size( const ai_layer* layer); /*! * @brief get the number of outputs tensors of a custom layer * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the custom layer * @return the number of outputs tensors of the layer. 0 if no outputs tensors or error */ AI_INTERFACE_TYPE ai_size ai_layer_get_tensor_out_size( const ai_layer* layer); /*! * @brief get the number of weights tensors of a custom layer * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the custom layer * @return the number of weights tensors of the layer. 0 if no weights tensors or error */ AI_INTERFACE_TYPE ai_size ai_layer_get_tensor_weights_size( const ai_layer* layer); /*! 
* @brief get the n-th (at index pos) input tensor pointer from a layer * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the layer * @param pos the index position in the tensor list * @return a pointer to a tensor if found, else, if invalid or out-of-range NULL */ AI_INTERFACE_TYPE ai_tensor* ai_layer_get_tensor_in( const ai_layer* layer, const ai_u16 pos); /*! * @brief get the n-th (at index pos) output tensor pointer from a layer * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the layer * @param pos the index position in the tensor list * @return a pointer to a tensor if found, else, if invalid or out-of-range NULL */ AI_INTERFACE_TYPE ai_tensor* ai_layer_get_tensor_out( const ai_layer* layer, const ai_u16 pos); /*! * @brief get the n-th (at index pos) weight tensor pointer from a layer * @ingroup ai_layer_custom_interface * @param layer an opaque handler to the layer * @param pos the index position in the tensor list * @return a pointer to a tensor if found, else, if invalid or out-of-range NULL */ AI_INTERFACE_TYPE ai_tensor* ai_layer_get_tensor_weights( const ai_layer* layer, const ai_u16 pos); /**** Layer Tensors APIs ***************************************************/ /*! * @brief check if the tensor has integer quantization informations @ref ai_tensor_intq_info * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return true if tensot has integer quantization informations, false otherwise */ AI_INTERFACE_TYPE ai_bool ai_tensor_has_intq( const ai_tensor* t); /*! * @brief get the tensor integer quantization informations @ref ai_tensor_intq_info * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the integer quantization informations as a struct @ref ai_tensor_intq_info */ AI_INTERFACE_TYPE ai_tensor_intq_info ai_tensor_get_intq( const ai_tensor* t); /*! 
* @brief get the format of the tensor see @ref ai_tensor_format * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the tensor format */ AI_INTERFACE_TYPE ai_tensor_format ai_tensor_get_format( const ai_tensor* t); /**** Shapes Getters ****/ /*! * @brief get the dimensionality of the tensor shapes * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the dimensionality of the tensor shape */ AI_INTERFACE_TYPE ai_size ai_tensor_get_shape_size( const ai_tensor* t); /*! * @brief get the value of the shape dimensionality pos * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the value of the shape dimensionality at pos of the tensor */ AI_INTERFACE_TYPE ai_shape_dimension ai_tensor_get_shape( const ai_tensor* t, const ai_u16 pos); /**** Strides Getters ****/ /*! * @brief get the dimensionality of the tensor strides * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the dimensionality of the tensor strides @ref ai_stride */ AI_INTERFACE_TYPE ai_size ai_tensor_get_stride_size( const ai_tensor* t); /*! * @brief get the value of the stride dimensionality pos * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the value of the stride dimensionality at pos of the tensor */ AI_INTERFACE_TYPE ai_stride_dimension ai_tensor_get_stride( const ai_tensor* t, const ai_u16 pos); /**** Data Storage Getters ****/ /*! * @brief get tensor storage data buffer pointer * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return a pointer to the tensor data buffer, set to NULL if error */ AI_INTERFACE_TYPE ai_any_ptr ai_tensor_get_data( const ai_tensor* t); /*! 
* @brief get number of tensor elements * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the number of tensor elements or 0 if error */ AI_INTERFACE_TYPE ai_size ai_tensor_get_data_size( const ai_tensor* t); /*! * @brief get the size in bytes of the tensor data buffer * @ingroup ai_layer_custom_interface * @param tensor a pointer to the tensor * @return the size in bytes of the tensor data buffer. 0 if error */ AI_INTERFACE_TYPE ai_size ai_tensor_get_data_byte_size( const ai_tensor* t); AI_API_DECLARE_END #endif /*AI_LAYER_CUSTOM_INTERFACE_H*/
8,272
C
29.985019
92
0.66308
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_pool_dqnn.h
/** ****************************************************************************** * @file layers_conv2d_dqnn.h * @author AIS * @brief header file of AI platform DQNN pool datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_POOL_DQNN_H #define LAYERS_POOL_DQNN_H #pragma once #include "layers_common.h" #include "layers_pool.h" /*! * @defgroup layers_pool_dqnn Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_pool_dqnn * @ingroup layers_pool_dqnn * @brief pool_dqnn layer * * @ref forward_maxpool_is1os1 */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pool_dqnn_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_2d pool_size; /*!< pooling size */ ai_shape_2d pool_stride; /*!< pooling stride */ ai_shape pool_pad; /*!< pooling pad, y,x border sizes */ // ai_u32 pad_value; /*!< pooling pad value */ } ai_layer_pool_dqnn; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles max pooling with binary input and binary output * @ingroup layers_pool_dqnn * @param layer conv2d_pool layer */ AI_INTERNAL_API void forward_maxpool_is1os1(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_POOL_DQNN_H*/
1,996
C
26.356164
80
0.482966
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dw.h
/** ****************************************************************************** * @file lite_dw.h * @author AIS * @brief header file of AI platform lite dw kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_DW_H #define LITE_DW_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles dw convolutions generic case (supports depth multiplier >= 1) * @ingroup lite_dw */ LITE_API_ENTRY void forward_lite_dw_dm_sssa8_ch(const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 ch_im_out, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint, ai_i8 *Im_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_i32 nl_pool_fused, ai_i16 *bufferA); /*! 
* @brief Handles dw convolutions with depth multiplier = 1 only * @ingroup lite_dw */ LITE_API_ENTRY void forward_lite_dw_sssa8_ch(const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint, ai_i8 *Im_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_i32 nl_pool_fused, ai_i16 *bufferA); /*! * @brief Handles dw convolutions with depth multiplier = 1, valid padding * and 3*3 kernel size * @ingroup lite_dw */ LITE_API_ENTRY void forward_lite_dw_3x3_sssa8_ch(const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 stride_x, const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint, ai_i8 *Im_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_i32 nl_pool_fused, ai_i16 *bufferA); /*! * @brief Handles dw convolutions with depth multiplier = 1, valid padding, * 3*3 kernel size, stride_x = 1 and weights/input are channel first * @ingroup lite_dw */ LITE_API_ENTRY void forward_lite_dw_3x3_ch1st_sssa8_ch(const ai_i8 *Im_in, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_i8 *wt, const ai_u16 stride_x, const ai_u16 stride_y, const ai_i32 *bias, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint, ai_i8 *Im_out, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_i32 nl_pool_fused, ai_i16 *bufferA); #endif /*LITE_DW_H*/
5,348
C
39.218045
80
0.382199
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_ml.h
/** ****************************************************************************** * @file layers_ml.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform ml layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_ML_H #define LAYERS_ML_H #pragma once #include "layers_common.h" /*! * @defgroup layers_generic ML Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_ArrayFeatureExtractor * @ingroup layers_ml * @brief ai_layer_ArrayFeatureExtractor layer definition * * This layer select elements of the input tensor based on the indices passed. It is intended to be used * by his associated forward function @ref forward_arrayfeatureextractor */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_arrayfeatureextractor_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_tensor* indices; /*!< Indices of corrisponding axis in axes*/ } ai_layer_arrayfeatureextractor; /*! * @struct ai_layer_ZipMap * @ingroup layers_ml * @brief ai_layer_ZipMap layer definition * * This layer creates a map from the input and the attributes. * The values are provided by the input tensor, while the keys are specified by the attributes. * The user must provide keys in either classlabels_strings or classlabels_int64s (but not both). * The columns of the tensor correspond one-by-one to the keys specified by the attributes. * There must be as many columns as keys. 
* It is intended to be used by his associated forward function @ref forward_zipmap. */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_zipmap_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_bool has_classlabels_int; } ai_layer_zipmap; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief select elements of the input tensor based on the indices passed. * @ingroup layers_ml * @param layer array feture extractor */ AI_INTERNAL_API void forward_arrayfeatureextractor(ai_layer* layer); /*! * @brief creates a map from the inputs and the attributes * @ingroup layers_ml * @param layer zipmap */ AI_INTERNAL_API void forward_zipmap(ai_layer* layer); AI_API_DECLARE_END #endif /*LAYERS_ML_H*/
2,899
C
28.896907
104
0.595378
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dense_is8os1ws1.h
/** ****************************************************************************** * @file lite_dense_is8os1ws1.h * @author Marco Forleo * @brief header file of AI platform lite dense kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2023 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_DENSE_IS8OS1WS1_H #define LITE_DENSE_IS8OS1WS1_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Forward function for a dense layer with signed 8 bits input, * binary weights and binary output. * @ingroup lite_dense_is8os1ws1 * @param out_ptr The pointer to output buffer. *@param data_in_init_ptr The pointer to input buffer. * @param weights_ptr The pointer to weights. * @param scratch_ptr The pointer to scratch buffer. * @param scratch_size The value of scratch tensor size. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. * @param n_channel_in The number of channels of the input. * @param scale_ptr The pointer to scale buffer of BN. * @param offset_ptr The pointer to offset buffer of BN. 
*/ LITE_API_ENTRY void forward_lite_dense_is8os1ws1_bn_fxp(ai_pbits *out_ptr, const ai_i8 *data_in_init_ptr, const ai_pbits *weights_ptr, ai_i32 *scratch_ptr, const ai_u32 scratch_size, const ai_u32 n_channel_out, const ai_u32 n_channel_in, const ai_i32 *threshold_ptr); #endif /*LITE_DENSE_IS8OS1WS1_H*/
2,452
C
41.293103
80
0.472268
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_list.h
/** ****************************************************************************** * @file layers_list.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ /* No sentry. This is deliberate!! */ /* Template: LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_) * Where: * - type_ is the (enum) type name of the layer. to have the complete enum * value you should use the macro @ref AI_LAYER_TYPE_ENTRY(type_) that adds * the specific prefix and postfix tokens to the type_ * - id_ is the numeric id of the layer * - struct_ is the name of the datastruct of the layer without the ai_layer_ * prefix * - forward_func_ is the forward function name of the routine implementing * actual layer processing * - init_func_ is the init function name of the routine implementing * actual layer initialization * - destroy_func_ is the destroy function name of the routine implementing * actual layer de-initialization */ /* Layer IDs for stateless layers (bit 8 set) */ #define LAYER_ID(id_) \ (0x100 + (id_)) /* Layer IDs for stateful layers (bits 7 and 8 set) */ #define LAYER_STATEFUL_ID(id_) \ (0x180 + (id_)) /*!< Base layer */ LAYER_ENTRY(BASE, LAYER_ID(0), base, NULL, NULL, NULL) /*!< Elementwise addition layer */ LAYER_ENTRY(ADD, LAYER_ID(1), add, forward_add, NULL, NULL) /*!< Batch normalization layer */ LAYER_ENTRY(BN, LAYER_ID(2), bn, forward_bn, NULL, NULL) /*!< 2D 
Convolutional layer */ LAYER_ENTRY(CONV2D, LAYER_ID(3), conv2d, forward_conv2d, NULL, NULL) /*!< Dense layer */ LAYER_ENTRY(DENSE, LAYER_ID(4), dense, forward_dense, NULL, NULL) /*!< Local Response Normalization layer */ LAYER_ENTRY(LRN, LAYER_ID(6), lrn, forward_lrn, NULL, NULL) /*!< Nonlinearity layer */ LAYER_ENTRY(NL, LAYER_ID(7), nl, NULL, NULL, NULL) /*!< Normalization layer */ LAYER_ENTRY(NORM, LAYER_ID(8), norm, forward_norm, NULL, NULL) /*!< Merged Conv2d / Pool layer */ LAYER_ENTRY(OPTIMIZED_CONV2D, LAYER_ID(9), conv2d_nl_pool, forward_conv2d_nl_pool, NULL, NULL) /*!< Transpose Tensor layer */ LAYER_ENTRY(TRANSPOSE, LAYER_ID(10), transpose, forward_transpose, NULL, NULL) /*!< Pooling layer */ LAYER_ENTRY(POOL, LAYER_ID(11), pool, forward_pool, NULL, NULL) /*!< Softmax layer */ LAYER_ENTRY(SM, LAYER_ID(12), sm, forward_sm, NULL, NULL) /*!< Split layer */ LAYER_ENTRY(SPLIT, LAYER_ID(13), split, forward_split, NULL, NULL) /*!< TimeDelay layer */ LAYER_ENTRY(TIME_DELAY, LAYER_ID(14), time_delay, forward_time_delay, NULL, NULL) /*!< TimeDistributed layer */ LAYER_ENTRY(TIME_DISTRIBUTED, LAYER_ID(15), time_distributed, forward_time_distributed, NULL, NULL) /*!< Concat Tensor layer */ LAYER_ENTRY(CONCAT, LAYER_ID(16), concat, forward_concat, NULL, NULL) /*!< GEMM layer */ LAYER_ENTRY(GEMM, LAYER_ID(17), gemm, forward_gemm, NULL, NULL) /*!< Upsample layer */ LAYER_ENTRY(UPSAMPLE, LAYER_ID(18), upsample, forward_upsample, NULL, NULL) /*!< Container layer for eltwise operations */ LAYER_ENTRY(ELTWISE, LAYER_ID(19), eltwise, forward_eltwise, NULL, NULL) /*!< Container layer for eltwise integer operations */ LAYER_ENTRY(ELTWISE_INTEGER, LAYER_ID(20), eltwise_integer, NULL, NULL, NULL) /*!< InstanceNormalization layer */ LAYER_ENTRY(INSTANCENORMALIZATION, LAYER_ID(21), instanceNormalization, forward_instanceNormalization, NULL, NULL) /*!< Pad layer */ LAYER_ENTRY(PAD, LAYER_ID(22), pad, forward_pad, NULL, NULL) /*!< Slice layer */ LAYER_ENTRY(SLICE, LAYER_ID(23), 
slice, forward_slice, NULL, NULL) /*!< Tile layer */ LAYER_ENTRY(TILE, LAYER_ID(24), tile, forward_tile, NULL, NULL) /*!< Container layer for reduce operations */ LAYER_ENTRY(REDUCE, LAYER_ID(25), reduce, forward_reduce, NULL, NULL) /*!< Recurrent Neural Network layer */ LAYER_ENTRY(RNN, LAYER_ID(26), rnn, forward_rnn, NULL, NULL) /*!< Resize layer */ LAYER_ENTRY(RESIZE, LAYER_ID(27), resize, forward_resize, NULL, NULL) /*!< Gather layer */ LAYER_ENTRY(GATHER, LAYER_ID(28), gather, forward_gather, NULL, NULL) /*!< Pack layer */ LAYER_ENTRY(PACK, LAYER_ID(29), pack, forward_pack, NULL, NULL) /*!< Unpack layer */ LAYER_ENTRY(UNPACK, LAYER_ID(30), unpack, forward_unpack, NULL, NULL) /*!< ArgMax layer */ LAYER_ENTRY(ARGMAX, LAYER_ID(31), argmax, forward_argmax, NULL, NULL) /*!< ArgMin layer */ LAYER_ENTRY(ARGMIN, LAYER_ID(32), argmin, forward_argmin, NULL, NULL) /*!< Cast Neural Network Layer */ LAYER_ENTRY(CAST, LAYER_ID(33), cast, forward_cast, NULL, NULL) /*!< iForest layer */ LAYER_ENTRY(IFOREST, LAYER_ID(34), iforest, forward_iforest, NULL, NULL) /*!< SVM Regressor layer */ LAYER_ENTRY(SVMREG, LAYER_ID(35), svmreg, forward_svm_regressor, NULL, NULL) /*!< ArrayFeatureExtractor layer */ LAYER_ENTRY(ARRAYFEATUREEXTRACTOR, LAYER_ID(36), arrayfeatureextractor, forward_arrayfeatureextractor, NULL, NULL) /*!< SVM Classifier (SVC) layer */ LAYER_ENTRY(SVC, LAYER_ID(37), svc, forward_svc, NULL, NULL) /*!< ZipMap layer */ LAYER_ENTRY(ZIPMAP, LAYER_ID(38), zipmap, forward_zipmap, NULL, NULL) /*!< Where layer */ LAYER_ENTRY(WHERE, LAYER_ID(39), where, forward_where, NULL, NULL) /*!< LinearClassifier layer */ LAYER_ENTRY(LINEARCLASSIFIER, LAYER_ID(42), linearclassifier, forward_linearclassifier, NULL, NULL) /*!< TreeEnsembleClassifier layer */ LAYER_ENTRY(TREE_ENSEMBLE_CLASSIFIER, LAYER_ID(43), tree_ensemble_classifier, forward_tree_ensemble_classifier, NULL, NULL) /*!< TopK layer */ LAYER_ENTRY(TOPK, LAYER_ID(45), topK, forward_topK, NULL, NULL) /*!< ReduceLogSumExp layer */ 
LAYER_ENTRY(REDUCE_LOG_SUM_EXP, LAYER_ID(51), reduce_log_sum_exp, forward_reduce_log_sum_exp, NULL, NULL) /*!< ReduceL1 layer */ LAYER_ENTRY(REDUCE_L1, LAYER_ID(52), reduce_l1, forward_reduce_l1, NULL, NULL) /*!< Runtime Lite Graph Wrapper layer */ LAYER_ENTRY(LITE_GRAPH, LAYER_ID(63), lite_graph, NULL, NULL, NULL) /*!< TreeEnsembleRegressor layer */ LAYER_ENTRY(TREE_ENSEMBLE_REGRESSOR, LAYER_ID(66), tree_ensemble_regressor, forward_tree_ensemble_regressor, NULL, NULL) /*!< Deeply Quantized Dense Layers */ LAYER_ENTRY(CONV2D_DQNN, LAYER_ID(40), conv2d_dqnn, forward_pw_is1os1ws1_bn, NULL, NULL) LAYER_ENTRY(POOL_DQNN, LAYER_ID(41), pool_dqnn, forward_maxpool_is1os1, NULL, NULL) LAYER_ENTRY(DENSE_DQNN, LAYER_ID(44), dense_dqnn, forward_dense_is1os1ws1, NULL, NULL) /*!< Reverse layer */ LAYER_ENTRY(REVERSE, LAYER_ID(50), reverse, forward_reverse, NULL, NULL) /*****************************************************************************/ /*!< Base Stateful Layer type */ LAYER_ENTRY(STATEFUL, LAYER_STATEFUL_ID(0), stateful, NULL, NULL, NULL) /*!< Long Short Time Memory layer */ LAYER_ENTRY(LSTM, LAYER_STATEFUL_ID(1), lstm, forward_lstm, init_lstm, destroy_lstm) /*!< Custom layer */ LAYER_ENTRY(CUSTOM, LAYER_STATEFUL_ID(2), custom, NULL, NULL, NULL) /*!< Gated Recurrent Unit layer */ LAYER_ENTRY(GRU, LAYER_STATEFUL_ID(3), gru, forward_gru, init_gru, destroy_gru) /*!< Stateless Template layer declaration */ /* LAYER_ENTRY(TEMPLATE, LAYER_ID(XX), template, forward_template, NULL, NULL) */ /*!< Stateful Template layer declaration */ /* LAYER_ENTRY(TEMPLATE, LAYER_STATEFUL_ID(XX), template, forward_template, init_template, destroy_template) */ #undef LAYER_ENTRY #undef LAYER_ID #undef LAYER_STATEFUL_ID
7,821
C
45.838323
123
0.671781
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_pad_generic.h
/** ****************************************************************************** * @file lite_pad_generic.h * @author AIS * @brief header file of AI platform lite padding kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_PADDING_DQNN_H #define LITE_PADDING_DQNN_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles padding with 8 bits input/output in constant mode - Lite I/F * Channel 1st Format Input and Output * @ingroup lite_padding_dqnn */ LITE_API_ENTRY void forward_lite_pad_8bit_ch1st_3x3_constant(ai_ptr_const in_data_tensor, ai_ptr out_data_tensor, const ai_handle fill_value, const ai_i32 height_in, const ai_i32 channel_in, const ai_ptr_offset ch_stride_in, const ai_ptr_offset h_stride_in, const ai_ptr_offset h_stride_pad); /*! * @brief Handles padding with 8 bits input/output in constant mode - Lite I/F * @ingroup lite_padding_dqnn */ LITE_API_ENTRY void forward_lite_pad_constant(ai_ptr_const in_data, ai_ptr out_data, const ai_handle fill_value, const ai_i16 in_bits, const ai_i32 height_in, const ai_ptr_offset ch_stride_in, const ai_ptr_offset h_stride_in, const ai_ptr_offset h_stride_pad, const ai_ptr_offset h_stride_pad_b, const ai_ptr_offset w_stride_pad, const ai_ptr_offset w_stride_pad_r); /*! 
* @brief Handles padding with 8 bits input/output in edge mode - Lite I/F * @ingroup lite_padding_dqnn */ void forward_lite_pad_edge(ai_ptr_const in_data_tensor, ai_ptr out_data, const ai_i32 height_in, const ai_i16 pads_y, const ai_i16 pads_x_r, const ai_ptr_offset h_stride_in, const ai_ptr_offset w_stride_in, const ai_ptr_offset h_stride_out, const ai_ptr_offset h_stride_pad, const ai_ptr_offset w_stride_pad, const ai_ptr_offset h_stride_pad_b); /*! * @brief Handles padding with 8 bits input/output in reflect mode - Lite I/F * @ingroup lite_padding_dqnn */ void forward_lite_pad_reflect(ai_ptr_const in_data, ai_ptr out_data, const ai_i32 depth, const ai_i32 height_in, const ai_i32 width_in, const ai_i32 height_out, const ai_i32 width_out, const ai_ptr_offset h_stride_in, const ai_ptr_offset w_stride_in, const ai_ptr_offset h_stride_out, const ai_ptr_offset w_stride_out, const ai_i16 pads_x, const ai_i16 pads_y, const ai_i16 pads_y_b, const ai_ptr_offset h_stride_pad, const ai_ptr_offset w_stride_pad, const ai_ptr_offset w_stride_pad_r); #endif /*LITE_PADDING_GENERIC_H*/
4,546
C
43.145631
80
0.421909
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_datatypes_format.h
/** ****************************************************************************** * @file ai_datatypes_format.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform private format handling routines ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_DATATYPES_FORMAT_H #define AI_DATATYPES_FORMAT_H #pragma once #include "ai_platform.h" #include "ai_datatypes_defines.h" // #include "core_datatypes.h" /*! * @defgroup ai_datatypes_format Definiton and Macro of array and buffer formats * @brief Type definition and implementation of internal @ref ai_array and * @ref ai_buffer formats. * @details The library handles 2 different kind of formats: an internal format * that is part of the @ref ai_array struct that is a packed 32bit representation * of the format attributes, and a public format (used in public APIs) associated * with @ref ai_buffer struct , defined as enum in @ref ai_platform.h, * that is just an enum type. Converters are provided in this header file to * convert from one format representation to another. * Some MSB bits are reserved in both formats to code some bit flag useful to * declare some special attribute. 
Three flags are actually implemented in both * formats: the @ref AI_BUFFER_FMT_FLAG_CONST and @ref AI_FMT_FLAG_CONST used * to tag read-only memory buffers, @ref AI_BUFFER_FMT_FLAG_STATIC and * @ref AI_FMT_FLAG_STATIC to mark statically allocated memory buffers and * @ref AI_FMT_FLAG_SCRATCH_BUFFER to tag temporary scratch buffers. * All the formats are declared in a proper tuple organize table header named * @ref format_lists.h that enumerates all the formats available for the library. * A new format could be added easily by adding a new FMY_ENTRY() as required. * The preprocessor automatically generates the code for the handling of the * format according to this tuples entry. A rational for the methodology could * be found here: * - https://codecraft.co/2012/10/29/how-enums-spread-disease-and-how-to-cure-it/ * * The 32bits internal format fields are organized as follows: * * MSB LSB * 31 25 24 23 21 17 14 7 0 * /---------------------------------------------------------------------------/ * / ATTR. FLAGS | FLOAT | SIGN | LDIV | TYPE | PMASK | BITS | FBITS / * /---------------------------------------------------------------------------/ * Where: * - FLAGS: is the reserved bits to store additional format attributes (e.g. * I/O / STATIC flags. etc.) * - FLOAT: 1 bit mark the format as floating point type * - SIGN : 1 bit mark the format as signed type * - LDIV : 2 bits is a log2 value that is used to compute elements size * with some special format such as the compressed ones. It is a shift * factor usually set to zero * - TYPE : 4 bits mark the format "family" type. Actually 5 families are coded, * @ref AI_FMT_FLOAT (float types) * @ref AI_FMT_Q (fixed-point types in Qm.n format) * @ref AI_FMT_BOOL (boolean type) * @ref AI_FMT_LUT4 (compressed lookup 16 formats) * @ref AI_FMT_LUT8 (compressed lookup 256 formats) * - PMASK 3 bits padding mask used to set the optional dimension for padding * to handle special aligned formats/ E.g. 
a 1 bit format * Usually this is set to 0x0 * - BITS 7 bits set the total number of bits of the element, padding bits * excluded. The bits are thus = sign bit + fractional bits + integer bits * The number of integer bits could thus be known using the @ref * AI_FMT_GET_IBITS() macro. * - FBITS 7 bits set the number of fractional bits in the format * * * A reference code snippet for usage is the test unit that uses this header: * * \include test/test_lcut_formats.cpp * */ /*! * Format bitfields definition. NOTE: 7 MSB are masked off * for (optional) atributes setting using flags. see @ref AI_FMT_FLAG_CONST that * is used for marking a data as constant readonly */ /* 1 bit field to identify floating point values*/ #define _FMT_FLOAT_MASK (0x1) #define _FMT_FLOAT_BITS (24) /*! 1 bit sign info */ #define _FMT_SIGN_MASK (0x1) #define _FMT_SIGN_BITS (23) /*! fractional bits field (i.e. for Q formats see @ref AI_FMT_Q) */ #define _FMT_FBITS_MASK (0x7F) #define _FMT_FBITS_BITS (0) #define _FMT_FBITS_BIAS ((_FMT_FBITS_MASK+1) >> 1) /*! TOTAL number of bits (fractional+integer+sign) (excluded padding ones) */ #define _FMT_BITS_MASK (0x7F) #define _FMT_BITS_BITS (7) #define _FMT_BITS_BIAS (0) /*! Padding bits for handling formats not aligned to multiples of 8 bits */ #define _FMT_PMASK_MASK (0x7) #define _FMT_PMASK_BITS (14) /*! bits reserved for identifying the family format, e.g. float, fixed-point..*/ #define _FMT_TYPE_MASK (0xF) #define _FMT_TYPE_BITS (17) #define _FMT_LDIV_MASK (0x3) #define _FMT_LDIV_BITS (21) /******************************************************************************/ #define AI_FMT_OBJ(fmt_) ((ai_array_format)(fmt_)) /*! * Only 25 LSB bits are used for storing actual format bits. 
7 bits are reserved * for format attributes, see @ref AI_FMT_FLAG_CONST flag */ #define AI_FMT_FLAG_BITS (25) #define AI_FMT_MASK ((0x1<<AI_FMT_FLAG_BITS)-1) #define AI_FMT_FLAG_CONST (0x1<<30) #define AI_FMT_FLAG_STATIC (0x1<<29) #define AI_FMT_FLAG_SCRATCH_BUFFER (0x1<<28) #define AI_FMT_FLAG_IS_IO (0x1<<27) #define AI_FMT_FLAG_VISITED (0x1<<26) /******************************************************************************/ /*! * Format "Class" type : this identify the family of the format: * float, integer, fixed point (i.e. Q format), compressed via lookup table */ #define AI_FMT_NONE (0x0) #define AI_FMT_FLOAT (0x1) #define AI_FMT_Q (0x2) #define AI_FMT_BOOL (0x3) #define AI_FMT_LUT4 (0x4) #define AI_FMT_LUT8 (0x8) #define AI_FMT_QMASK \ ( (_FMT_FBITS_MASK<<_FMT_FBITS_BITS) | \ (_FMT_BITS_MASK<<_FMT_BITS_BITS) | \ (_FMT_PMASK_MASK<<_FMT_PMASK_BITS) ) #define AI_FMT_BINARY_MASK \ (AI_FMT_MASK & (~(_FMT_SIGN_MASK<<_FMT_SIGN_BITS))) #define AI_FMT_IS_BINARY(val_) \ (((val_) & AI_FMT_BINARY_MASK) == AI_ARRAY_FORMAT_U1) #define AI_FMT_GET(val_) \ ( (AI_FMT_OBJ(val_)) & AI_FMT_MASK ) #define AI_FMT_MASK_Q(val_) \ ( AI_FMT_OBJ(val_) & (~(AI_FMT_QMASK)) ) #define AI_FMT_GET_Q(val_) \ ( AI_FMT_MASK_Q(val_) | AI_FMT_SET_BITS(0) | AI_FMT_SET_FBITS(0) ) #define AI_FMT_GET_FLAGS(val_) \ ( ((AI_FMT_OBJ(val_)) & (~AI_FMT_MASK)) >> AI_FMT_FLAG_BITS ) #define AI_FMT_SAME(fmt1_, fmt2_) \ ( AI_FMT_GET(fmt1_) == AI_FMT_GET(fmt2_) ) #define _FMT_SET(val, mask, bits) AI_FMT_OBJ(((val)&(mask))<<(bits)) #define _FMT_GET(fmt, mask, bits) ((AI_FMT_OBJ(fmt)>>(bits))&(mask)) #define AI_FMT_SET_FLOAT(val) _FMT_SET(val, _FMT_FLOAT_MASK, _FMT_FLOAT_BITS) #define AI_FMT_GET_FLOAT(fmt) _FMT_GET(fmt, _FMT_FLOAT_MASK, _FMT_FLOAT_BITS) #define AI_FMT_SET_SIGN(val) _FMT_SET(val, _FMT_SIGN_MASK, _FMT_SIGN_BITS) #define AI_FMT_GET_SIGN(fmt) _FMT_GET(fmt, _FMT_SIGN_MASK, _FMT_SIGN_BITS) #define AI_FMT_SET_PMASK(val) _FMT_SET(val, _FMT_PMASK_MASK, _FMT_PMASK_BITS) #define AI_FMT_GET_PMASK(fmt) 
_FMT_GET(fmt, _FMT_PMASK_MASK, _FMT_PMASK_BITS) #define AI_FMT_SET_TYPE(val) _FMT_SET(val, _FMT_TYPE_MASK, _FMT_TYPE_BITS) #define AI_FMT_GET_TYPE(fmt) _FMT_GET(fmt, _FMT_TYPE_MASK, _FMT_TYPE_BITS) #define AI_FMT_SET_LDIV(val) _FMT_SET(val, _FMT_LDIV_MASK, _FMT_LDIV_BITS) #define AI_FMT_GET_LDIV(fmt) _FMT_GET(fmt, _FMT_LDIV_MASK, _FMT_LDIV_BITS) #define AI_FMT_SET_BITS(val) \ _FMT_SET((val) + _FMT_BITS_BIAS, _FMT_BITS_MASK, _FMT_BITS_BITS) #define AI_FMT_GET_BITS(fmt) \ ((ai_i8)_FMT_GET(fmt, _FMT_BITS_MASK, _FMT_BITS_BITS) - _FMT_BITS_BIAS) #define AI_FMT_SET_FBITS(val) \ _FMT_SET((val) + _FMT_FBITS_BIAS, _FMT_FBITS_MASK, _FMT_FBITS_BITS) #define AI_FMT_GET_FBITS(fmt) \ ((ai_i8)_FMT_GET(fmt, _FMT_FBITS_MASK, _FMT_FBITS_BITS) - _FMT_FBITS_BIAS) /*! * The total number of bits for a given format is supposed to be the sum of the * bits + padding bits. This means that the number of integer bits is derived * as follow: int_bits = bits - fbits (fractional bits) - 1 (for the sign) */ #define AI_FMT_GET_BITS_SIZE(fmt_) \ AI_FMT_GET_BITS(fmt_) /*! Macro used to compute the integer bits for a format */ #define AI_FMT_GET_IBITS(fmt_) \ ((ai_i16)AI_FMT_GET_BITS(fmt_)-AI_FMT_GET_FBITS(fmt_)-AI_FMT_GET_SIGN(fmt_)) /*! ai_buffer format handlers section *****************************************/ #define AI_BUFFER_FMT_MASK_Q(fmt_) \ ( AI_BUFFER_FMT_OBJ(fmt_) & 0xFFFFC000 ) #define AI_BUFFER_FMT_GET_Q(fmt_) \ ( AI_BUFFER_FMT_MASK_Q(fmt_) | AI_BUFFER_FMT_SET_FBITS(0) | \ AI_BUFFER_FMT_SET_FBITS(0) ) #define AI_BUFFER_FMT_SET_Q(bits_, fbits_) \ AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, bits_, fbits_) #define AI_BUFFER_FMT_IS_Q(fmt_) \ ( (AI_BUFFER_FMT_TYPE_Q==AI_BUFFER_FMT_GET_TYPE(fmt_)) && \ (1==AI_BUFFER_FMT_GET_SIGN(fmt_)) ) #define AI_BUFFER_FMT_SET_UQ(bits_, fbits_) \ AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, bits_, fbits_) #define AI_BUFFER_FMT_IS_UQ(fmt_) \ ( (AI_BUFFER_FMT_TYPE_Q==AI_BUFFER_FMT_GET_TYPE(fmt_)) && \ (0==AI_BUFFER_FMT_GET_SIGN(fmt_)) ) /*! 
Q ai_array format handlers ************************************************/ #define AI_ARRAY_FMT_Q(bits_, fbits_) \ ( AI_FMT_MASK_Q(AI_ARRAY_FORMAT_Q) | AI_FMT_SET_BITS(bits_) | AI_FMT_SET_FBITS(fbits_) ) #define AI_ARRAY_FMT_SET_Q(bits_, fbits_) \ AI_ARRAY_FMT_Q(bits_, fbits_) #define AI_ARRAY_FMT_IS_Q(fmt_) \ ( AI_FMT_GET(AI_FMT_MASK_Q(AI_ARRAY_FORMAT_Q))==AI_FMT_GET(AI_FMT_MASK_Q(fmt_)) ) #define AI_ARRAY_FMT_UQ(bits_, fbits_) \ ( AI_FMT_MASK_Q(AI_ARRAY_FORMAT_UQ) | AI_FMT_SET_BITS(bits_) | AI_FMT_SET_FBITS(fbits_) ) #define AI_ARRAY_FMT_SET_UQ(bits_, fbits_) \ AI_ARRAY_FMT_UQ(bits_, fbits_) #define AI_ARRAY_FMT_IS_UQ(fmt_) \ ( AI_FMT_GET(AI_FMT_MASK_Q(AI_ARRAY_FORMAT_UQ))==AI_FMT_GET(AI_FMT_MASK_Q(fmt_)) ) AI_DEPRECATED /* Alias for AI_ARRAY_FMT_SET_Q */ #define AI_ARRAY_FMT_SET_SQ(bits_, fbits_) \ AI_ARRAY_FMT_SET_Q(bits_, fbits_) AI_DEPRECATED /* Alias for AI_ARRAY_FMT_IS_Q */ #define AI_ARRAY_FMT_IS_SQ(fmt_) \ AI_ARRAY_FMT_IS_Q(fmt_) /*! ai_array section **********************************************************/ #define AI_ARRAY_FMT_ENTRY(name_) \ AI_CONCAT(AI_ARRAY_FORMAT_, name_) #define AI_ARRAY_FMT_NAME(fmt_) \ ai_array_fmt_name(fmt_) #define AI_ARRAY_FMT_VALID(fmt_) \ ai_array_fmt_valid(fmt_) #define AI_ARRAY_FMT_EXPORTED(fmt_) \ ai_array_fmt_exported(fmt_) #define AI_ARRAY_FMT_GET_FORMATS(formats_) \ ai_array_fmt_get_formats(formats_) #define AI_ARRAY_TO_BUFFER_FMT(fmt_) \ ai_array_to_buffer_fmt(fmt_) #define AI_ARRAY_GET_BYTE_SIZE(fmt_, count_) \ ai_array_get_byte_size(fmt_, count_) #define AI_ARRAY_GET_DATA_BYTE_SIZE(fmt_, count_) \ ai_array_get_data_byte_size(fmt_, count_) #define AI_ARRAY_GET_ELEMS_FROM_SIZE(fmt_, size_) \ ai_array_get_elems_from_size(fmt_, size_) AI_API_DECLARE_BEGIN /*! * @typedef ai_array_format * @ingroup ai_datatypes_format * @brief Generic Data Format Specifier for @ref ai_array (32bits packed info) */ typedef int32_t ai_array_format; /*! 
* @enum internal data format enums * @ingroup ai_datatypes_format * @brief Generic Data Format Specifier (32bits packed info) */ typedef enum { #define FMT_ENTRY(exp_, name_, type_id_, sign_bit_, float_bit_, \ pmask_, bits_, fbits_, ldiv_bits_) \ AI_ARRAY_FMT_ENTRY(name_) = (AI_FMT_SET_FLOAT(float_bit_) | \ AI_FMT_SET_SIGN(sign_bit_) | \ AI_FMT_SET_BITS(bits_) | \ AI_FMT_SET_FBITS(fbits_) | \ AI_FMT_SET_PMASK(pmask_) | \ AI_FMT_SET_TYPE(type_id_) | \ AI_FMT_SET_LDIV(ldiv_bits_)), #include "formats_list.h" } ai_array_format_entry; /*! * @brief Get a human readable string from the format ID value * @ingroup ai_datatypes_format * @param[in] type the @ref ai_array_format to print out * @return a string with a human readable name of the format */ AI_INTERNAL_API const char* ai_array_fmt_name(const ai_array_format type); /*! * @brief Check if @ref ai_array_format is a exportable to an @ref ai_buffer_format * @ingroup ai_datatypes_format * @param[in] type the ai_array_format to check * @return true if the format is exported, false otherwise */ AI_INTERNAL_API ai_bool ai_array_fmt_exported(const ai_array_format type); /*! * @brief Check if @ref ai_array_format is a valid format present in the list of * supported formats * @ingroup ai_datatypes_format * @param[in] type the ai_array_format to check * @return true if the format is valid, false otherwise */ AI_INTERNAL_API ai_bool ai_array_fmt_valid(const ai_array_format type); /*! * @brief Get the complete list of supported @ref ai_array_format formats * @ingroup ai_datatypes_format * @param[out] formats a pointer to an array withj all supported formats listed * @return the number of supported formats */ AI_INTERNAL_API ai_size ai_array_fmt_get_formats(const ai_array_format** formats); /*! ai_buffer section ********************************************************* * Only 25 LSB bits are used for storing actual format bits. 
7 bits are reserved * for format atrtributes, see @ref AI_FMT_FLAG_CONST flag */ #define AI_BUFFER_FMT_ENTRY(name_) \ AI_CONCAT(AI_BUFFER_FORMAT_, name_) #define AI_BUFFER_FMT_NAME(type_) \ ai_buffer_fmt_name(type_) #define AI_BUFFER_FMT_VALID(type_) \ ai_buffer_fmt_valid(type_) #define AI_BUFFER_FMT_GET_FORMATS(formats_) \ ai_buffer_fmt_get_formats(formats_) #define AI_BUFFER_TO_ARRAY_FMT(fmt_) \ ai_buffer_to_array_fmt(fmt_) #define AI_BUFFER_GET_BITS_SIZE(fmt) \ AI_ARRAY_GET_BITS_SIZE(AI_BUFFER_TO_ARRAY_FMT(fmt)) /*! * @brief Get a human readable string from the format ID value * @ingroup ai_datatypes_format * @param[in] type the @ref ai_buffer_format to print out * @return a string with a human readable name of the format */ AI_INTERNAL_API const char* ai_buffer_fmt_name( const ai_buffer_format type); /*! * @brief Check if @ref ai_buffer_format is a valid format present in the list * of supported formats * @ingroup ai_datatypes_format * @param[in] type the @ref ai_buffer_format to check * @return true if the format is valid, false otherwise */ AI_INTERNAL_API ai_bool ai_buffer_fmt_valid( const ai_buffer_format type); /*! * @brief Get the complete list of supported @ref ai_buffer_format formats * @ingroup ai_datatypes_format * @param[out] formats a pointer to an array with all supported formats listed * @return the number of supported formats */ AI_INTERNAL_API ai_size ai_buffer_fmt_get_formats( const ai_buffer_format** formats); /*! Conversions section *******************************************************/ /*! * @brief Convert from ai_array_format to ai_buffer_format. * @ingroup ai_datatypes_format * @param fmt the input ai_array_format to convert * @return the converted format as a ai_buffer_format */ AI_INTERNAL_API ai_buffer_format ai_array_to_buffer_fmt( const ai_array_format fmt); /*! * @brief Convert from ai_buffer_format to ai_array_format. 
* @ingroup ai_datatypes_format * @param fmt the input ai_buffer_format to convert * @return the converted format as a ai_array_format */ AI_INTERNAL_API ai_array_format ai_buffer_to_array_fmt( const ai_buffer_format fmt); /** helpers section ***********************************************************/ /*! * @brief Computes the size in bytes given an ai_array_format and number of * array elements. * @details This routine computes from the number of elements of the array its * size in bytes. If the array is referred by a tensor structure, it is the task * of the latter to handle per-dimension padding (e.g. to align odd rows in a * 4-bit matrix. At array level the padding elements MUST be included in the * number of elements. * @ingroup ai_datatypes_format * @param[in] fmt the input array format as an ai_array_format * @param[in] count the number of elements stored in the data array * @return the size in bytes of the array given the specific format and number * of elements (including padding elements) */ AI_INTERNAL_API ai_size ai_array_get_byte_size( const ai_array_format fmt, const ai_size count); /*! * @brief Computes the size in bytes given an ai_array_format and number of * array elements of the data fields (e.g. LUT table size excluded). * @details This routine computes from the number of elements of the array its * size in bytes. If the array is referred by a tensor structure, it is the task * of the latter to handle per-dimension padding (e.g. to align odd rows in a * 4-bit matrix. At array level the padding elements MUST be included in the * number of elements. * @ingroup ai_datatypes_format * @param[in] fmt the input array format as an ai_array_format * @param[in] count the number of elements stored in the data array * @return the size in bytes of the array given the specific format and number * of elements (including padding elements) */ AI_INTERNAL_API ai_size ai_array_get_data_byte_size( const ai_array_format fmt, const ai_size count); /*! 
* @brief Computes the number of elements from ai_array_format and * the size in byte of the array. * @ingroup ai_datatypes_format * @param fmt the input array format as an ai_array_format * @param size the size in bytes of the array * @return the number of elements that could be stored given the format */ AI_INTERNAL_API ai_size ai_array_get_elems_from_size( const ai_array_format fmt, const ai_size byte_size); AI_API_DECLARE_END #endif /*AI_DATATYPES_FORMAT_H*/
18,640
C
36.965377
91
0.635569
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/core_net_inspect.h
/** ****************************************************************************** * @file core_net_inspect.h * @author AST Embedded Analytics Research Platform * @brief header file of core network inspection APIs ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef __CORE_NET_INSPECT_H_ #define __CORE_NET_INSPECT_H_ #pragma once #include "core_net_inspect_interface.h" #include "core_common.h" #include "layers_common.h" /*! * @defgroup core_net_inspect Core Network Inspection routines * @brief Implementation of core network inspection routines that allows to * inspect on a node basis a generated network model * @details A network context @ref ai_network basically contains a chained list * of nodes @ref ai_node that have an associated forward function. * Each ai)network context and ai_node datastructs have as a required member * field an opaque handler (i.e. a void pointer) to a klass object. * This handler is intended to be used as a platform specific node context * that implements specific target platform routines. * The inspector module basically acts as a plugin that exploiting these features * by temporary creating an hidden inspection context (see * @ref ai_core_inspect_net_klass) associated to the network and * linking it by re-routing the klass field to this inspection context. The * inspection context saves as part of its state (by a stack push operation), the * internal state of the network (all node / network klass pointers and actual * forward functions). 
* Thus, for each node it re-routes all node's forward functions to a dedicated * inspection forward function (see @ref _forward_inspect_validate() routine) * This routine is the core of the mechanism and it allows to inspect a network * node by node. Some additional inspection could thus be done inside the * _forward_inspect_validate() routine before and after the actual node * forward function is called; * */ AI_API_DECLARE_BEGIN /*! * @defgroup core_net_inspect Network Inspection Core * @brief Implementation of the validation network routines */ /*! * @brief Initialize the network inspection context on a given network * @ingroup core net inspect * @param network opaque handler to the network instance * @param cfg a pointer to the inspector configuration we want to use * @return true if execution of the API is fine, false otherwise */ AI_API_ENTRY ai_bool ai_network_inspect_init( ai_handle network, const ai_inspect_config* cfg); /*! * @brief Get a summary report from the inspected network * @ingroup core net inspect * @param network opaque handler to the network instance * @param report a pointer to the report provided back by the inspection * @return true if execution of the API is fine, false otherwise */ AI_API_ENTRY ai_bool ai_network_inspect_get_report( ai_handle network, ai_inspect_net_report* report); /*! * @brief Destroy the network inspection context on a given network * @ingroup core net inspect * @param network opaque handler to the network instance * @return true if execution of the API is fine, false otherwise */ AI_API_ENTRY ai_bool ai_network_inspect_destroy(ai_handle network); AI_API_DECLARE_END #endif /*__CORE_NET_INSPECT_H_*/
3,780
C
37.581632
81
0.682804
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_nl_generic_integer.h
#ifndef LITE_NL_GENERIC_INTEGER_H #define LITE_NL_GENERIC_INTEGER_H #pragma once #include "ai_lite_interface.h" /** * @brief forward lite function for a s8 softmax non-linearity where the softmax is applied per channel. * @ingroup lite_nl_generic_integer * @param output The pointer to output buffer (s8). * @param input The pointer to input buffer (s8). * @param in_size. The size of the input (including channels). * @param ch_size The nsize of each channel. * @param in_ch_step The step between consecutive elements (inputs) * @param out_ch_step The step between consecutive elements (outputs) * @param mult * @param shift * @param min_diff */ LITE_API_ENTRY void forward_lite_nl_softmax_is8os8( ai_i8* out_ptr, const ai_i8* in_ptr, const ai_size in_size, const ai_size ch_size, const ai_i32 in_ch_step, const ai_i32 out_ch_step, const ai_i32 mult, const ai_i32 shift, const ai_i32 min_diff, ai_i32* scratch); /** * @brief forward lite function for a u8 softmax non-linearity where the softmax is applied per channel. * @ingroup lite_nl_generic_integer * @param output The pointer to output buffer (s8). * @param input The pointer to input buffer (s8). * @param in_size. The size of the input (including channels). * @param ch_size The nsize of each channel. * @param in_ch_step The step between consecutive elements (inputs) * @param out_ch_step The step between consecutive elements (outputs) * @param mult * @param shift * @param min_diff */ LITE_API_ENTRY void forward_lite_nl_softmax_iu8ou8( ai_u8* out_ptr, const ai_u8* in_ptr, const ai_size in_size, const ai_size ch_size, const ai_i32 in_ch_step, const ai_i32 out_ch_step, const ai_i32 mult, const ai_i32 shift, const ai_i32 min_diff, ai_i32* scratch); #endif /* LITE_NL_GENERIC_INTEGER_H */
1,815
C
34.607842
104
0.713499
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_datatypes.h
/** ****************************************************************************** * @file ai_datatypes.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform private APIs types ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_DATATYPES_H #define AI_DATATYPES_H #pragma once #include <string.h> #include "ai_platform.h" #include "ai_platform_interface.h" /*! * @defgroup datatypes Platform Interface Datatypes * @brief Data structures used by AI platform to implement neural networks * */ /** Count Variable Number of Arguments (up to 64 elements) *******************/ #define AI_NUMARGS(...) \ PP_NARG_(__VA_ARGS__,PP_RSEQ_N()) #define PP_NARG_(...) \ PP_ARG_N(__VA_ARGS__) #define PP_ARG_N( \ _1, _2, _3, _4, _5, _6, _7, _8, _9,_10, \ _11,_12,_13,_14,_15,_16,_17,_18,_19,_20, \ _21,_22,_23,_24,_25,_26,_27,_28,_29,_30, \ _31,_32,_33,_34,_35,_36,_37,_38,_39,_40, \ _41,_42,_43,_44,_45,_46,_47,_48,_49,_50, \ _51,_52,_53,_54,_55,_56,_57,_58,_59,_60, \ _61,_62,_63,N,...) N #define PP_RSEQ_N() \ 63,62,61,60, \ 59,58,57,56,55,54,53,52,51,50, \ 49,48,47,46,45,44,43,42,41,40, \ 39,38,37,36,35,34,33,32,31,30, \ 29,28,27,26,25,24,23,22,21,20, \ 19,18,17,16,15,14,13,12,11,10, \ 9,8,7,6,5,4,3,2,1,0 /*****************************************************************************/ #define AI_PTR_ALIGN(ptr, alignment) \ ((((ai_uptr)(ptr))+((ai_uptr)(alignment)-1))&(~((ai_uptr)(alignment)-1))) /*! 
* @typedef ai_offset * @ingroup ai_datatypes_internal * @brief Generic index offset type */ typedef int32_t ai_offset; AI_API_DECLARE_BEGIN AI_API_DECLARE_END #endif /* AI_DATATYPES_H */
2,349
C
29.51948
80
0.473819
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_common_config.h
/** ****************************************************************************** * @file ai_common_config.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform common compile configuration defines ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_COMMON_CONFIG_H #define AI_COMMON_CONFIG_H #pragma once /*! * @defgroup layers Layers Compilation Config Definitions * @brief definition * */ #define HAS_PROFILE_FLOAT #define HAS_PROFILE_FIXED #endif /*AI_COMMON_CONFIG_H*/
1,070
C
28.749999
80
0.495327
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_pw_dqnn.h
/** ****************************************************************************** * @file lite_pw_dqnn.h * @author AIS * @brief header file of AI platform lite pw kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_PW_DQNN_H #define LITE_PW_DQNN_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles point wise convolution with binary input, binary output and * binary weights - Lite API version * @ingroup lite_pw_dqnn */ LITE_API_ENTRY void forward_lite_pw_is1os1ws1_bn(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 *pThreshold); /*! * @brief Handles point wise convolution with binary input, binary output and * binary weights - Lite API version - Optimized thanks to Optim2 * assumptions * @ingroup lite_pw_dqnn */ LITE_API_ENTRY void forward_lite_pw_is1os1ws1_bn_optim2(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_u32 *pWeights_init, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_out, const ai_i32 height_out, const ai_i32 *pThreshold); /*! 
* @brief Handles point wise convolution with binary input, 8-bits output and * binary weights - Lite API version * @ingroup lite_pw_dqnn */ LITE_API_ENTRY void forward_lite_pw_is1os8ws1_bn(const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_out, const ai_i32 height_out, const ai_float *pScale, const ai_float *pOffset); /*! * @brief Handles point wise convolution with binary input, 8-bits output and * binary weights - Lite API version - Optimized thanks to Optim1 * assumptions * @ingroup lite_pw_dqnn */ LITE_API_ENTRY void forward_lite_pw_is1os8ws1_bn_optim1(const ai_u32 *pDataIn_init, ai_i8 *pDataOut_init, const ai_u32 *pWeights_init, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_out, const ai_i32 height_out, const ai_float *pScale, const ai_float *pOffset); /*! * @brief Handles point-wise convolution with binary input, float32 output * and binary weights - Lite API version * @ingroup lite_pw_dqnn */ LITE_API_ENTRY void forward_lite_pw_is1of32ws1_bn(const ai_u32 *pDataIn_init, ai_float *pDataOut_init, const ai_u32 *pWeights_init, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_out, const ai_i32 height_out, const ai_float *pScale, const ai_float *pOffset); /*! * @brief Handles point-wise convolution with binary input, float32 output * and binary weights - Lite API version - Optimized thanks to Optim1 * assumptions * @ingroup lite_pw_dqnn */ LITE_API_ENTRY void forward_lite_pw_is1of32ws1_bn_optim1(const ai_u32 *pDataIn_init, ai_float *pDataOut_init, const ai_u32 *pWeights_init, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 width_out, const ai_i32 height_out, const ai_float *pScale, const ai_float *pOffset); #endif /*LITE_PW_DQNN_H*/
5,632
C
42
80
0.430753
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_platform.h
/** ****************************************************************************** * @file ai_platform.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform public APIs types ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_PLATFORM_H #define AI_PLATFORM_H #pragma once #include <stdint.h> #include <stddef.h> #include <inttypes.h> #ifndef AI_PLATFORM_API_MAJOR #define AI_PLATFORM_API_MAJOR (1) #endif #ifndef AI_PLATFORM_API_MINOR #define AI_PLATFORM_API_MINOR (2) #endif #ifndef AI_PLATFORM_API_MICRO #define AI_PLATFORM_API_MICRO (0) #endif #define AI_PLATFORM_API_VERSION \ AI_VERSION(AI_PLATFORM_API_MAJOR, \ AI_PLATFORM_API_MINOR, \ AI_PLATFORM_API_MICRO) #ifndef AI_TOOLS_API_VERSION_MAJOR #define AI_TOOLS_API_VERSION_MAJOR (1) #endif #ifndef AI_TOOLS_API_VERSION_MINOR #define AI_TOOLS_API_VERSION_MINOR (5) #endif #ifndef AI_TOOLS_API_VERSION_MICRO #define AI_TOOLS_API_VERSION_MICRO (0) #endif /*****************************************************************************/ #define AI_TOOLS_API_VERSION \ AI_VERSION(AI_TOOLS_API_VERSION_MAJOR, \ AI_TOOLS_API_VERSION_MINOR, \ AI_TOOLS_API_VERSION_MICRO) #define AI_TOOLS_API_VERSION_1_3 \ AI_VERSION(1, 3, 0) #define AI_TOOLS_API_VERSION_1_4 \ AI_VERSION(1, 4, 0) #define AI_TOOLS_API_VERSION_1_5 \ AI_VERSION(1, 5, 0) /*****************************************************************************/ #ifdef __cplusplus #define AI_API_DECLARE_BEGIN extern "C" { #define AI_API_DECLARE_END } #else 
#include <stdbool.h> #define AI_API_DECLARE_BEGIN /* AI_API_DECLARE_BEGIN */ #define AI_API_DECLARE_END /* AI_API_DECLARE_END */ #endif /*****************************************************************************/ #define AI_FLAG_NONE (0x0) /*****************************************************************************/ AI_API_DECLARE_BEGIN /*! * @typedef ai_flags * @ingroup ai_platform * @brief bitmask for flags management */ typedef uint32_t ai_flags; /*****************************************************************************/ #define AI_CONCAT_ARG(a, b) a ## b #define AI_CONCAT(a, b) AI_CONCAT_ARG(a, b) /*! AI_CAST SECTION ***********************************/ #define AI_CAST(type_, expr_) ((type_)(expr_)) /*****************************************************************************/ #define AI_MAGIC_SIGNATURE \ (0xa1facade) #define AI_PACK(...) \ __VA_ARGS__ /*****************************************************************************/ #define AI_SHAPE_BCWH (0x01u) /*! * @typedef ai_shape_dimension * @ingroup ai_platform * @brief shape dimension type to be used in shape related structs @ref ai_buffer_shape */ typedef uint32_t ai_shape_dimension; /*****************************************************************************/ #if defined(_MSC_VER) #define AI_API_ENTRY __declspec(dllexport) #define AI_ALIGNED(x) /* AI_ALIGNED(x) */ #elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) #define AI_API_ENTRY /* AI_API_ENTRY */ #define AI_ALIGNED(x) AI_CONCAT(AI_ALIGNED_,x) #define AI_ALIGNED_1 _Pragma("data_alignment = 1") #define AI_ALIGNED_2 _Pragma("data_alignment = 2") #define AI_ALIGNED_4 _Pragma("data_alignment = 4") #define AI_ALIGNED_8 _Pragma("data_alignment = 8") #define AI_ALIGNED_16 _Pragma("data_alignment = 16") #define AI_ALIGNED_32 _Pragma("data_alignment = 32") #elif defined(__CC_ARM) #define AI_API_ENTRY __attribute__((visibility("default"))) #define AI_ALIGNED(x) __attribute__((aligned (x))) /* Keil disallows anonymous union initialization 
by default */ #pragma anon_unions #elif defined(__GNUC__) //#define AI_API_ENTRY __attribute__((visibility("default"))) #define AI_API_ENTRY /* AI_API_ENTRY */ #define AI_ALIGNED(x) __attribute__((aligned(x))) #else /* Dynamic libraries are not supported by the compiler */ #define AI_API_ENTRY /* AI_API_ENTRY */ #define AI_ALIGNED(x) /* AI_ALIGNED(x) */ #endif #define AI_HANDLE_PTR(ptr_) ((ai_handle)(ptr_)) #define AI_HANDLE_NULL AI_HANDLE_PTR(NULL) #define AI_HANDLE_FUNC_PTR(func) ((ai_handle_func)(func)) #define AI_UNUSED(x) (void)(x); #define AI_DEPRECATED /* AI_DEPRECATED */ #define AI_LEGACY /* AI_LEGACY */ #define AI_MAGIC_MARKER (0xA1FACADE) #if defined(__cplusplus) #define AI_STRUCT_INIT {} #define AI_C_ARRAY_INIT {} #else #define AI_STRUCT_INIT {0} #define AI_C_ARRAY_INIT {0} #endif #define AI_ERROR_FMT AIU32_FMT #define AI_IS_UNSIGNED(type) \ ((((type)0) - 1) > 0) #define AI_CUSTOM_SIZE(type) \ (ai_custom_type_signature)((AI_IS_UNSIGNED(type)) \ ? (0x80|(sizeof(type)&0x7f)) : (sizeof(type)&0x7f)) /*! network buffers struct handlers *******************************************/ #ifdef __cplusplus #define AI_NETWORK_PARAMS_INIT(params_, activations_) \ { \ {{ params_, activations_ }} \ } #define AI_NETWORK_BUFFERS_INIT(weights_buffers_, activations_buffers_) \ { \ AI_MAGIC_SIGNATURE, AI_PACK(weights_buffers_), AI_PACK(activations_buffers_) \ } #else #define AI_NETWORK_PARAMS_INIT(params_, activations_) \ { \ .params = params_, \ .activations = activations_ \ } #define AI_NETWORK_BUFFERS_INIT(weights_buffers_, activations_buffers_) \ { \ .map_signature = AI_MAGIC_SIGNATURE, \ .map_weights = AI_PACK(weights_buffers_), \ .map_activations = AI_PACK(activations_buffers_) \ } #endif // __cplusplus /*! 
binary padded bits macro helpers *****************************************/ #define AI_PBITS_MASK \ (0x1F) #define AI_PBITS_SHIFTS \ (5) #define AI_PBITS_PADDED_BYTES_COUNT(bits_) \ (((ai_u32)(bits_) + 7) >> 3) #define AI_PBITS_PADDED_WORDS_COUNT(bits_) \ (((ai_size)(bits_) + AI_PBITS_MASK) >> AI_PBITS_SHIFTS) #define AI_PBITS_GET_WORD(word_ptr_, bits_) \ (((ai_pbits*)(word_ptr_)) + ((bits_) >> AI_PBITS_SHIFTS)) #define AI_PAD_CHANNELS(format_, channels_) \ ((AI_BUFFER_FMT_GET_BITS(format_)==1) ? (AI_PBITS_PADDED_WORDS_COUNT(channels_) << AI_PBITS_SHIFTS) : (channels_)) /*! ai_intq_info struct handlers *********************************************/ #define INTQ_CONST const // #define INTQ_CONST #define AI_INTQ_INFO_LIST(list_) \ ((list_)->info) #define AI_INTQ_INFO_LIST_FLAGS(list_) \ ((list_) ? (list_)->flags : 0) #define AI_INTQ_INFO_LIST_SIZE(list_) \ ((list_) ? (list_)->size : 0) #define AI_HAS_INTQ_INFO_LIST(list_) \ ((list_) ? (((list_)->info) && ((list_)->size>0)) : false) #define AI_INTQ_INFO_LIST_SCALE(list_, type_, pos_) \ (((list_) && (list_)->info && ((pos_)<(list_)->size)) \ ? ((type_*)((list_)->info->scale))[(pos_)] : 0) #define AI_INTQ_INFO_LIST_ZEROPOINT(list_, type_, pos_) \ (((list_) && (list_)->info && ((pos_)<(list_)->size)) \ ? ((type_*)((list_)->info->zeropoint))[(pos_)] : 0) /*! ai_buffer format handlers ************************************************/ /*! * @enum buffer format definition * @ingroup ai_platform * * 32 bit signed format list. */ typedef int32_t ai_buffer_format; /*! ai_buffer_meta flags & macros ********************************************/ #define AI_BUFFER_META_HAS_INTQ_INFO (0x1U << 0) #define AI_BUFFER_META_FLAG_SCALE_FLOAT (0x1U << 0) #define AI_BUFFER_META_FLAG_ZEROPOINT_U8 (0x1U << 1) #define AI_BUFFER_META_FLAG_ZEROPOINT_S8 (0x1U << 2) #define AI_BUFFER_META_FLAG_ZEROPOINT_U16 (0x1U << 3) #define AI_BUFFER_META_FLAG_ZEROPOINT_S16 (0x1U << 4) /*! 
ai_buffer format variable flags & macros *********************************/ #define AI_BUFFER_FMT_TYPE_NONE (0x0) #define AI_BUFFER_FMT_TYPE_FLOAT (0x1) #define AI_BUFFER_FMT_TYPE_Q (0x2) #define AI_BUFFER_FMT_TYPE_BOOL (0x3) #define AI_BUFFER_FMT_FLAG_CONST (0x1U<<30) #define AI_BUFFER_FMT_FLAG_STATIC (0x1U<<29) #define AI_BUFFER_FMT_FLAG_IS_IO (0x1U<<27) #define AI_BUFFER_FMT_FLAG_PERSISTENT (0x1U<<29) #define AI_BUFFER_FMT_PACK(value_, mask_, bits_) \ ( ((value_) & (mask_)) << (bits_) ) #define AI_BUFFER_FMT_UNPACK(fmt_, mask_, bits_) \ ( (AI_BUFFER_FMT_OBJ(fmt_) >> (bits_)) & (mask_) ) #define AI_BUFFER_FMT_OBJ(fmt_) \ ((ai_buffer_format)(fmt_)) #define AI_BUFFER_FMT_GET_FLOAT(fmt_) \ AI_BUFFER_FMT_UNPACK(fmt_, 0x1, 24) #define AI_BUFFER_FMT_GET_SIGN(fmt_) \ AI_BUFFER_FMT_UNPACK(fmt_, 0x1, 23) #define AI_BUFFER_FMT_GET_TYPE(fmt_) \ AI_BUFFER_FMT_UNPACK(fmt_, 0xF, 17) #define AI_BUFFER_FMT_GET_BITS(fmt_) \ AI_BUFFER_FMT_UNPACK(fmt_, 0x7F, 7) #define AI_BUFFER_FMT_SET_BITS(bits_) \ AI_BUFFER_FMT_PACK((bits_), 0x7F, 7) #define AI_BUFFER_FMT_GET_FBITS(fmt_) \ ( (ai_i8)AI_BUFFER_FMT_UNPACK(fmt_, 0x7F, 0) - 64 ) #define AI_BUFFER_FMT_SET_FBITS(fbits_) \ AI_BUFFER_FMT_PACK((fbits_)+64, 0x7F, 0) #define AI_BUFFER_FMT_SET(type_id_, sign_bit_, float_bit_, bits_, fbits_) \ AI_BUFFER_FMT_OBJ( \ AI_BUFFER_FMT_PACK(float_bit_, 0x1, 24) | \ AI_BUFFER_FMT_PACK(sign_bit_, 0x1, 23) | \ AI_BUFFER_FMT_PACK(0, 0x3, 21) | \ AI_BUFFER_FMT_PACK(type_id_, 0xF, 17) | \ AI_BUFFER_FMT_PACK(0, 0x7, 14) | \ AI_BUFFER_FMT_SET_BITS(bits_) | \ AI_BUFFER_FMT_SET_FBITS(fbits_) \ ) #define AI_BUFFER_FMT_SAME(fmt1_, fmt2_) \ ( AI_BUFFER_FMT_GET(fmt1_) == AI_BUFFER_FMT_GET(fmt2_) ) #define AI_BUFFER_FMT_GET(fmt_) \ (AI_BUFFER_FMT_OBJ(fmt_) & 0x01FFFFFF) #define AI_BUFFER_FORMAT(buf_) \ AI_BUFFER_FMT_GET((buf_)->format) /*! 
* @define shape type index * @ingroup ai_platform * @brief positional ID for generic shapes C structs */ #define AI_SHAPE_EXTENSION (0x5) #define AI_SHAPE_DEPTH (0x4) #define AI_SHAPE_HEIGHT (0x3) #define AI_SHAPE_WIDTH (0x2) #define AI_SHAPE_CHANNEL (0x1) #define AI_SHAPE_IN_CHANNEL (0x0) #define AI_SHAPE_BATCH (0x0) #define AI_SHAPE_TIME (0x0) AI_DEPRECATED #define AI_BUFFER_WIDTH(buf_) \ ((buf_)->shape.data[AI_SHAPE_WIDTH]) AI_DEPRECATED #define AI_BUFFER_HEIGHT(buf_) \ ((buf_)->shape.data[AI_SHAPE_HEIGHT]) AI_DEPRECATED #define AI_BUFFER_CHANNELS(buf_) \ ((buf_)->shape.data[AI_SHAPE_CHANNEL]) AI_DEPRECATED #define AI_BUFFER_N_BATCHES(buf_) \ ((buf_)->shape.data[AI_SHAPE_BATCH]) #define AI_BUFFER_DATA(buf_, type_) \ ((type_*)((buf_)->data)) #define AI_BUFFER_META_INFO(buf_) \ ((buf_)->meta_info) #define AI_BUFFER_META_INFO_INTQ(meta_) \ ((meta_) && ((meta_)->flags & AI_BUFFER_META_HAS_INTQ_INFO)) \ ? ((meta_)->intq_info) : NULL #define AI_BUFFER_META_INFO_INTQ_GET_SIZE(meta_) \ ( (AI_BUFFER_META_INFO_INTQ(meta_)) \ ? AI_INTQ_INFO_LIST_SIZE(AI_BUFFER_META_INFO_INTQ(meta_)) \ : 0 ) #define AI_BUFFER_META_INFO_INTQ_GET_SCALE(meta_, pos_) \ ( (AI_BUFFER_META_INFO_INTQ(meta_)) \ ? AI_INTQ_INFO_LIST_SCALE(AI_BUFFER_META_INFO_INTQ(meta_), ai_float, pos_) \ : 0 ) #define AI_BUFFER_META_INFO_INTQ_GET_ZEROPOINT(meta_, pos_) \ ( (AI_BUFFER_META_INFO_INTQ(meta_)) \ ? ((AI_INTQ_INFO_LIST_FLAGS(AI_BUFFER_META_INFO_INTQ(meta_))&AI_BUFFER_META_FLAG_ZEROPOINT_U8) \ ? 
AI_INTQ_INFO_LIST_ZEROPOINT(AI_BUFFER_META_INFO_INTQ(meta_), ai_u8, pos_) \ : AI_INTQ_INFO_LIST_ZEROPOINT(AI_BUFFER_META_INFO_INTQ(meta_), ai_i8, pos_) ) \ : 0 ) #define AI_BUFFER_META_INFO_INIT(flags_, intq_info_) { \ .flags = (flags_), \ .intq_info = AI_PACK(intq_info_) \ } #define AI_BUFFER_SIZE(buf_) \ ai_buffer_get_size(buf_, true) #define AI_BUFFER_SIZE_UNPAD(buf_) \ ai_buffer_get_size(buf_, false) #define AI_BUFFER_BYTE_SIZE(count_, fmt_) \ ai_buffer_get_byte_size(count_, fmt_) #define AI_BUFFER_FLAGS(buf_) \ ((buf_) ? (buf_)->flags : 0x0) #define AI_BUFFER_SHAPE_INIT(type_, size_, ...) \ { \ .type = (type_), \ .size = (size_), \ .data = (ai_shape_dimension[]){ __VA_ARGS__ } \ } #define AI_BUFFER_SHAPE_INIT_FROM_ARRAY(type_, size_, array_ptr_) \ { \ .type = (type_), \ .size = (size_), \ .data = (ai_shape_dimension*)(array_ptr_) \ } #define AI_BUFFER_SHAPE_SIZE(buf_) \ ((buf_) ? (buf_)->shape.size : 0) #define AI_BUFFER_SHAPE_TYPE(buf_) \ ((buf_) ? (buf_)->shape.type : 0) #if defined(HAS_AI_ASSERT) && defined(AI_ASSERT) #define AI_BUFFER_SET_SHAPE_ELEM(buf_, pos_, value_) { \ AI_ASSERT(buf_) \ (buf_)->shape.data[pos_] = (value_); \ } #define AI_BUFFER_SHAPE_ELEM(buf_, pos_) \ (((pos_)<AI_BUFFER_SHAPE_SIZE(buf_)) ? (buf_)->shape.data[pos_] : 0) #else #define AI_BUFFER_SET_SHAPE_ELEM(buf_, pos_, value_) { \ (buf_)->shape.data[pos_] = (value_); \ } #define AI_BUFFER_SHAPE_ELEM(buf_, pos_) \ (buf_)->shape.data[pos_] #endif AI_DEPRECATED #define AI_BUFFER_OBJ_INIT(format_, h_, w_, ch_, n_batches_, data_) \ { .format = (ai_buffer_format)(format_), \ .data = (ai_handle)(data_), \ .meta_info = NULL, \ .flags = AI_FLAG_NONE, \ .size = (h_) * (w_) * AI_PAD_CHANNELS(format_, ch_), \ .shape = AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, (n_batches_), (ch_), (w_), (h_)), \ } AI_DEPRECATED #define AI_BUFFER_OBJ_INIT_STATIC(type_, format_, h_, w_, ch_, n_batches_, ...) 
\ { .format = (ai_buffer_format)(format_), \ .data = (ai_handle)((type_[]){__VA_ARGS__}), \ .meta_info = NULL, \ .flags = AI_FLAG_NONE, \ .size = (h_) * (w_) * AI_PAD_CHANNELS(format_, ch_), \ .shape = AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, (n_batches_), (ch_), (w_), (h_)) \ } /* 7.1 new macro API */ #define AI_BUFFER_INIT(flags_, format_, shape_, size_, meta_info_, data_) \ { .format = (ai_buffer_format)(format_), \ .data = (ai_handle)(data_), \ .meta_info = (meta_info_), \ .flags = (flags_), \ .size = (size_), \ .shape = AI_PACK(shape_) \ } /* 7.1 new macro API */ #define AI_BUFFER_INIT_STATIC(type_, flags_, format_, shape_, size_, meta_info_, ...) \ { .format = (ai_buffer_format)(format_), \ .data = (ai_handle)((type_[]){__VA_ARGS__}), \ .meta_info = (meta_info_), \ .flags = (flags_), \ .size = (size_), \ .shape = AI_PACK(shape_) \ } /*****************************************************************************/ #define AI_NETWORK_BUFFERS_FIELD_DECLARE \ ai_signature map_signature; /*! structure signature (required!) */ \ ai_buffer_array map_weights; /*! info about weights array buffers (required!) */ \ ai_buffer_array map_activations; /*! info about activations array buffers (required!) */ #define AI_NETWORK_PARAMS_FIELDS_DECLARE \ union { \ struct { \ ai_buffer params; /*! info about params buffer(required!) */ \ ai_buffer activations; /*! info about activations buffer (required!) */ \ }; \ struct { \ AI_NETWORK_BUFFERS_FIELD_DECLARE \ }; \ }; /*****************************************************************************/ #define AI_BUFFER_ARRAY_OBJ_INIT(flags_, size_, buffer_array_) \ { \ .flags = (ai_u16)(flags_), \ .size = (ai_u16)(size_), \ .buffer = (ai_buffer*)(buffer_array_) \ } #define AI_BUFFER_ARRAY_OBJ_INIT_STATIC(flags_, size_, ...) 
\ { \ .flags = (ai_u16)(flags_), \ .size = (ai_u16)(size_), \ .buffer = (ai_buffer*)((ai_buffer[]){__VA_ARGS__}) \ } #define AI_BUFFER_ARRAY_SANE(buf_array_) \ ai_buffer_array_sane(buf_array_) #define AI_BUFFER_ARRAY_FLAGS(buf_array_) \ ((AI_BUFFER_ARRAY_SANE(buf_array_)) ? (buf_array_)->flags : AI_FLAG_NONE) #define AI_BUFFER_ARRAY_SIZE(buf_array_) \ ((AI_BUFFER_ARRAY_SANE(buf_array_)) ? (buf_array_)->size : 0) #define AI_BUFFER_ARRAY_ITEM(buf_array_, pos_) \ ((AI_BUFFER_ARRAY_SANE(buf_array_)) ? ((buf_array_)->buffer + (pos_)) : NULL) #define AI_BUFFER_ARRAY_ITEM_SET_ADDRESS(buf_array_, pos_, address_) \ ai_buffer_array_item_set_address(buf_array_, pos_, address_) /*! * @enum buffer formats enum list * @ingroup ai_platform * * List of supported ai_buffer format types. */ enum { AI_BUFFER_FORMAT_NONE = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_NONE, 0, 0, 0, 0), AI_BUFFER_FORMAT_FLOAT = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_FLOAT, 1, 1, 32, 0), AI_BUFFER_FORMAT_U1 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 1, 0), AI_BUFFER_FORMAT_U8 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 8, 0), AI_BUFFER_FORMAT_U16 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 16, 0), AI_BUFFER_FORMAT_U32 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 32, 0), AI_BUFFER_FORMAT_S1 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 1, 0), AI_BUFFER_FORMAT_S8 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 8, 0), AI_BUFFER_FORMAT_S16 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 16, 0), AI_BUFFER_FORMAT_S32 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 32, 0), AI_BUFFER_FORMAT_Q = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 0, 0), AI_BUFFER_FORMAT_Q7 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 8, 7), AI_BUFFER_FORMAT_Q15 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 1, 0, 16, 15), AI_BUFFER_FORMAT_UQ = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 0, 0), AI_BUFFER_FORMAT_UQ7 = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 8, 7), AI_BUFFER_FORMAT_UQ15 = 
AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_Q, 0, 0, 16, 15), AI_BUFFER_FORMAT_BOOL = AI_BUFFER_FMT_SET(AI_BUFFER_FMT_TYPE_BOOL, 0, 0, 8, 0), }; /*****************************************************************************/ #define AI_ERROR_INIT(type_, code_) { \ .type = AI_ERROR_##type_, \ .code = AI_ERROR_CODE_##code_ \ } /* printf formats */ #define SSIZET_FMT "%" PRIu32 #define AII32_FMT "%" PRId32 #define AIU32_FMT "%" PRIu32 #define AII64_FMT "%" PRId64 #define AIU64_FMT "%" PRIu64 #define AI_VERSION(major_, minor_, micro_) \ (((major_)<<24) | ((minor_)<<16) | ((micro_)<<8)) typedef uint8_t ai_custom_type_signature; typedef void* ai_handle; typedef const void* ai_handle_const; typedef float ai_float; typedef double ai_double; typedef bool ai_bool; typedef char ai_char; typedef uint32_t ai_size; typedef int16_t ai_short_size; typedef uintptr_t ai_uptr; typedef unsigned int ai_uint; typedef uint8_t ai_u8; typedef uint16_t ai_u16; typedef uint32_t ai_u32; typedef uint64_t ai_u64; typedef int ai_int; typedef int8_t ai_i8; typedef int16_t ai_i16; typedef int32_t ai_i32; typedef int64_t ai_i64; typedef uint64_t ai_macc; typedef int32_t ai_pbits; typedef uint32_t ai_signature; typedef void (*ai_handle_func)(ai_handle); /*****************************************************************************/ /*! * @struct ai_error * @ingroup ai_platform * @brief Structure encoding details about the last error. */ typedef struct ai_error_ { ai_u32 type : 8; /*!< Error type represented by @ref ai_error_type */ ai_u32 code : 24; /*!< Error code represented by @ref ai_error_code */ } ai_error; /*****************************************************************************/ /*! * @struct ai_intq_info * @ingroup ai_platform * @brief an element of the ai_intq_info_list entry. It reports an array for the * scale and zeropoint values for each buffer. 
Optional flags are also present */ typedef struct ai_intq_info_ { INTQ_CONST ai_float* scale; INTQ_CONST ai_handle zeropoint; } ai_intq_info; /*! * @struct ai_intq_info_list * @ingroup ai_platform * @brief list reporting meta info for quantized networks integer support * when size > 1 it means a per channel out quantization */ typedef struct ai_intq_info_list_ { ai_u16 flags; /*!< optional flags to store intq info attributes */ ai_u16 size; /*!< number of elements in the the intq_info list */ INTQ_CONST ai_intq_info* info; /*!< pointer to an array of metainfo * associated to the intq_info list */ } ai_intq_info_list; /*****************************************************************************/ /*! * @struct ai_buffer_meta_info * @ingroup ai_platform * @brief Optional meta attributes associated with the I/O buffer. * This datastruct is used also for network querying, where the data field may * may be NULL. */ typedef struct ai_buffer_meta_info_ { ai_u32 flags; /*!< meta info flags */ ai_intq_info_list* intq_info; /*!< meta info related to integer format */ } ai_buffer_meta_info; /*! * @struct ai_buffer_shape * @ingroup ai_platform * @brief Memory buffer shape datatype definition. */ typedef struct ai_buffer_shape_ { ai_u32 type : 8; /*!< shape type: reserved for compatibility */ ai_u32 size : 24; /*!< size: shape cardinality */ ai_shape_dimension* data; /*!< pointer to shape tuple array */ } ai_buffer_shape; /*! * @struct ai_buffer * @ingroup ai_platform * @brief Memory buffer storing data (optional) with a shape, size and type. * This datastruct is used also for network querying, where the data field may * may be NULL. 
*/ typedef struct ai_buffer_ { ai_buffer_format format; /*!< buffer format */ ai_handle data; /*!< pointer to buffer data */ ai_buffer_meta_info* meta_info; /*!< pointer to buffer metadata info */ /* New 7.1 fields */ ai_flags flags; /*!< shape optional flags */ ai_size size; /*!< number of elements of the buffer (including optional padding) */ ai_buffer_shape shape; /*!< n-dimensional shape info */ } ai_buffer; /*! * @struct ai_buffer_array * @ingroup ai_platform * @brief Array of @ref ai_buffer. */ typedef struct ai_buffer_array_ { ai_u16 flags; /*!< buffer array flags */ ai_u16 size; /*!< buffer array size */ ai_buffer* buffer; /*!< buffer array buffers pointer */ } ai_buffer_array; /* enums section */ /*! * @enum ai_error_type * @ingroup ai_platform * * Generic enum to list network error types. */ typedef enum { AI_ERROR_NONE = 0x00, /*!< No error */ AI_ERROR_TOOL_PLATFORM_API_MISMATCH = 0x01, AI_ERROR_TYPES_MISMATCH = 0x02, AI_ERROR_INVALID_HANDLE = 0x10, AI_ERROR_INVALID_STATE = 0x11, AI_ERROR_INVALID_INPUT = 0x12, AI_ERROR_INVALID_OUTPUT = 0x13, AI_ERROR_INVALID_PARAM = 0x14, AI_ERROR_INVALID_SIGNATURE = 0x15, AI_ERROR_INVALID_SIZE = 0x16, AI_ERROR_INVALID_VALUE = 0x17, AI_ERROR_INIT_FAILED = 0x30, AI_ERROR_ALLOCATION_FAILED = 0x31, AI_ERROR_DEALLOCATION_FAILED = 0x32, AI_ERROR_CREATE_FAILED = 0x33, } ai_error_type; /*! * @enum ai_error_code * @ingroup ai_platform * * Generic enum to list network error codes. 
*/ typedef enum { AI_ERROR_CODE_NONE = 0x0000, /*!< No error */ AI_ERROR_CODE_NETWORK = 0x0010, AI_ERROR_CODE_NETWORK_PARAMS = 0x0011, AI_ERROR_CODE_NETWORK_WEIGHTS = 0x0012, AI_ERROR_CODE_NETWORK_ACTIVATIONS = 0x0013, AI_ERROR_CODE_LAYER = 0x0014, AI_ERROR_CODE_TENSOR = 0x0015, AI_ERROR_CODE_ARRAY = 0x0016, AI_ERROR_CODE_INVALID_PTR = 0x0017, AI_ERROR_CODE_INVALID_SIZE = 0x0018, AI_ERROR_CODE_INVALID_FORMAT = 0x0019, AI_ERROR_CODE_OUT_OF_RANGE = 0x0020, AI_ERROR_CODE_INVALID_BATCH = 0x0021, AI_ERROR_CODE_MISSED_INIT = 0x0030, AI_ERROR_CODE_IN_USE = 0x0040, AI_ERROR_CODE_LOCK = 0x0041, } ai_error_code; /*! * @struct ai_platform_version * @ingroup ai_platform * @brief Datastruct storing platform version info */ typedef struct ai_platform_version_ { ai_u8 major; ai_u8 minor; ai_u8 micro; ai_u8 reserved; } ai_platform_version; /*! * @struct ai_network_params * @ingroup ai_platform * * Datastructure to pass parameters during network initialization. */ typedef struct ai_network_params_ { AI_NETWORK_PARAMS_FIELDS_DECLARE } ai_network_params; /*! * @struct ai_network_buffers * @ingroup ai_platform * * Datastructure to pass network buffers during network initialization. */ typedef struct ai_network_buffers_ { AI_NETWORK_BUFFERS_FIELD_DECLARE } ai_network_buffers; /*! * @struct ai_network_report * @ingroup ai_platform * * Datastructure to query a network report with some relevant network detail. */ typedef struct ai_network_report_ { const char* model_name; const char* model_signature; const char* model_datetime; const char* compile_datetime; const char* runtime_revision; ai_platform_version runtime_version; const char* tool_revision; ai_platform_version tool_version; ai_platform_version tool_api_version; ai_platform_version api_version; ai_platform_version interface_api_version; ai_macc n_macc; ai_u16 n_inputs; ai_u16 n_outputs; ai_buffer* inputs; ai_buffer* outputs; AI_NETWORK_PARAMS_FIELDS_DECLARE ai_u32 n_nodes; ai_signature signature; } ai_network_report; /*! 
* @enum ai_upsample_mode * @ingroup ai_platform * @brief allowed mode in upsample layer */ typedef enum { AI_UPSAMPLE_ZEROS = 0x0, AI_UPSAMPLE_NEAREST, AI_UPSAMPLE_BILINEAR, AI_UPSAMPLE_TRILINEAR } ai_upsample_mode; /*! * @enum ai_resize_mode * @ingroup ai_platform * @brief allowed mode in resize layer */ typedef enum { AI_RESIZE_ZEROS = 0x0, AI_RESIZE_NEAREST, AI_RESIZE_LINEAR, AI_RESIZE_CUBIC } ai_resize_mode; /*! * @enum ai_coord_transf_mode * @ingroup ai_platform * @brief coordinate_transformation_mode in resize layer */ typedef enum { AI_HALF_PIXEL = 0x0, AI_PYTORCH_HALF_PIXEL, AI_ALIGN_CORNERS, AI_ASYMMETRIC, AI_TF_HALF_PIXEL_FOR_NN, AI_TF_CROP_AND_RESIZE } ai_coord_transf_mode; typedef enum { AI_ROUND_PREFER_FLOOR = 0x0, AI_ROUND_PREFER_CEIL, AI_ROUND_FLOOR, AI_ROUND_CEIL } ai_nearest_mode; typedef enum { AI_PAD_CONSTANT = 0x0, AI_PAD_REFLECT, AI_PAD_EDGE, AI_PAD_8BIT_CH1ST_CONSTANT, } ai_pad_mode; #define OUTPUT_PADDING_FLAG (1 << 0) #define CHANNEL_FIRST_FLAG (1 << 1) /* Carefull when changing those definitions bit0 shall always select output padding (Valid vs Same) bit1 shall always select Channel first /channel lst format */ typedef enum { AI_LAYER_FORMAT_CHANNEL_LAST_VALID = 0x0, AI_LAYER_FORMAT_CHANNEL_LAST_SAME = 0x1, AI_LAYER_FORMAT_CHANNEL_FIRST_VALID = 0x2, AI_LAYER_FORMAT_CHANNEL_FIRST_SAME = 0x3, } ai_layer_format_type; /*! ai_platform public APIs **************************************************/ /*! * @brief get the total number of elements of an ai_buffer. * @ingroup ai_platform * @param buffer a pointer to an @ref ai_buffer * @param with_padding when true it considers also padded elements * @return the number of elements of the buffer (with/without padded ones) */ AI_API_ENTRY ai_size ai_buffer_get_size(const ai_buffer* buffer, const ai_bool with_padding); /*! * @brief get the size in bytes of an ai_buffer (given the number of elements and format). 
* @ingroup ai_platform * @param count the number of elements composing the buffer * @param fmt the format of the ai_buffer * @return the size in bytes of the buffer */ AI_API_ENTRY ai_size ai_buffer_get_byte_size(const ai_size count, const ai_buffer_format fmt); /*! * @brief get total size in bytes of a buffer array. * @ingroup ai_platform * @param barray a pointer to the buffer array * @return the total size in bytes of all the buffer arrays */ AI_API_ENTRY ai_bool ai_buffer_array_is_empty(const ai_buffer_array* barray); /*! * @brief get total size in bytes of a buffer array. * @ingroup ai_platform * @param barray a pointer to the buffer array * @return the total size in bytes of all the buffer arrays */ AI_API_ENTRY ai_bool ai_buffer_array_is_valid(const ai_buffer_array* barray); /*! * @brief check if a buffer array is valid - i.e. not empty. * @ingroup ai_platform * @param barray a pointer to the buffer array * @return true if the array is consistent and not empty, false otherwise */ AI_API_ENTRY ai_bool ai_buffer_array_sane(const ai_buffer_array* barray); /*! * @brief get total size in bytes of a buffer array. * @ingroup ai_platform * @param barray a pointer to the buffer array * @return the total size in bytes of all the buffer arrays */ AI_API_ENTRY ai_size ai_buffer_array_get_byte_size(const ai_buffer_array* barray); /*! * @brief set the address of buffer array item @pos * @ingroup ai_platform * @param barray a pointer to the buffer array * @param pos the index of the element in the array * @param address the address to set * @return true if successful, false otherwise */ AI_API_ENTRY ai_bool ai_buffer_array_item_set_address( ai_buffer_array* barray, const ai_u32 pos, ai_handle address); AI_API_DECLARE_END #endif /*AI_PLATFORM_H*/
30,193
C
30.159959
116
0.58189
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_ml_iforest.h
/** ****************************************************************************** * @file layers_iforest.h * @author AIS * @brief header file of AI platform iForest layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_IFOREST_H #define LAYERS_IFOREST_H #pragma once #include "layers_common.h" /*! * @defgroup layers_ml Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /* Allowed tests branch in the iTrees */ typedef enum { AI_IFOREST_BRANCH_LT_IDX = 0, AI_IFOREST_BRANCH_LEQ_IDX, AI_IFOREST_BRANCH_EQ_IDX, AI_IFOREST_BRANCH_END, } ai_iforest_branch_e; /*! * @struct ai_layer_iforest * @ingroup layers_iforest * @brief iForest layer * * The type of iforest function is handled by the specific forward function * @ref forward_iforest */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_iforest_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_float global_average_path_length; /*!< global average path length used to normalized average path length*/ ai_float score_threshold; /*!< score threshold used to center the score around 0 */ } ai_layer_iforest; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Decodes the iforest ML algorithm. * @ingroup layers_iforest * @param layer iforest layer */ AI_INTERNAL_API void forward_iforest(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_IFOREST_H*/
2,134
C
25.6875
112
0.524367
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_ml_svc.h
/** ****************************************************************************** * @file layers_svc.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform SVM Classifier (SVC) datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_SVC_H #define LAYERS_SVC_H #pragma once #include "layers_common.h" /*! * @defgroup layers_svc Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /* SVM classifier (SVC) kernel types */ typedef enum ai_svc_kernel_e_ { AI_SVC_KERNEL_LINEAR = 0, AI_SVC_KERNEL_POLYNOMIAL, AI_SVC_KERNEL_RBF, AI_SVC_KERNEL_SIGMOID, AI_SVC_KERNEL_UNSUPPORTED } ai_svc_kernel_e; /*! 
* @struct ai_layer_svc * @ingroup layers_svc * @brief SVM Classifier (SVC) layer * * The type of svc function is handled by the specific forward function * @ref forward_svc */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_svc_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_float gamma; /*!< kernel coefficient for rbf, polynomial and sigmoid functions */ ai_float coef0; /*!< term in polynomial and sigmoid functions */ ai_u32 degree; /*!< polynomial function degree */ ai_svc_kernel_e kernel_type; /*!< kernel type : see ai_svm_kernel_e */ ai_bool proba_support; /*!< whether or not use the parameters learned in Platt scaling */ ai_bool has_classlabels_int; /*!< if True, SVC returns classlabels int, else classlabels string */ } ai_layer_svc; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Decodes the SVM Classifier ML operator. * @ingroup layers_svc * @param layer svm classifier layer */ AI_INTERNAL_API void forward_svc(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_SVC_H*/
2,548
C
30.085365
110
0.523155
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/core_log.h
/** ****************************************************************************** * @file core_log.h * @author AST Embedded Analytics Research Platform * @brief header file of core log interfaces ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef CORE_LOG_H #define CORE_LOG_H #pragma once #include "ai_platform.h" #include "ai_datatypes_defines.h" /*! * @defgroup core_log Logger core routines wrapper interface * @brief Common macros, datatypes and routines of ai logger module * @details This header defines the wrapping macros interfaces to handle the * global logger module. These macro are defined when the macro HAS_LOG is * defined, otherwise they are all set to NOP routines and no logger code is * compiled at all. When the macro HAS_LOG is defined, only the log messages * having an enum id >= the value of the macro are compiled. Thus to include in * compilation only log messages up to the error level the value of HAS_LOG must * be equal the the enum value of LOG_ERROR macro (i.e. 3). a value of 6 means * to include all log messages up to the lower LOG_TRACE level. */ #if defined HAS_LOG && (HAS_LOG>=0) #include "ai_log.h" #define AI_LOG_SECTION(...) 
\ { __VA_ARGS__ } #define AI_LOG_ACQUIRE() \ ai_log_acquire() #define AI_LOG_SET_LEVEL(level_) \ AI_WRAP_FUNC(ai_log_set_level(level_);) #define AI_LOG_SET_QUIET(onoff_) \ AI_WRAP_FUNC(ai_log_set_quiet(onoff_);) #define AI_LOG_SET_LOCK_FN(fn_, udata_) \ AI_WRAP_FUNC(ai_log_set_lock(fn_, udata_);) #define AI_LOG_CHANNEL_PUSH(level_, fn_, udata_) \ AI_WRAP_FUNC(ai_log_channel_push(level_, fn_, udata_);) #define AI_LOG_CHANNEL_POP(fn_, udata_) \ AI_WRAP_FUNC(ai_log_channel_pop(fn_, udata_);) #ifdef LOG_USE_FILE #define AI_LOG_SET_FILE_POINTER(fp_) \ AI_WRAP_FUNC(ai_log_set_fp(fp_);) #else #define AI_LOG_SET_FILE_POINTER(fp_) \ AI_WRAP_FUNC(/*AI_LOG_SET_FILE_POINTER()*/) #endif #else #define AI_LOG_SECTION(...) AI_WRAP_FUNC(/*AI_LOG_SECTION()*/) #define AI_LOG_ACQUIRE() (NULL) #define AI_LOG_SET_LEVEL(level_) AI_WRAP_FUNC(/*AI_LOG_SET_LEVEL()*/) #define AI_LOG_SET_QUIET(onoff_) AI_WRAP_FUNC(/*AI_LOG_SET_QUIET()*/) #define AI_LOG_SET_LOCK_FN(fn_, udata_) AI_WRAP_FUNC(/*AI_LOG_SET_LOCK_FN()*/) #define AI_LOG_CHANNEL_PUSH(level_, fn_, udata_) AI_WRAP_FUNC(/*AI_LOG_CHANNEL_PUSH()*/) #define AI_LOG_CHANNEL_POP(fn_, udata_) AI_WRAP_FUNC(/*AI_LOG_CHANNEL_POP()*/) #define AI_LOG_SET_FILE_POINTER(fp_) AI_WRAP_FUNC(/*AI_LOG_SET_FILE_POINTER()*/) #endif #if defined HAS_LOG #define AI_LOG_PRINT(level, fmt, ...) \ AI_WRAP_FUNC(ai_log_print(level, fmt, ##__VA_ARGS__);) #else #define AI_LOG_PRINT(level, fmt, ...) \ AI_WRAP_FUNC(/*AI_LOG_PRINT(...)*/) #endif #if defined HAS_LOG && (HAS_LOG>=LOG_SUDO) #define AI_LOG_SUDO(fmt, ...) \ AI_WRAP_FUNC(ai_log_log(LOG_SUDO, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);) #else #define AI_LOG_SUDO(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_SUDO()*/) #endif #if defined HAS_LOG && (HAS_LOG>=LOG_TRACE) #define AI_LOG_TRACE(fmt, ...) \ AI_WRAP_FUNC(ai_log_log(LOG_TRACE, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);) #else #define AI_LOG_TRACE(fmt, ...) 
AI_WRAP_FUNC(/*AI_LOG_TRACE()*/) #endif #if defined HAS_LOG && (HAS_LOG>=LOG_DEBUG) #define AI_LOG_DEBUG(fmt, ...) \ AI_WRAP_FUNC(ai_log_log(LOG_DEBUG, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);) #else #define AI_LOG_DEBUG(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_DEBUG()*/) #endif #if defined HAS_LOG && (HAS_LOG>=LOG_INFO) #define AI_LOG_INFO(fmt, ...) \ AI_WRAP_FUNC(ai_log_log(LOG_INFO, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);) #else #define AI_LOG_INFO(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_INFO()*/) #endif #if defined HAS_LOG && (HAS_LOG>=LOG_WARN) #define AI_LOG_WARN(fmt, ...) \ AI_WRAP_FUNC(ai_log_log(LOG_WARN, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);) #else #define AI_LOG_WARN(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_WARN()*/) #endif #if defined HAS_LOG && (HAS_LOG>=LOG_ERROR) #define AI_LOG_ERROR(fmt, ...) \ AI_WRAP_FUNC(ai_log_log(LOG_ERROR, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);) #else #define AI_LOG_ERROR(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_ERROR()*/) #endif #if defined HAS_LOG && (HAS_LOG>=LOG_FATAL) #define AI_LOG_FATAL(fmt, ...) \ AI_WRAP_FUNC(ai_log_log(LOG_FATAL, __FILE__, __LINE__, fmt LOG_CR, ##__VA_ARGS__);) #else #define AI_LOG_FATAL(fmt, ...) AI_WRAP_FUNC(/*AI_LOG_FATAL()*/) #endif #endif /*CORE_LOG_H*/
5,222
C
37.404411
97
0.572769
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dense_if32.h
#ifndef _LITE_DENSE_IF32_H #define _LITE_DENSE_IF32_H #pragma once #include "ai_lite_interface.h" /*! * @brief Forward function for a dense layer with signed float input, * signed float output, and float weights. * @ingroup lite_dense_if32 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias (NULL if not available). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_if32of32wf32( ai_float* output, const ai_float* input, const ai_float* weights, const ai_float* bias, const ai_u32 n_channel_in, const ai_u32 n_channel_out); #endif /*_LITE_DENSE_IF32_H*/
855
C
30.703703
69
0.71462
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_dense.h
/** ****************************************************************************** * @file layers_dense.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform dense layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_DENSE_H #define LAYERS_DENSE_H #pragma once #include "layers_common.h" /*! * @defgroup layers Normalization Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @brief Computes the activations of a fixed point dense (fully connected) layer. * @ingroup layers_dense * @param layer the dense layer */ AI_INTERNAL_API void forward_dense_fixed(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_DENSE_H*/
1,264
C
24.3
82
0.524525
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_generic_dqnn.h
/** ****************************************************************************** * @file layers_generic_dqnn.h * @author AIS * @brief header file of AI platform DQNN generic datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_GENERIC_DQNN_H #define LAYERS_GENERIC_DQNN_H #pragma once #include "layers_common.h" #include "layers_generic.h" /*! * @defgroup layers_generic_dqnn Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles concat with binary input, binary output and * binary weights * @ingroup layers_generic_dqnn * @param layer concat layer */ AI_INTERNAL_API void forward_concat_is1os1(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_GENERIC_DQNN_H*/
1,537
C
26.464285
80
0.454782
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_generic_float.h
/** ****************************************************************************** * @file lite_conv2d_dqnn.h * @author AIS * @brief header file of AI platform lite conv kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_GENERIC_FLOAT_H #define LITE_GENERIC_FLOAT_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles 2D convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_topK_axis_0_if32of32(const ai_float *pDataIn_init, ai_float *pDataOut_values_init, ai_i32 *pDataOut_index_init, const ai_size height_in, const ai_size width_in, const ai_size n_channel_in, const ai_size k, ai_i16 largest, void (*f)(const ai_float* inputs, ai_float* values, ai_i32* indices, ai_size k, ai_size n_elements, ai_i32 stride, ai_i16 largest) ); /*! 
* @brief Handles 2D convolution with binary input, binary output and * binary weights - with 0 padding (QKeras like) - Lite I/F * - Optimized thanks to Optim0 assumptions * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_topK_axis_1_if32of32(const ai_float *pDataIn_init, ai_float *pDataOut_values_init, ai_i32 *pDataOut_index_init, const ai_size height_in, const ai_size width_in, const ai_size n_channel_in, const ai_size k, ai_i16 largest, void (*f)(const ai_float* inputs, ai_float* values, ai_i32* indices, ai_size k, ai_size n_elements, ai_i32 stride, ai_i16 largest) ); /*! * @brief Handles 2D convolution with binary input, 8-bits output and * binary weights - with 0 padding (QKeras like) - Lite I/F * @ingroup lite_conv2d_dqnn */ LITE_API_ENTRY void forward_lite_topK_axis_2_if32of32(const ai_float *pDataIn_init, ai_float *pDataOut_values_init, ai_i32 *pDataOut_index_init, const ai_size height_in, const ai_size width_in, const ai_size n_channel_in, const ai_size k, ai_i16 largest, void (*f)(const ai_float* inputs, ai_float* values, ai_i32* indices, ai_size k, ai_size n_elements, ai_i32 stride, ai_i16 largest) ); LITE_API_ENTRY void forward_lite_func_reduce_l1_if32of32( ai_float* out_ptr, const ai_float* in_ptr, const ai_size out_size, const ai_size in_step, const ai_size axis_size, const ai_size axis_step); LITE_API_ENTRY void forward_lite_func_reduce_l2_if32of32( ai_float* out_ptr, const ai_float* in_ptr, const ai_size out_size, const ai_size in_step, const ai_size axis_size, const ai_size axis_step); #endif /*LITE_GENERIC_FLOAT_H*/
4,287
C
42.755102
148
0.463028
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_nl.h
/** ****************************************************************************** * @file layers_nl.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform nonlinearity layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_NL_H #define LAYERS_NL_H #pragma once #include "layers_common.h" /*! * @defgroup layers_nl Normalization Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_nl * @ingroup layers_nl * @brief Generic Nonlinearity layer * * The type of nonlinearity is handled by the specific forward function. * It is a sequential layer. see @ref ai_layer */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_nl_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* nl_params; /*!< associated parameters array */ } ai_layer_nl; /*! * @struct ai_layer_sm * @ingroup layers_nl * @brief Softmax Nonlinearity layer * * It is a sequential layer. see @ref ai_layer */ typedef ai_layer_nl ai_layer_sm; /*! * @typedef (*func_nl) * @ingroup layers_nl * @brief Fuction pointer for generic non linear transform * this function pointer abstracts a generic non linear layer. * see @ref nl_func_tanh_array_f32 and similar as examples. */ //typedef void (*func_nl)(ai_array *out, const ai_array *in, // const ai_size size, const ai_handle params); typedef void (*func_nl)(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Softmax pooling computed on a single float channel * @ingroup layers_nl * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param channel_size number of elements of the input channel * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_sm_channel_f32(ai_tensor *out, const ai_tensor *in, const ai_size channel_size, const ai_handle params); /*! * @brief Softmax normalization computed on an array of float channels * @ingroup layers_nl * @param out opaque handler to float output channel array * @param in opaque handler to float input channel array * @param in_size total size (number of elements) to process on the input * @param channel_size number of elements of the input channel * @param in_channel_step number of elements to move to next input element * @param out_channel_step number of elements to move to next output element */ AI_INTERNAL_API void nl_func_sm_array_f32(ai_tensor *out, ai_tensor *in, const ai_size in_size, const ai_size channel_size, const ai_size in_channel_step, const ai_size out_channel_step); /*! * @brief Softmax zero pooling computed on a single float channel * @ingroup layers_nl * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param channel_size number of elements of the input channel * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_sm_zero_channel_f32(ai_tensor *out, const ai_tensor *in, const ai_size channel_size, const ai_handle params); /*! * @brief Probit non linearity * @ingroup layers_nl * @param out opaque handler to float output channel * @param in opaque handler to float input channel * @param channel_size number of elements of the input channel * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_probit_f32(ai_tensor *out, const ai_tensor *in, const ai_size channel_size, const ai_handle params); /*! 
* @brief Computes the tanh function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_tanh_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the tanh function on a fixed point data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_tanh_array_fixed(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the sigmoid function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_sigmoid_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the sigmoid function on a fixed point data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_sigmoid_array_fixed(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the hard sigmoid function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_hard_sigmoid_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the logistic function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_logistic_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the swish function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_swish_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the hard swish function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_hard_swish_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the absolute value function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_abs_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the cosine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_cos_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the inverse cosine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_acos_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the hyperbolic cosine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_cosh_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the inverse hyperbolic cosine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_acosh_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the sine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_sin_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the inverse sine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_asin_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the hyperbolic sine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_sinh_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the inverse hyperbolic sine function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_asinh_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the tangent function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_tan_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the inverse tangent function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_atan_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the inverse hyperbolic tangent function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_atanh_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the error function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_erf_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the natural logarithm function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_log_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the reciprocal square root function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_rsqrt_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the squarefunction on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_square_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the floor function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_floor_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the ceil function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_ceil_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the rounding function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_round_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the exponential function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_exp_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the sign negation function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_neg_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the sign negation function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_not_array_bool(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the reciprocal function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_reciprocal_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the square root function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_sqrt_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the soft plus function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer */ AI_INTERNAL_API void nl_func_soft_plus_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the soft sign function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_soft_sign_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the sign function on a single float element. * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer */ AI_INTERNAL_API void nl_func_sign_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the clip function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_clip_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the hardmax function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param axis direction of the max index to be searched */ AI_INTERNAL_API void nl_func_hardmax_array_f32(ai_tensor *out, const ai_tensor *in, const ai_shape *shape, const ai_handle params); /*! 
* @brief Computes the generic relu function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_relu_generic_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the thresholded relu function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_relu_thresholded_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the relu function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_relu_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the relu function on a fixed point data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_relu_array_fixed(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the relu function on an integer-quantized data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to optional nl parameters */ void nl_func_relu_array_integer(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the clip function on an integer-quantized data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to optional nl parameters */ void nl_func_clip_array_integer(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the activation function on an integer-quantized data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to generated and used LUT */ void nl_func_array_integer(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the elu function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_elu_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the max relu function on a fixed point data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_relu_max_array_fixed(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the selu function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size number of elements in the input buffer * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_selu_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! * @brief Computes the prelu function on a float data array * @ingroup layers_nl * @param in opaque handler to float, size should be 1 * @param slope opaque handler to float, size should be 1 * @param out opaque handler to float output elem * @param size size of the input data in bytes * @param params opaque handler to optional nl parameters */ AI_INTERNAL_API void nl_func_prelu_array_f32(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /*! 
* @brief Computes the prelu function on an integer-quantized data array * @ingroup layers_nl * @param in opaque handler to input elements to process * @param out opaque handler to output elements * @param size total size (number of elements) to process on the input * @param params opaque handler to optional nl parameters */ void nl_func_prelu_array_integer(ai_tensor *out, const ai_tensor *in, const ai_size size, const ai_handle params); /******************************************************************************/ /** Forward Functions Section **/ /******************************************************************************/ /*! * @brief Computes the activations of a ReLU nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_relu(ai_layer* layer); /*! * @brief Computes the activations of a fixed point ReLU nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_relu_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of a integer-quantized ReLU nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_relu_integer(ai_layer *pLayer); /*! * @brief Computes the activations of a clip integer-quantized nonlinear layer. * @ingroup layers_nl * @param pLayer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_clip_integer(ai_layer *pLayer); /*! * @brief Computes the activations of a ReLU6 nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_relu_thresholded(ai_layer* layer); /*! * @brief Computes the activations of a fixed point max ReLU layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_relu_max_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of a ELU nonlinear layer. 
* @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_elu(ai_layer* layer); /*! * @brief Computes the activations of a SELU nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_selu(ai_layer* layer); /*! * @brief Computes the activations of a PRELU nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_prelu(ai_layer* layer); /*! * @brief Computes the activations of a binary tanh (sign) nonlinear layer. * @ingroup layers * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_sign(ai_layer* layer); /*! * @brief Computes the activations of a clip nonlinear layer. * @ingroup layers * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_clip(ai_layer* layer); /*! * @brief Computes the activations of a sigmoid nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_sigmoid(ai_layer* layer); /*! * @brief Computes the activations of a fixed point sigmoid nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_sigmoid_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of a hard sigmoid nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_hard_sigmoid(ai_layer* layer); /*! * @brief Computes the activations of a swish nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_swish(ai_layer* layer); /*! * @brief Computes the activations of a hard swish nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_hard_swish(ai_layer* layer); /*! * @brief Computes the activations of an exponential nonlinear layer. 
* @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_exp(ai_layer* layer); /*! * @brief Computes the activations of an square root nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_sqrt(ai_layer* layer); /*! * @brief Computes the activations of a soft plus nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_soft_plus(ai_layer* layer); /*! * @brief Computes the activations of a soft sign nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_soft_sign(ai_layer* layer); /*! * @brief Computes the activations of a cosine (cos) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_cos(ai_layer* layer); /*! * @brief Computes the activations of a inverse cosine (acos) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_acos(ai_layer* layer); /*! * @brief Computes the activations of a hyperbolic cosine (cosh) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_cosh(ai_layer* layer); /*! * @brief Computes the activations of a inverse hyperbolic cosine (acosh) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_acosh(ai_layer* layer); /*! * @brief Computes the activations of a sine (sin) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_sin(ai_layer* layer); /*! * @brief Computes the activations of a inverse sine (asin) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_asin(ai_layer* layer); /*! * @brief Computes the activations of a hyperbolic sine (sinh) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_sinh(ai_layer* layer); /*! 
* @brief Computes the activations of a inverse hyperbolic sine (asinh) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_asinh(ai_layer* layer); /*! * @brief Computes the activations of a tangent (tan) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_tan(ai_layer* layer); /*! * @brief Computes the activations of a inverse tangent (atan) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_atan(ai_layer* layer); /*! * @brief Computes the activations of a hyperbolic tangent (tanh) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_tanh(ai_layer* layer); /*! * @brief Computes the activations of a inverse hyperbolic tangent (atanh) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_atanh(ai_layer* layer); /*! * @brief Computes the activations of a fixed point tanh nonlinear layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_tanh_fixed(ai_layer *pLayer); /*! * @brief Computes the activations of a error function (erf) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_erf(ai_layer* layer); /*! * @brief Computes the activations of a natural logarithm (log) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_log(ai_layer* layer); /*! * @brief Computes the activations of a reciprocal square root (rsqrt) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_rsqrt(ai_layer* layer); /*! * @brief Computes the activations of a square layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_square(ai_layer* layer); /*! * @brief Computes the activations of an absolute value (abs) layer. 
* @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_abs(ai_layer* layer); /*! * @brief Computes the activations of a ceil layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_ceil(ai_layer* layer); /*! * @brief Computes the activations of a floor layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_floor(ai_layer* layer); /*! * @brief Computes the activations of a rounding layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_round(ai_layer* layer); /*! * @brief Computes the activations of a sign negation (neg) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_neg(ai_layer* layer); /*! * @brief Computes the activations of a sign negation (not) layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_not(ai_layer* layer); /*! * @brief Computes the activations of a reciprocal layer. * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_reciprocal(ai_layer* layer); /*! * @brief Hardmax on an input tensors * @ingroup layers_generic * @param layer the hardmax layer */ AI_INTERNAL_API void forward_hardmax(ai_layer* layer); /*! * @brief Computes the activations of a softmax nonlinear layer. * @ingroup layers_nl * @param layer the softmax (sm) layer */ AI_INTERNAL_API void forward_sm(ai_layer* layer); /*! * @brief Computes the activations of a softmax nonlinear layer (integer version). * @ingroup layers_nl * @param layer the softmax (sm) layer */ AI_INTERNAL_API void forward_sm_integer(ai_layer* layer); /*! * @brief Computes the activations of an integer quantized nonlinear layer. 
* Non linear operation is function of used LUT defined through * (pLayer->nl_params->data) * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_nl_integer(ai_layer *pLayer); /*! * @brief Computes the activations of an integer quantized PReLu. * Slope params are located like weights, not params because they are * quantized * @ingroup layers_nl * @param layer the nonlinear (nl) layer */ AI_INTERNAL_API void forward_prelu_integer(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_NL_H*/
37,339
C
32.63964
84
0.688663
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_convert_dqnn.h
/** ****************************************************************************** * @file lite_convert_dqnn.h * @author AIS * @brief header file of AI platform lite convert kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_CONVERT_DQNN_H #define LITE_CONVERT_DQNN_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ LITE_API_ENTRY void forward_lite_node_convert_is1os8( const ai_pbits *p_in, ai_i8 *p_out, const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 *n_values); LITE_API_ENTRY void forward_lite_node_convert_is1os16( const ai_pbits *p_in, ai_i16 *p_out, const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i16 *n_values); LITE_API_ENTRY void forward_lite_node_convert_is1of32( const ai_pbits *p_in, ai_float *p_out, const ai_i32 n_channels, const ai_i32 n_pixels, const ai_float *n_values); /*! 
* @brief Handles data conversion from 8-bits signed input to signed binary * outputs - Lite API version * @ingroup lite_pw_dqnn */ LITE_API_ENTRY void forward_lite_node_convert_is8os1( const ai_i8 *p_in, ai_pbits *p_out, const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 zp, const ai_i8 pad); LITE_API_ENTRY void forward_lite_node_convert_is16os1( const ai_i16 *p_in, ai_pbits *p_out, const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 zp, const ai_i8 pad); LITE_API_ENTRY void forward_lite_node_convert_if32os1( const ai_float *p_in, ai_pbits *p_out, const ai_i32 n_channels, const ai_i32 n_pixels, const ai_i8 zp, const ai_i8 pad); LITE_API_ENTRY void forward_lite_node_convert_integer_if32os8( const ai_float *p_in, ai_i8 *p_out, const ai_u32 size, const ai_float out_scale, const ai_i8 out_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_integer_if32ou8( const ai_float *p_in, ai_u8 *p_out, const ai_u32 size, const ai_float out_scale, const ai_u8 out_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_integer_is8of32( const ai_i8 *p_in, ai_float *p_out, const ai_u32 size, const ai_float in_scale, const ai_i8 in_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_integer_iu8of32( const ai_u8 *p_in, ai_float *p_out, const ai_u32 size, const ai_float in_scale, const ai_u8 in_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_if32os16( const ai_float *p_in, ai_i16 *p_out, const ai_u32 size, const ai_float out_scale, const ai_i16 out_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_if32ou16( const ai_float *p_in, ai_u16 *p_out, const ai_u32 size, const ai_float out_scale, const ai_u16 out_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_is16of32( const ai_i16 *p_in, ai_float *p_out, const ai_u32 size, const ai_float in_scale, const ai_i16 in_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_iu16of32( const ai_u16 *p_in, ai_float *p_out, const ai_u32 size, const ai_float in_scale, const ai_u16 
in_zeropoint); LITE_API_ENTRY void forward_lite_node_convert_integer_iu8ou8( const ai_u8 *p_in, ai_u8 *p_out, const ai_i32 n_elems, const ai_float scale_ratio, const ai_u8 in_zp, const ai_u8 out_zp); LITE_API_ENTRY void forward_lite_node_convert_integer_iu8os8( const ai_u8 *p_in, ai_i8 *p_out, const ai_i32 n_elems, const ai_float scale_ratio, const ai_u8 in_zp, const ai_i8 out_zp); LITE_API_ENTRY void forward_lite_node_convert_integer_iu8os8_fast( const ai_u8 *p_in, ai_i8 *p_out, const ai_i32 n_elems, const ai_float scale_ratio, const ai_u8 in_zp, const ai_i8 out_zp); LITE_API_ENTRY void forward_lite_node_convert_integer_is8ou8( const ai_i8 *p_in, ai_u8 *p_out, const ai_i32 n_elems, const ai_float scale_ratio, const ai_i8 in_zp, const ai_u8 out_zp); LITE_API_ENTRY void forward_lite_node_convert_integer_is8ou8_fast( const ai_i8 *p_in, ai_u8 *p_out, const ai_i32 n_elems, const ai_float scale_ratio, const ai_i8 in_zp, const ai_u8 out_zp); LITE_API_ENTRY void forward_lite_node_convert_is16ou16( const ai_i16 *p_in, ai_u16 *p_out, const ai_i32 n_elems, const ai_float scale_ratio, const ai_i16 in_zp, const ai_u16 out_zp); #endif /*LITE_CONVERT_DQNN_H*/
5,069
C
21.533333
80
0.616098
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dense_ws1.h
#ifndef LITE_DENSE_WS1_H #define LITE_DENSE_WS1_H #pragma once #include "ai_lite_interface.h" /*! * @brief Forward function for a dense layer with signed 16bit input, * signed 16bit output, binary weights and binary bias. * @ingroup lite_dense_ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is16os16ws1( ai_i16* output, const ai_i16* input, const ai_pbits* weights, const ai_pbits* bias, ai_i32* scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); /*! * @brief Forward function for a dense layer with signed 16bit input, * signed 16bit output, binary weights and binary bias. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup lite_dense_ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param scale The pointer to scale. * @param offset The pointer to offset. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_is16os16ws1_bn( ai_i16* output, const ai_i16* input, const ai_pbits* weights, const ai_float *scale, const ai_float *offset, ai_i32* scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); /*! 
* @brief Forward function for a dense layer with signed f32 input, * f32 output, binary weights and binary bias. * @ingroup lite_dense_ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to bias. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_if32os1ws1( ai_pbits *output, const ai_float *input, const ai_pbits *weights, const ai_float *bias, ai_float *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); /*! * @brief Forward function for a dense layer with signed f32 input, * f32 output, binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup lite_dense_ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param scale The pointer to scale. * @param offset The pointer to offset. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_if32os1ws1_bn( ai_pbits *output, const ai_float *input, const ai_pbits *weights, const ai_float *scale, const ai_float *offset, ai_float *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); /*! * @brief Forward function for a dense layer with signed f32 input, * f32 output, and binary weights. 
* @ingroup lite_dense_ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param bias The pointer to binary bias. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_if32of32ws1( ai_float* output, const ai_float* input, const ai_pbits* weights, const ai_pbits* bias, ai_float* scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); /*! * @brief Forward function for a dense layer with signed f32 input, * f32 output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup lite_dense_ws1 * @param output The pointer to output buffer. * @param input The pointer to input buffer. * @param weights The pointer to weights. * @param scale The pointer to scale. * @param offset The pointer to offset. * @param scratch The pointer to the scratch buffer (unused). * @param n_channel_in The number of channels of the input. * @param n_channel_out The number of channels of the output, i.e., * the number of dense hidden neurons. */ LITE_API_ENTRY void forward_lite_dense_if32of32ws1_bn( ai_float *output, const ai_float *input, const ai_pbits *weights, const ai_float *scale, const ai_float *offset, ai_float *scratch, const ai_u32 n_channel_in, const ai_u32 n_channel_out); #endif /* LITE_DENSE_IS1WS1_H */
5,884
C
39.586207
80
0.720768
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_pad_dqnn.h
/** ****************************************************************************** * @file layers_pad_dqnn.h * @author AIS * @brief header file of AI platform DQNN padding datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_PADDING_DQNN_H #define LAYERS_PADDING_DQNN_H #pragma once #include "layers_common.h" #include "layers_generic.h" /*! * @defgroup layers_generic_dqnn Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles padding with binary input and binary output * @ingroup layers_generic_dqnn * @param layer pad layer */ AI_INTERNAL_API void forward_pad_is1os1(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_PADDING_DQNN_H*/
1,499
C
26.777777
80
0.451634
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_upsample.h
/** ****************************************************************************** * @file lite_upsample.h * @author AIS * @brief header file of AI platform lite pw kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_UPSAMPLE_H #define LITE_UPSAMPLE_H #pragma once #include "ai_lite_interface.h" void forward_lite_upsample_bilinear_if32of32(const ai_float* in_data, ai_float* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void forward_lite_upsample_bilinear_is8os8(const ai_i8* in_data, ai_i8* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void forward_lite_upsample_bilinear_iu8ou8(const ai_u8* in_data, ai_u8* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void forward_lite_upsample_bilinear_is16os16(const ai_i16* in_data, ai_i16* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); void 
forward_lite_upsample_bilinear_iu16ou16(const ai_u16* in_data, ai_u16* out_data, const ai_size width_in, const ai_size height_in, const ai_float width_scale, const ai_float height_scale, const ai_size width_out, const ai_size height_out, const ai_bool center, const ai_size n_channel); #endif /*LITE_UPSAMPLE__H*/
3,970
C
48.024691
80
0.380605
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_generic.h
/** ****************************************************************************** * @file layers_generic.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform generic layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2018 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_GENERIC_H #define LAYERS_GENERIC_H #pragma once #include "layers_common.h" typedef enum { KTfLiteNone = 0, KTfLiteActRelu, KTfLiteActRelu1, KTfLiteActRelu6, KTfLiteActTanh, KTfLiteActSignBit, KTfLiteActSigmoid } ai_tflitefused_activation; /*! * @defgroup layers_generic Generic Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_time_delay * @ingroup layers_generic * @brief TimeDelay layer with sparse kernel */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_delay_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* mask; /*!< sparse filter mask */ } ai_layer_time_delay; /*! * @struct ai_layer_split * @ingroup layers_generic * @brief Split layer definition * * This layer defines the params of a splitting layer. 
It is intended to be used * by his associated forward function @ref forward_split */ //typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_split_ { // AI_LAYER_COMMON_FIELDS_DECLARE // ai_u16 out_layers_count; /*!< number of output layers to split*/ // ai_u16 out_layer_curr; /*!< current layer to split */ // ai_layer** out_layers; /*!< output layers list */ // ai_tensor** out_tensors; /*!< output tensors list */ // ai_tensor* in_tensor; /*!< input tensor */ // func_copy_tensor copy_to_out_tensor; /*!< pointer to copy tensor func // (NULL = no copy) */ //} ai_layer_split; typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_split_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; //ai_tensor* num_or_size_splits; } ai_layer_split; /*! * @struct ai_layer_topK * @ingroup layers_generic * @brief topK layer definition */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_topK_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; ai_i16 largest; } ai_layer_topK; typedef AI_ALIGNED_TYPE(struct,4)ai_layer_svdf_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_size rank; ai_tflitefused_activation activation; } ai_layer_svdf; /*! * @struct ai_layer_slice * @ingroup layers_generic * @brief Slice layer definition * * This layer defines the params of a slicing layer. It is intended to be used * by his associated forward function @ref forward_slice */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_slice_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* axes; /*!< Axes that 'starts' and 'ends' apply to. It's optional*/ AI_CONST ai_array* starts; /*!< Starting indices of corrisponding axis in axes*/ AI_CONST ai_array* ends; /*!< Ending indices (exclusive) of corrisponding axis in axes*/ } ai_layer_slice; /*! * @struct ai_layer_gather * @ingroup layers_generic * @brief Gather layer definition * * This layer defines the params of a gathering layer. 
It is intended to be used * by his associated forward function @ref forward_gather */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_gather_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; /*!< Which axis to gather on It's optional*/ ai_tensor* indices; /*!< Indices of corrisponding axis in axes*/ } ai_layer_gather; /*! * @struct ai_layer_tile * @ingroup layers generic * @brief Tile layer definition * * This layer defines the param of an tile layer. It constructs a tensor by tiling a * given tensor. It is intended to be used by its associated forward function * @ref forward_upsample */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tile_{ AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* repeats; /*!< numbers of repeated copies along each dimension */ } ai_layer_tile; /*! * @struct ai_layer_shape * @ingroup layers generic * @brief Shape layer definition * * This layer defines the param of a shape layer. It returns the shape of the * input tensor. It is intended to be used by its associated forward function * @ref forward_shape */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_shape_{ AI_LAYER_COMMON_FIELDS_DECLARE } ai_layer_shape; /*! * @struct ai_layer_upsample * @ingroup layers generic * @brief Upsample layer definition * * This layer defines the param of an upsampling layer. It overloads its params * to allow zeros upsampling, helpful traspose convolutions, for instance. * It is intended to be used by its associated forward function @ref forward_upsample */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_upsample_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_upsample_mode mode; /*!< upsample mode */ ai_bool center; /*!< center pixels */ AI_CONST ai_array* scales; /*!< scale array along each dimension */ ai_nearest_mode nearest_mode; /*!< used in nearest mode */ } ai_layer_upsample; /*! * @struct ai_layer_resize * @ingroup layers generic * @brief Resize layer definition * * This layer defines the param of a resize layer. 
* It is intended to be used by its associated forward function @ref forward_resize */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_resize_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_coord_transf_mode coord_transf_mode; /*!< coordinate tranformation mode */ ai_float cubic_coeff_a; /*!< the coefficient 'a' used in cubic interpolation */ ai_bool exclude_outside; /*!< exclude outside pixels flag */ ai_float extrapol_val; /*!< used in tf_crop_and_resize cas */ ai_resize_mode mode; /*!< resize mode */ ai_nearest_mode nearest_mode; /*!< used in nearest mode */ AI_CONST ai_array* scales; /*!< scale array along each dimension */ AI_CONST ai_array* roi; /*!< roi array, used in tf_crop_and_resize case */ } ai_layer_resize; /*! * @struct ai_layer_instanceNormalization * @ingroup layers generic * @brief instance normalization layer definition * * This layer defines the params of an instance normalization layer. * It is intended to be used by its associated forward function @ref forward_instanceNormalization */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_instanceNormaization_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_float eps; /*!< epsilon value, to avoid by zero division */ } ai_layer_instanceNormalization; /*! * @struct ai_layer_mode * @ingroup layers generic * @brief Pad layer definition * * This layer defines the param of an pad layer. It pad a tensor. * It is intended to be used by its associated forward function @ref forward_pad */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pad_{ AI_LAYER_COMMON_FIELDS_DECLARE ai_pad_mode mode; /*!< pad mode */ ai_shape pads; /*!< Number of padding to add or remove at the beginning and end of each axis */ const ai_array* value; /*!< Indicates the value to be filled */ } ai_layer_pad; /*! * @struct ai_layer_mode * @ingroup layers generic * @brief ConstantOfShape layer definition * * This layer defines the param of an constantofshape layer. It constantofshape a tensor. 
* It is intended to be used by its associated forward function @ref forward_constantofshape */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_constantofshape_{ AI_LAYER_COMMON_FIELDS_DECLARE const ai_array* value; /*!< Indicates the value to be filled */ } ai_layer_constantofshape; /*! * @struct ai_layer_add * @ingroup layers_generic * @brief Add layer definition * * This layer defines the params of an add layer. */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_add_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_u16 in_layers_count; /*!< number of input layers to concat */ ai_u16 in_layer_curr; /*!< current layer to concat */ ai_tensor** in_tensors; /*!< input tensors list (if NULL==no copy) */ ai_tensor* out_tensor; /*!< output tensor (if NULL==no copy) */ func_copy_tensor copy_to_out_tensor; /*!< pointer to copy tensor func (NULL = no copy) */ ai_layer_base* split_layer; /*!< pointer to associated split layer */ ai_layer_base* next_layer; /*!< pointer to next layer to process */ } ai_layer_add; typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_argmax_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; ai_i16 select_last_index; } ai_layer_argmax; typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_argmin_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i16 axis; ai_i16 select_last_index; } ai_layer_argmin; // TODO: REMOVE This legacy typedef ai_layer_argmax ai_layer_ArgMax; typedef ai_layer_argmin ai_layer_ArgMin; /*! * @struct ai_layer_transpose * @ingroup layers_generic * @brief Transpose layer datastruct declaration. This defines the params of a * transpose layer. It is intended to be used by his associated forward function * @ref forward_transpose */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_transpose_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape out_mapping; /*!< transpose output mapping order. I.e. tt is a permutation of the input tensor shape */ } ai_layer_transpose; /*! * @struct ai_layer_transpose_batch * @ingroup layers_generic * @brief Transpose batch layer datastruct declaration. 
This defines the params of a * transpose layer. It is intended to be used by his associated forward function * @ref forward_transpose_batch */ typedef ai_layer_base ai_layer_transpose_batch; #define AI_TIME_DISTRIBUTED_AXIS (AI_SHAPE_HEIGHT) /*! * @struct ai_layer_time_distributed * @ingroup layers_generic * @brief Time distributed layer datastruct declaration. This defines the params * of a time distributed layer. It is intended to be used by his associated * forward function @ref forward_time_distributed */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_time_distributed_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_layer_base* inner_layer; /*!< inner layer to process */ } ai_layer_time_distributed; /*! * @struct ai_layer_concat * @ingroup layers_generic * @brief Concatenation layer * * Concat Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_concat_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; /*!< which axis to concatenate on */ } ai_layer_concat; /*! * @struct ai_layer_pack * @ingroup layers_generic * @brief pack layer * * Pack Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_pack_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; /*!< which axis to concatenate on */ } ai_layer_pack; /*! * @struct ai_layer_unpack * @ingroup layers_generic * @brief unpack layer * * Unpack Layer. * It is a sequential layer. 
see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_unpack_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; /*!< which axis to concatenate on */ } ai_layer_unpack; typedef void (*func_binary)(ai_handle out,const ai_handle a, const ai_handle b); typedef void (*func_buffer_binary)(ai_handle out,const ai_handle a, const ai_handle b, const ai_size loop); typedef void (*func_buffer_binary_integer)(ai_handle out,const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle scale1, const ai_handle zp1, const ai_handle scale2, const ai_handle zp2, const ai_handle scaleout, const ai_handle zpout, const ai_i32 scalar_op); /*! * @struct ai_layer_eltwise * @ingroup layers_generic * @brief General element-wise transformation layer * * Elementwise Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_eltwise_ { AI_LAYER_COMMON_FIELDS_DECLARE func_binary operation; /*!< operation to apply elementwise */ func_buffer_binary buffer_operation; /*!< operation to apply elementwise */ } ai_layer_eltwise; /*! * @struct ai_layer_eltwise_integer * @ingroup layers_generic * @brief General element-wise transformation layer for integer data * * Elementwise Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_eltwise_integer_ { AI_LAYER_COMMON_FIELDS_DECLARE func_binary operation; /*!< operation to apply elementwise */ func_buffer_binary_integer buffer_operation; /*!< operation to apply elementwise */ } ai_layer_eltwise_integer; /*! * @struct ai_layer_reduce * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. 
see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_ { AI_LAYER_COMMON_FIELDS_DECLARE const ai_array* neutral_value; /*!< Initialization value for operation */ func_binary operation; /*!< operation to apply elementwise */ } ai_layer_reduce; /*! * @struct ai_layer_reduce_log_sum_exp * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_log_sum_exp_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_shape_dimension axis; } ai_layer_reduce_log_sum_exp; /*! * @struct ai_layer_reduce l1 * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_l1_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* axes; } ai_layer_reduce_l1; /*! * @struct ai_layer_reduce l2 * @ingroup layers_generic * @brief General dimension reduction layer * * reduction Layer. * It is a sequential layer. see @ref ai_layer_sequential */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reduce_l2_ { AI_LAYER_COMMON_FIELDS_DECLARE AI_CONST ai_array* axes; } ai_layer_reduce_l2; /*! * @struct ai_layer_where * @ingroup layers generic * @brief Where layer definition * * This layer operates on 3 input tensors: condition, X and Y. * It return elements, either from X or Y, depending on condition * (with Numpy-style broadcasting support). * @ref forward_where */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_where_ { AI_LAYER_COMMON_FIELDS_DECLARE const ai_array *shapes_len; ai_bool channel_first; } ai_layer_where; /*! 
* @struct ai_layer_reverse * @ingroup layers_reverse * @brief Reverse layer * * The type of reverse function is handled by the specific forward function * @ref forward_svm_regressor */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_reverse_ { AI_LAYER_COMMON_FIELDS_DECLARE ai_i32 axis; /*!< selected axis to perform the operation */ } ai_layer_reverse; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Dummy forward routine with no processing. * @ingroup layers_generic * @param generic layer handle */ AI_INTERNAL_API void forward_nop(ai_layer* layer); /*! * @brief Computes the activations of a TimeDelay layer. * @ingroup layers_generic * @param layer the time delay layer */ AI_INTERNAL_API void forward_time_delay(ai_layer* layer); /*! * @brief Split network computation in N parallel branches. * @ingroup layers_generic * @param layer the split layer */ AI_INTERNAL_API void forward_split(ai_layer* layer); /*! * @brief Add network computation from N parallel branches. * @ingroup layers_generic * @param layer the add layer */ AI_INTERNAL_API void forward_add(ai_layer* layer); /*! * @brief Compute the indices of the max elements of the input tensor's element along the provided axis. * @ingroup layers_generic * @param layer argmax layer */ AI_INTERNAL_API void forward_argmax(ai_layer* layer); /*! * @brief Compute the indices of the min elements of the input tensor's element along the provided axis. * @ingroup layers_generic * @param layer argmin layer */ AI_INTERNAL_API void forward_argmin(ai_layer* layer); /*! * @brief Svdf layer. * @ingroup layers_generic * @param layer svdf layer */ AI_INTERNAL_API void forward_svdf(ai_layer* layer); /*! 
* @brief Transpose a tensor along a pivot and save transposed values into an output * tensor * @ingroup layers_generic * @param layer the transpose layer */ AI_INTERNAL_API void forward_transpose(ai_layer* layer); /*! * @brief Transpose batch and save transposed values of a determinate batch into an output * tensor * @ingroup layers_generic * @param layer the transpose batch layer */ AI_INTERNAL_API void forward_transpose_batch(ai_layer* layer); /*! * @brief TimeDistrubuted forward layer function. This forward function * implements the timedistributed layer. * @ingroup layers_generic * @param layer the time distributed layer */ AI_INTERNAL_API void forward_time_distributed(ai_layer* layer); /*! * @brief Packing a list of tensors in a single tensor * @ingroup layers generic * @param layer the packing layer */ AI_INTERNAL_API void forward_pack(ai_layer* layer); /*! * @brief Unpacking a single of tensors in a list tensor * @ingroup layers generic * @param layer the unpacking layer */ AI_INTERNAL_API void forward_unpack(ai_layer* layer); /*! * @brief Concatenates a list of tensors into a single tensor. * @ingroup layers_generic * @param layer the concatenation layer */ AI_INTERNAL_API void forward_concat(ai_layer* layer); /*! * @brief Gather an input tensor * @ingroup layers_generic * @param layer the gathered layer */ AI_INTERNAL_API void forward_gather(ai_layer* layer); /*! * @brief Slice an input tensors * @ingroup layers_generic * @param layer the sliced layer */ AI_INTERNAL_API void forward_slice(ai_layer* layer); /*! * @brief Tile an input tensors * @ingroup layers_generic * @param layer the tiled layer */ AI_INTERNAL_API void forward_tile(ai_layer* layer); /*! * @brief Returns the shape of an input tensors * @ingroup layers_generic * @param layer the Shape layer */ AI_INTERNAL_API void forward_shape(ai_layer* layer); /*! 
* @brief TopK an input tensors * @ingroup layers_generic * @param layer the Topked layer */ AI_INTERNAL_API void forward_topK(ai_layer* layer); /*! * @brief Pad an input tensors * @ingroup layers_generic * @param layer the pad layer */ AI_INTERNAL_API void forward_pad(ai_layer* layer); /*! * @brief ConstantofShape an input tensors * @ingroup layers_generic * @param layer the constantofshape layer */ AI_INTERNAL_API void forward_constantofshape(ai_layer* layer); /*! * @brief Upsample an input tensors * @ingroup layers_generic * @param layer the upsampled layer */ AI_INTERNAL_API void forward_upsample(ai_layer* layer); /*! * @brief Resize an input tensors * @ingroup layers_generic * @param layer the resized layer */ AI_INTERNAL_API void forward_resize(ai_layer* layer); /*! * @brief Instance Normalization on an input tensors * @ingroup layers_generic * @param layer the instance normalization layer */ AI_INTERNAL_API void forward_instanceNormalization(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the integer input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise_integer(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the signed integer input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise_integer_INT8(ai_layer* layer); /*! * @brief Apply an elementwise transformation to the unsigned integer input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_eltwise_integer_UINT8(ai_layer* layer); /*! 
* @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce(ai_layer* layer); /*! * @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce_log_sum_exp(ai_layer* layer); /*! * @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce_l1(ai_layer* layer); /*! * @brief Apply a reduce transformation to the input tensors * @ingroup layers_generic * @param layer the reduce layer */ AI_INTERNAL_API void forward_reduce_l2(ai_layer* layer); /*! * @brief Behave like numpy.where with Numpy-style broadcasting support * @ingroup layers_generic * @param layer the where layer */ AI_INTERNAL_API void forward_where(ai_layer* layer); /*! * @brief Apply an elementwise addition to the input tensors * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_add_integer(ai_layer* layer); /*! * @brief Apply an elementwise addition to the input tensors * with int8 I/O * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_add_integer_INT8(ai_layer* layer); /*! * @brief Apply an elementwise addition to the input tensors * with uint8 I/O * @ingroup layers_generic * @param layer the elementwise layer */ AI_INTERNAL_API void forward_add_integer_UINT8(ai_layer* layer); /*! * @brief Reverse layer. * @ingroup layers_generic * @param layer reverse layer */ AI_INTERNAL_API void forward_reverse(ai_layer *pLayer); /*! * @brief Upsample an input tensors with unsigned 8-bit integer input,. * It is to be used also for other formats, since the function only * performs memory copy. 
* @ingroup layers_generic * @param layer the upsampled layer */ AI_INTERNAL_API void forward_upsample_generic(ai_layer* layer); AI_API_DECLARE_END #endif /*LAYERS_GENERIC_H*/
22,964
C
28.292092
130
0.683505
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_datatypes_defines.h
/** ****************************************************************************** * @file ai_datatypes_defines.h * @author AST Embedded Analytics Research Platform * @brief Definitions of AI platform private APIs types ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_DATATYPES_DEFINES_H #define AI_DATATYPES_DEFINES_H #pragma once #include "ai_platform.h" /*! * @defgroup datatypes_defines Internal Datatypes Defines Header * @brief Data structures used internally to implement neural networks * */ /* define to track datatypes used by codegen */ #define AI_INTERFACE_TYPE /* AI_INTERFACE_TYPE */ #define AI_INTERNAL_API /* AI_INTERNAL_API */ #define AI_CONST const #define AI_STATIC static #define AI_STATIC_CONST static const /******************************************************************************/ /* NOP operation used by codegen */ #define AI_NOP /* NOP */ #define AI_WRAP_FUNC(fn_) do { fn_ } while (0); #define AI_CAT(a, ...) AI_PRIMITIVE_CAT(a, __VA_ARGS__) #define AI_PRIMITIVE_CAT(a, ...) a ## __VA_ARGS__ /******************************************************************************/ #ifdef HAS_AI_ASSERT #include <assert.h> #define AI_ASSERT(cond) \ { assert(cond); } #else #define AI_ASSERT(cond) \ AI_WRAP_FUNC(/*AI_ASSERT*/) #endif /*HAS_AI_ASSERT*/ /******************************************************************************/ #define AI_NO_PACKED_STRUCTS /* Macro for defining packed structures (compiler dependent). 
* This just reduces memory requirements, but is not required. */ #if defined(AI_NO_PACKED_STRUCTS) /* Disable struct packing */ #define AI_PACKED_STRUCT_START /* AI_PACKED_STRUCT_START */ #define AI_PACKED_STRUCT_END /* AI_PACKED_STRUCT_END */ #define AI_PACKED /* AI_PACKED */ #elif defined(__GNUC__) || defined(__clang__) /* For GCC and clang */ #define AI_PACKED_STRUCT_START /* AI_PACKED_STRUCT_START */ #define AI_PACKED_STRUCT_END /* AI_PACKED_STRUCT_END */ #define AI_PACKED __attribute__((packed)) #elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) || defined(__CC_ARM) /* For IAR ARM and Keil MDK-ARM compilers */ #define AI_PACKED_STRUCT_START _Pragma("pack(push, 1)") #define AI_PACKED_STRUCT_END _Pragma("pack(pop)") #define AI_PACKED /* AI_PACKED */ #elif defined(_MSC_VER) && (_MSC_VER >= 1500) /* For Microsoft Visual C++ */ #define AI_PACKED_STRUCT_START __pragma(pack(push, 1)) #define AI_PACKED_STRUCT_END __pragma(pack(pop)) #define AI_PACKED /* AI_PACKED */ #else /* Unknown compiler */ #define AI_PACKED_STRUCT_START /* AI_PACKED_STRUCT_START */ #define AI_PACKED_STRUCT_END /* AI_PACKED_STRUCT_END */ #define AI_PACKED /* AI_PACKED */ #endif /* AI_NO_PACKED_STRUCTS */ /******************************************************************************/ #define AI_STRINGIFY_ARG(contents) # contents #define AI_STRINGIFY(macro_or_string) AI_STRINGIFY_ARG (macro_or_string) /******************************************************************************/ #if defined(_MSC_VER) #define AI_DECLARE_STATIC static __inline // #define AI_FORCE_INLINE static __forceinline #define AI_FORCE_INLINE static __inline #define AI_HINT_INLINE static __inline #define AI_ALIGNED_TYPE(type, x) type __declspec(align(x)) #define AI_INTERFACE_ENTRY __declspec(dllexport) #elif defined(__ICCARM__) || defined (__IAR_SYSTEMS_ICC__) #define AI_DECLARE_STATIC static inline // #define AI_FORCE_INLINE static _Pragma("inline=forced") // TODO: check this definition! 
#define AI_FORCE_INLINE static inline #define AI_HINT_INLINE static inline #define AI_ALIGNED_TYPE(type, x) type #define AI_INTERFACE_ENTRY /* AI_INTERFACE_ENTRY */ #elif defined(__GNUC__) #define AI_DECLARE_STATIC static __inline #define AI_FORCE_INLINE static __inline #define AI_HINT_INLINE static __inline #define AI_ALIGNED_TYPE(type, x) type __attribute__ ((aligned(x))) #define AI_INTERFACE_ENTRY /* AI_INTERFACE_ENTRY */ #else /* _MSC_VER */ #define AI_DECLARE_STATIC static __inline // #define AI_FORCE_INLINE static __forceinline #define AI_FORCE_INLINE static __inline #define AI_HINT_INLINE static __inline #define AI_ALIGNED_TYPE(type, x) type __attribute__ ((aligned(x))) #define AI_INTERFACE_ENTRY __attribute__((visibility("default"))) #endif /* _MSC_VER */ /******************************************************************************/ #define AI_ALIGN_MASKED(value, mask) ( ((value)+(mask))&(~(mask)) ) #define AI_GET_VERSION_STRING(major, minor, micro) \ AI_STRINGIFY_ARG(major) "." \ AI_STRINGIFY_ARG(minor) "." \ AI_STRINGIFY_ARG(micro) \ #define AI_PACK_TENSORS_PTR(...) \ AI_PACK(__VA_ARGS__) #define AI_PACK_INFO(size_) (ai_tensor_info[1]) { { \ .buffer = (ai_buffer[size_])AI_STRUCT_INIT, \ .state = (ai_tensor_state[size_])AI_STRUCT_INIT, \ } } #define AI_CR "\r\n" #if (defined HAS_AI_DEBUG || defined HAS_DEBUG_LIB) #include <stdio.h> #define AI_DEBUG(...) __VA_ARGS__ #define AI_DEBUG_PRINT(fmt, ...) { printf(fmt, ##__VA_ARGS__); } #else #define AI_DEBUG(...) AI_WRAP_FUNC(/*AI_DEBUG*/) #define AI_DEBUG_PRINT(fmt, ...) AI_WRAP_FUNC(/*AI_DEBUG_PRINT*/) #endif #define AI_FLAG_SET(mask, flag) (mask) |= (flag) #define AI_FLAG_UNSET(mask, flag) (mask) &= (~(flag)) #define AI_FLAG_IS_SET(mask, flag) ((flag)==((mask)&(flag))) #endif /*AI_DATATYPES_DEFINES_H*/
6,551
C
39.444444
105
0.526637
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_dense_dqnn.h
/** ****************************************************************************** * @file layers_dense_dqnn.h * @author AST Embedded Analytics Research Platform * @brief header file of deeply quantized dense layers. ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_DENSE_DQNN_H #define LAYERS_DENSE_DQNN_H #pragma once #include "layers_common.h" /*! * @defgroup layers_dense_dqnn Quantized Dense Layers definition. * @brief Implements the kernels and the forward functions to implement * dense layers with quantized inputs, weights, or outputs. */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_dense_dqnn * @ingroup layers_dense_dqnn * @brief Specific instance of deeply quantized dense layers. */ typedef ai_layer_base ai_layer_dense_dqnn; /*****************************************************************************/ /* Forward Functions Section */ /*****************************************************************************/ /*! * @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * signed binary output, and signed binary weights. 
* The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 8-bit signed output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 8-bit signed output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and signed binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and signed binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 32-bit floating point weights. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32wf32(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 32-bit floating point weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32wf32_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 32-bit floating point output, and 8-bit signed weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1of32ws8_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * binary output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * binary output, and 8-bit signed weights. 
* The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os1ws8_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 8-bit signed output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os8ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed binary input, * 16-bit signed output, and 8-bit signed weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is1os16ws8(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * float output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * float output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8of32ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 1-bit signed output, and binary weights. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 1-bit signed output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * binary weights and binary output. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os1ws1_bn_fxp(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 8-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 8-bit input, * 16-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is8os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 1-bit signed output, and binary weights. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 1-bit signed output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 8-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * 16-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * f32 output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed 16-bit input, * f32 output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. 
* @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_is16of32ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 1-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os1ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 1-bit signed output, and binary weights. * The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os1ws1_bn(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 8-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os8ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * 16-bit signed output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32os16ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * f32 output, and binary weights. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32of32ws1(ai_layer* layer); /*! * @brief Forward function for a dense layer with signed f32 input, * f32 output, and binary weights. 
* The BN is fused, i.e., the layer requires weights, scale, and offset, where * weights are those of the dense layer, scale is that of the BN, and the offset * corresponds to dense bias * bn scale + bn offset. If the parameters do not * agree with such convention, the behavior is undefined. * @ingroup layers_dense_dqnn * @param layer template layer as an opaque pointer */ AI_INTERNAL_API void forward_dense_if32of32ws1_bn(ai_layer* layer); AI_API_DECLARE_END #endif /*LAYERS_DENSE_DQNN_H*/
14,182
C
34.546366
80
0.709632
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_lite.h
/** ****************************************************************************** * @file ai_lite.h * @author AST Embedded Analytics Research Platform * @brief Definitions and implementations of runtime-lite public APIs ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_LITE_H #define AI_LITE_H #pragma once #include "ai_platform.h" #include "ai_lite_inspect.h" #define LITE_API_ENTRY \ /* LITE_API_ENTRY */ #define LITE_GRAPH_INIT(_inputs, _outputs, _activations, _weights, _cb, _cb_cookie) { \ .inputs = (_inputs), \ .outputs = (_outputs), \ .activations = (_activations), \ .weights = (const ai_handle*)(_weights), \ .cb = ((ai_lite_inspect_cb)(_cb)), \ .cb_cookie = ((ai_handle)(_cb_cookie)), \ } AI_API_DECLARE_BEGIN typedef enum { LITE_OK = 0, LITE_KO_INPUTS, LITE_KO_OUTPUTS, LITE_KO_WEIGHTS, LITE_KO_ACTIVATIONS, LITE_KO_GRAPH, } lite_result; typedef struct { ai_handle* inputs; ai_handle* outputs; ai_handle* activations; const ai_handle* weights; ai_lite_inspect_cb cb; ai_handle cb_cookie; } lite_graph; AI_API_DECLARE_END #endif /* AI_LITE_H */
1,699
C
25.5625
87
0.521483
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_ml_linearclassifier.h
/** ****************************************************************************** * @file layers_ml_linearclassifier.h * @author SRA * @brief header file of AI platform LinearClassifier datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_LINEARCLASSIFIER_H #define LAYERS_LINEARCLASSIFIER_H #pragma once #include "layers_common.h" #include "layers_nl.h" /*! * @defgroup layers_linearclassifier Layers Definitions * @brief definition * */ AI_API_DECLARE_BEGIN /*! * @struct ai_layer_linearclassifier * @ingroup layers_linearclassifier * @brief Linearclassifier layer * * The type of svmreg function is handled by the specific forward function * @ref forward_linearclassifier */ typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_linearclassifier_ { AI_LAYER_COMMON_FIELDS_DECLARE func_nl nl_func; /*!< function pointer to non linear transform */ \ ai_bool multi_class; /*!< Indicates whether to do OvR or multinomial */ ai_bool has_classlabels_int; /*!< if True, LinearClassifier returns classlabels int, else classlabels string */ } ai_layer_linearclassifier; /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Decodes the LinearClassifier ML operator. 
* @ingroup layers_linaerclassifier * @param layer linear classifier layer */ AI_INTERNAL_API void forward_linearclassifier(ai_layer *pLayer); AI_API_DECLARE_END #endif /*LAYERS_LINEARCLASSIFIER_H*/
2,176
C
29.661971
118
0.542279
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_nl_list.h
/** ****************************************************************************** * @file lite_nl_list.h * @author AST Embedded Analytics Research Platform * @brief header file of lite supported non-linearities routines ****************************************************************************** * @attention * * Copyright (c) 2022 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ // #define LITE_NL_ENTRY(nl_id_, nl_name_, nl_op_, nl_op_args_) /* No sentry. This is deliberate!! */ LITE_NL_ENTRY(1, abs, AI_ABS, 1) LITE_NL_ENTRY(2, acos, AI_MATH_ACOS, 1) LITE_NL_ENTRY(3, acosh, AI_MATH_ACOSH, 1) LITE_NL_ENTRY(4, asin, AI_MATH_ASIN, 1) LITE_NL_ENTRY(5, asinh, AI_MATH_ASINH, 1) LITE_NL_ENTRY(6, atan, AI_MATH_ATAN, 1) LITE_NL_ENTRY(7, atanh, AI_MATH_ATANH, 1) LITE_NL_ENTRY(8, ceil, AI_CEIL, 1) LITE_NL_ENTRY(9, cos, AI_MATH_COS, 1) LITE_NL_ENTRY(10, cosh, AI_MATH_COSH, 1) LITE_NL_ENTRY(11, erf, AI_MATH_ERF, 1) LITE_NL_ENTRY(12, exp, AI_MATH_EXP, 1) LITE_NL_ENTRY(13, floor, AI_FLOOR, 1) LITE_NL_ENTRY(14, hardmax, /**/, 0) LITE_NL_ENTRY(15, log, AI_MATH_LOG, 1) LITE_NL_ENTRY(16, logistic, AI_MATH_LOGISTIC, 1) LITE_NL_ENTRY(17, neg, AI_NEG, 1) LITE_NL_ENTRY(18, rsqrt, AI_MATH_RSQRT, 1) LITE_NL_ENTRY(19, sin, AI_MATH_SIN, 1) LITE_NL_ENTRY(20, sinh, AI_MATH_SINH, 1) LITE_NL_ENTRY(21, tan, AI_MATH_TAN, 1) LITE_NL_ENTRY(22, square, AI_MATH_SQUARE, 1) LITE_NL_ENTRY(23, reciprocal, AI_RECIPROCAL, 1) LITE_NL_ENTRY(24, round, AI_ROUND, 1) LITE_NL_ENTRY(25, sigmoid, AI_MATH_SIGMOID, 1) LITE_NL_ENTRY(26, swish, AI_MATH_SWISH, 1) LITE_NL_ENTRY(27, hard_swish, AI_MATH_HARD_SWISH, 1) LITE_NL_ENTRY(28, sign, 
AI_SIGN, 1) LITE_NL_ENTRY(29, sqrt, AI_MATH_SQRT, 1) // LITE_NL_ENTRY(30, softmax, /**/, 0) // for future changes // LITE_NL_ENTRY(31, softmax_zero_channel, /**/, 0) // for future changes LITE_NL_ENTRY(32, soft_plus, AI_MATH_SOFT_PLUS, 1) LITE_NL_ENTRY(33, soft_sign, AI_MATH_SOFT_SIGN, 1) LITE_NL_ENTRY(34, tanh, AI_MATH_TANH, 1) LITE_NL_ENTRY(35, prelu, /**/, 0) LITE_NL_ENTRY(36, relu, AI_MATH_RELU, 1) LITE_NL_ENTRY(37, relu_generic, /**/, 0) LITE_NL_ENTRY(101, elu, AI_MATH_ELU, 2) LITE_NL_ENTRY(102, relu_thresholded, AI_MATH_RELU_THRESHOLDED, 2) LITE_NL_ENTRY(201, clip, AI_CLAMP, 3) LITE_NL_ENTRY(202, hard_sigmoid, AI_MATH_HARD_SIGMOID, 3) LITE_NL_ENTRY(203, selu, AI_MATH_SELU, 3) #undef LITE_NL_ENTRY #undef LITE_NL_IIF_0 #undef LITE_NL_IIF_1 #undef LITE_NL_IIF_2 #undef LITE_NL_IIF_3
2,844
C
35.474359
80
0.60443
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_math_helpers.h
/** ****************************************************************************** * @file ai_math_helpers.h * @author AST Embedded Analytics Research Platform * @brief Math helpers routines header file. ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_MATH_HELPERS_H #define AI_MATH_HELPERS_H #include "ai_lite_math_helpers.h" //#if defined(HAS_X86) || defined(__CC_ARM) || defined(CM4) || defined(CM7) #define _AI_CONV_2D_LOOP_UNROLLING_OPTIM //#endif #define STM32_DOT_INLINE_OPTIM /* Modes for element wise integer optimized implementation */ #define AI_ELTWISE_NO_SCALAR (0) #define AI_ELTWISE_SCALAR1 (1) #define AI_ELTWISE_SCALAR2 (2) #define AI_ELTWISE_SCALAR_CH1 (3) #define AI_ELTWISE_SCALAR_CH2 (4) AI_API_DECLARE_BEGIN /*! 
* @typedef ai_vec4_float * @ingroup ai_datatypes_internal * @brief 32bit X 4 float (optimization for embedded MCU) */ typedef struct _ai_vec4_float { ai_float a1; ai_float a2; ai_float a3; ai_float a4; } ai_vec4_float; #define AI_VEC4_FLOAT(ptr_) \ _get_vec4_float((ai_handle)(ptr_)) AI_DECLARE_STATIC ai_vec4_float _get_vec4_float(const ai_handle fptr) { return *((const ai_vec4_float*)fptr); } #if defined(STM32_DOT_INLINE_OPTIM) AI_DECLARE_STATIC void __ai_math_dot_array( ai_float* out, const ai_float* data0, const ai_float* data1, ai_size data_size) { register ai_float sum = 0.0f; /* Temporary result storage */ /* Run the below code for Cortex-M4 and Cortex-M3 */ #if defined(_AI_CONV_2D_LOOP_UNROLLING_OPTIM) /* First part of the processing with loop unrolling. Compute 16 outputs at a time. ** a second loop below computes the remaining 1 to 15 samples. */ while (data_size >= 16u) { register ai_vec4_float ch_in_f = AI_VEC4_FLOAT(data1); register ai_vec4_float weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; ch_in_f = AI_VEC4_FLOAT(data1); weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; ch_in_f = AI_VEC4_FLOAT(data1); weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; ch_in_f = AI_VEC4_FLOAT(data1); weights_in_f = AI_VEC4_FLOAT(data0); sum += weights_in_f.a1 * ch_in_f.a1; sum += weights_in_f.a2 * ch_in_f.a2; sum += weights_in_f.a3 * ch_in_f.a3; sum += weights_in_f.a4 * ch_in_f.a4; data1 += 4; data0 += 4; data_size -= 16u; } #else /* First part of the processing with loop unrolling. 
Compute 4 outputs at a time. ** a second loop below computes the remaining 1 to 3 samples. */ while (data_size >= 4u) { /* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */ /* Calculate dot product and then store the result in a temporary buffer */ sum += (*data0++) * (*data1++); sum += (*data0++) * (*data1++); sum += (*data0++) * (*data1++); sum += (*data0++) * (*data1++); /* Decrement the loop counter */ data_size -= 4u; } #endif while (data_size > 0u) { /* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */ /* Calculate dot product and then store the result in a temporary buffer. */ sum += (*data0++) * (*data1++); /* Decrement the loop counter */ data_size--; } /* Directly accumulate the result back in the destination buffer */ *out += sum; } #undef AI_MATH_DOT_ARRAY #define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \ { __ai_math_dot_array(dst, src0, src1, size); } #else /* STM32_DOT_INLINE_OPTIM */ #undef AI_MATH_DOT_ARRAY #define AI_MATH_DOT_ARRAY(dst, src0, src1, size) \ { ai_math_dot_array(dst, src0, src1, size); } #endif /*! * @defgroup math_helpers Math helpers * @brief Common math functions * * Math functions are mapped to the underlying platform through those utility * functions. On x86 and ARM v7 they are mapped to the float math functions in * the C99 standard library; on MCUs they are mapped to the ARM DSP functions. */ /*! * @brief platform optimized dot product of float vectors * * Computes the dot product between vectors and adds the result to out. * @ingroup math_helpers * @param out scalar result of the dot product * @param data0 the first float vector * @param data1 the second float vector * @param data_size the size of both vectors */ AI_INTERFACE_ENTRY void ai_math_dot_array( ai_float* out, const ai_float* data0, const ai_float* data1, const ai_size data_size); /*! 
* @brief ErfInv a float value * @ingroup math_helpers * @param x input value * @return square root of the value */ AI_INTERFACE_ENTRY ai_float ai_math_erfinv(const ai_float x); /*! * @brief platform optimized exponential on a float value * @ingroup math_helpers * @param x input value * @return exponential of the value */ AI_INTERFACE_ENTRY ai_float ai_math_exp(const ai_float x); /*! * @brief platform logical not * @ingroup math_helpers * @param x input value * @return not of the value */ AI_INTERFACE_ENTRY ai_bool ai_logical_not(const ai_bool x); /*! * @brief platform optimized pow on a float value * @ingroup math_helpers * @param x input value * @param e input value * @return pow of the value ^ e */ AI_INTERFACE_ENTRY ai_float ai_math_pow(const ai_float x, const ai_float e); /*! * @brief platform optimized tangent on a float value * @ingroup math_helpers * @param x input value * @return hyperbolic tangent of the value */ AI_INTERFACE_ENTRY ai_float ai_math_tanh(const ai_float x); /*! * @brief platform optimized relu on a float value * @ingroup math_helpers * @param x input value * @return relu of the value ( x if x>0 else 0) */ AI_INTERFACE_ENTRY ai_float ai_math_relu(const ai_float x); /*! * @brief platform optimized parametric relu on a float value * @ingroup math_helpers * @param x input value * @param slope input value * @return parametric relu of the value */ AI_INTERFACE_ENTRY ai_float ai_math_prelu(const ai_float x, const ai_float slope); /*! * @brief platform optimized parametric sigmoid on a float value * @ingroup math_helpers * @param x input value * @return sigmoid of the value */ AI_INTERFACE_ENTRY ai_float ai_math_sigmoid(const ai_float x); /*! * @brief platform optimized parametric hard sigmoid on a float value * @ingroup math_helpers * @param x input value * @return hard sigmoid of the value */ AI_INTERFACE_ENTRY ai_float ai_math_hard_sigmoid(const ai_float x); // const ai_float alpha, const ai_float beta); /*! 
* @brief platform optimized parametric swish on a float value * @ingroup math_helpers * @param x input value * @return swish of the value */ AI_INTERFACE_ENTRY ai_float ai_math_swish(const ai_float x); /*! * @brief platform optimized parametric hard_swish on a float value * @ingroup math_helpers * @param x input value * @return hard_swish of the value */ AI_INTERFACE_ENTRY ai_float ai_math_hard_swish(const ai_float x); /*! * @brief platform optimized parametric sign function on a float value * @ingroup math_helpers * @param x input value * @return sign of the value */ AI_INTERFACE_ENTRY ai_float ai_math_sign(const ai_float x); /*! * @brief optimized parametric rectified linear unit on a float value * @ingroup math_helpers * @param x input value * @param slope parameter value * @return x if x is positive and x*slope otherwise */ AI_INTERFACE_ENTRY ai_float ai_fast_prelu(const ai_float x, const ai_float slope); AI_INTERFACE_ENTRY void ai_div(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_u32(ai_handle out, const 
ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_div_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_div_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_div_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_bitshift_right(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_right_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_right_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_right_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_right_buffer_u8(ai_handle out, const ai_handle a, const 
ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_bitshift_left_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_bitshift_left_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_floor_div(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_floor_div_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_floor_mod(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_floor_mod_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_s16(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_max_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_max_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_max_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_min(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_s32(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_min_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_min_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_min_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_mul(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_f32(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_mul_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_mul_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_mul_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_pow(ai_handle out, 
const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_pow_buffer(ai_handle out, const ai_handle b, const ai_handle e, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sub_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sub_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const 
ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_sub_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_sum(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_sum_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_sum_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); 
AI_INTERFACE_ENTRY void ai_sum_buffer_INT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_sum_buffer_UINT8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop, const ai_handle pScale1, const ai_handle pZp1, const ai_handle pScale2, const ai_handle pZp2, const ai_handle pScaleout, const ai_handle pZpout, const ai_i32 scalar_op); AI_INTERFACE_ENTRY void ai_and(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_and_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_or(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_or_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_xor(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_xor_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size 
loop); AI_INTERFACE_ENTRY void ai_greater_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); 
AI_INTERFACE_ENTRY void ai_greater_or_equal_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_greater_or_equal_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_greater_or_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_u16(ai_handle out, const ai_handle a, const 
ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_less_or_equal_u8(ai_handle out, const ai_handle a, const ai_handle b); 
AI_INTERFACE_ENTRY void ai_less_or_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_f32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_f32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_s32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_s32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_s16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_s16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_s8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_s8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_u32(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_u32(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_u16(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_u16(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_equal_u8(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_equal_buffer_u8(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); AI_INTERFACE_ENTRY void ai_squared_diff(ai_handle out, const ai_handle a, const ai_handle b); AI_INTERFACE_ENTRY void ai_squared_diff_buffer(ai_handle out, const ai_handle a, const ai_handle b, const ai_size loop); 
AI_API_DECLARE_END #endif /* AI_MATH_HELPERS_H */
34,676
C
61.820652
137
0.706252
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_ml_treeensembleregressor.h
/**
  ******************************************************************************
  * @file    layers_ml_treeensembleregressor.h
  * @author  AIS
  * @brief   header file of AI platform TreeEnsembleRegressor datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  @verbatim
  @endverbatim
  ******************************************************************************
  */
#ifndef LAYERS_TREE_ENSEMBLE_REGRESSOR_H
#define LAYERS_TREE_ENSEMBLE_REGRESSOR_H
#pragma once

#include "layers_common.h"
#include "layers_ml_treeensembleclassifier.h"
#include "layers_nl.h"

/*!
 * @defgroup layers_treeensemblereg Layers Definitions
 * @brief definition
 *
 */

AI_API_DECLARE_BEGIN

/* Layer descriptor for the TreeEnsembleRegressor ML operator. */
typedef AI_ALIGNED_TYPE(struct, 4) ai_layer_tree_ensemble_regressor_ {
  AI_LAYER_COMMON_FIELDS_DECLARE
  func_nl nl_func;                   /* post-transform non-linearity applied to the output */
  uint8_t all_weights_are_positive;  /* non-zero when every target weight is positive */
  ai_float nodes_values_offset;      /* dequantization offset for node threshold values */
  ai_float nodes_values_scale;       /* dequantization scale for node threshold values */
  ai_float target_weights_offset;    /* dequantization offset for target weights */
  ai_float target_weights_scale;     /* dequantization scale for target weights */
} ai_layer_tree_ensemble_regressor;

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Decodes the TreeEnsembleRegressor ML operator.
 * @ingroup layers_treeensemblereg
 * @param layer tree ensemble regressor layer
 */
AI_INTERNAL_API
void forward_tree_ensemble_regressor(ai_layer *pLayer);

AI_API_DECLARE_END

#endif    /*LAYERS_TREE_ENSEMBLE_REGRESSOR_H*/
1,923
C
29.0625
80
0.520021
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_pool_f32.h
/**
  ******************************************************************************
  * @file    lite_pool_f32.h
  * @author  AIS
  * @brief   header file of AI platform lite float pooling kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  @verbatim
  @endverbatim
  ******************************************************************************
  */
#ifndef LITE_POOL_F32_H
#define LITE_POOL_F32_H

#include "ai_lite_interface.h"

#define FUNC_POOL(handle) \
  ((func_pool)(handle))

/*!
 * @typedef (*func_pool)
 * @ingroup layers_pool
 * @brief Function pointer for generic pooling transform
 * this function pointer abstracts a generic pooling layer.
 * see @ref pool_func_ap_array_f32 as examples
 */
typedef void (*func_pool)(ai_float* in,
                          const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                          const ai_u16 ch_im_in,
                          const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                          const ai_u16 padding_x, const ai_u16 padding_y,
                          const ai_u16 stride_x, const ai_u16 stride_y,
                          const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                          ai_float* out);

/******************************************************************************/
/*  Pooling Functions Section (max pooling and average pooling)               */
/******************************************************************************/

/* Max pooling over a float array. */
AI_INTERNAL_API
void pool_func_mp_array_f32(ai_float* pData_in,
                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                            const ai_u16 ch_im_in,
                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                            const ai_u16 padding_x, const ai_u16 padding_y,
                            const ai_u16 stride_x, const ai_u16 stride_y,
                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                            ai_float* pData_out);

/* Average pooling over a float array. */
AI_INTERNAL_API
void pool_func_ap_array_f32(ai_float *pData_in,
                            const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y,
                            const ai_u16 ch_im_in,
                            const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y,
                            const ai_u16 padding_x, const ai_u16 padding_y,
                            const ai_u16 stride_x, const ai_u16 stride_y,
                            const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y,
                            ai_float *pData_out);

#endif    // LITE_POOL_F32_H
2,936
C
39.232876
80
0.466962
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_gru_f32.h
#ifndef LITE_GRU_F32_H
#define LITE_GRU_F32_H
#pragma once

#include "ai_lite_interface.h"

/*!
 * @brief Forward function for a stateless GRU (gated recurrent unit) layer with
 * signed float input, signed float output, and float parameters.
 * @ingroup lite_gru_f32
 * @param output The pointer to output buffer.
 * @param input The pointer to input buffer.
 * @param gru_kernel The pointer to gru kernel param.
 * @param gru_recurrent The pointer to gru recurrent param.
 * @param gru_bias The pointer to bias.
 * @param gru_scratch The pointer to GRU scratch.
 * @param n_units The number of GRU cells (dimensionality of output space).
 * @param n_timesteps The number of timesteps of the input sequence.
 * @param n_features The number of features of the input sequence.
 * @param activation_nl The activation function used to update memory state.
 * @param recurrent_nl The activation function to use for the recurrent step.
 * @param return_seq If True, returns the full output sequence, else only the last output.
 * @param go_backwards If True, process the input sequence backwards.
 * @param reverse_seq If True, reverse the input sequence
 * @param reset_after Whether to apply reset gate after (True) or before (False) matmul.
 * @param activation_param The parameters for activation_nl (can be NULL)
 * @param recurrent_param The parameters for recurrent_nl (can be NULL)
 */
LITE_API_ENTRY
void forward_lite_gru_if32of32wf32(
  ai_float* output, const ai_float* input, const ai_float* gru_kernel,
  const ai_float* gru_recurrent, const ai_float* gru_bias, ai_float* gru_scratch,
  const ai_u32 n_units, const ai_size n_timesteps, const ai_size n_features,
  ai_handle activation_nl, ai_handle recurrent_nl, ai_bool return_seq,
  ai_bool go_backwards, ai_bool reverse_seq, ai_bool reset_after,
  const ai_float* activation_param, const ai_float* recurrent_param);

#endif    /* LITE_GRU_F32_H */
1,910
C
46.774999
90
0.746597
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_dense_is1.h
#ifndef _LITE_DENSE_IS1_H
#define _LITE_DENSE_IS1_H
#pragma once

#include "ai_lite_interface.h"

/*!
 * @brief Forward function for a dense layer with signed binary input,
 * signed float output, and float weights.
 * @ingroup lite_dense_is1
 * @param output The pointer to output buffer.
 * @param input The pointer to input buffer.
 * @param weights The pointer to weights.
 * @param bias The pointer to bias (NULL if not available).
 * @param scratch The pointer to the scratch buffer (unused).
 * @param n_channel_in The number of channels of the input.
 * @param n_channel_out The number of channels of the output, i.e.,
 * the number of dense hidden neurons.
 */
LITE_API_ENTRY
void forward_lite_dense_is1of32wf32(
  ai_float *output, const ai_pbits *input,
  const ai_float *weights,
  const ai_float *bias, ai_float *scratch,
  const ai_u32 n_channel_in,
  const ai_u32 n_channel_out
);

/*!
 * @brief Forward function for a dense layer with signed binary input,
 * signed float output, and float weights.
 * The BN is fused, i.e., the layer requires weights, scale, and offset, where
 * weights are those of the dense layer, scale is that of the BN, and the offset
 * corresponds to dense bias * bn scale + bn offset. If the parameters do not
 * agree with such convention, the behavior is undefined.
 * @ingroup lite_dense_is1
 * @param output The pointer to output buffer.
 * @param input The pointer to input buffer.
 * @param weights The pointer to weights.
 * @param scale The pointer to scale.
 * @param offset The pointer to offset.
 * @param scratch The pointer to the scratch buffer (unused).
 * @param n_channel_in The number of channels of the input.
 * @param n_channel_out The number of channels of the output, i.e.,
 * the number of dense hidden neurons.
 */
LITE_API_ENTRY
void forward_lite_dense_is1of32wf32_bn(
  ai_float *output, const ai_pbits *input,
  const ai_float *weights,
  const ai_float *scale, const ai_float *offset, ai_float *scratch,
  const ai_u32 n_channel_in,
  const ai_u32 n_channel_out
);

#endif    /*_LITE_DENSE_IS1_H*/
2,078
C
36.799999
80
0.720404
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_nl_generic_float.h
#ifndef LITE_NL_GENERIC_FLOAT_H
#define LITE_NL_GENERIC_FLOAT_H
#pragma once

#include "ai_lite_interface.h"

#define LITE_NL_ENTRY(nl_id_, nl_name_, nl_op_, nl_op_args_) \
/** \
 * @brief lite function for a templated non-linearity nl_op_. \
 * @ingroup lite_nl_generic_float \
 * @param out_ptr The pointer to output buffer. \
 * @param in_ptr The pointer to input buffer. \
 * @param in_size The size of the input. \
 * @param params opaque handler to optional NL params (not used). \
 */ \
LITE_API_ENTRY \
void forward_lite_nl_ ## nl_name_ ## _if32of32( \
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size, const ai_handle params);

#include "lite_nl_list.h"

/**
 * @brief lite function for a float softmax non-linearity where the softmax is applied per channel.
 * @ingroup lite_nl_generic_float
 * @param out_ptr The pointer to output buffer.
 * @param in_ptr The pointer to input buffer.
 * @param in_size The size of the input.
 * @param ch_size The size of each channel.
 * @param in_ch_step The input channel step.
 * @param out_ch_step The output channel step.
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size,
  const ai_size ch_size, const ai_i32 in_ch_step, const ai_i32 out_ch_step);

/**
 * @brief lite function for a float softmax zero channel non-linearity where the softmax is applied per channel.
 * @ingroup lite_nl_generic_float
 * @param out_ptr The pointer to output buffer.
 * @param in_ptr The pointer to input buffer.
 * @param in_size The size of the input.
 * @param ch_size The size of each channel.
 * @param in_ch_step The input channel step.
 * @param out_ch_step The output channel step.
 */
LITE_API_ENTRY
void forward_lite_nl_softmax_zero_channel_if32of32(
  ai_handle out_ptr, const ai_handle in_ptr, const ai_i32 in_size,
  const ai_size ch_size, const ai_i32 in_ch_step, const ai_i32 out_ch_step);

#endif    /* LITE_NL_GENERIC_FLOAT_H */
1,907
C
33.071428
112
0.708967
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_pw.h
/**
  ******************************************************************************
  * @file    lite_pw.h
  * @author  AIS
  * @brief   header file of AI platform lite pw kernel datatypes
  ******************************************************************************
  * @attention
  *
  * Copyright (c) 2021 STMicroelectronics.
  * All rights reserved.
  *
  * This software is licensed under terms that can be found in the LICENSE file
  * in the root directory of this software component.
  * If no LICENSE file comes with this software, it is provided AS-IS.
  *
  ******************************************************************************
  @verbatim
  @endverbatim
  ******************************************************************************
  */
#ifndef LITE_PW_H
#define LITE_PW_H
#pragma once

#include "ai_lite_interface.h"

/******************************************************************************/
/*  Forward Functions Section                                                 */
/******************************************************************************/

/*!
 * @brief Handles pw (pointwise) convolutions, generic case
 * @ingroup lite_pw
 */
LITE_API_ENTRY
void
forward_lite_pw_sssa8_ch(const ai_i8 *pData_in,
                         const ai_u16 width_in,
                         const ai_u16 height_in,
                         const ai_u16 n_channel_in,
                         const ai_i8 *pWeights,
                         const ai_u16 n_channel_out,
                         const ai_i32 *pBias,
                         const ai_i8 in_zeropoint,
                         const ai_i8 out_zeropoint,
                         const ai_layer_format_type out_ch_format,
                         ai_i8 *pData_out,
                         ai_u32 height_loop_cnt,
                         ai_u16 weights_prefetch_enabled,
                         ai_i32 scratch_size,
                         ai_i16 *pBuffer_a);

#endif    /*LITE_PW_H*/
1,973
C
34.249999
80
0.387228
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/ai_lite_inspect.h
/** ****************************************************************************** * @file ai_lite_inspect.h * @author AST Embedded Analytics Research Platform * @brief Definitions and implementations of runtime-lite inspection routines ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef AI_LITE_INSPECT_H #define AI_LITE_INSPECT_H #pragma once #include "ai_platform.h" //#define HAS_LITE_INSPECT AI_API_DECLARE_BEGIN /* Types needed by inspect callback signature */ typedef ai_i32 ai_data_format; typedef ai_i32 ai_data_id; /* Lite inspect callback definition */ typedef void (*ai_lite_inspect_cb)( const ai_handle cookie, const ai_data_id node_id, const ai_handle data, const ai_size data_size, const ai_data_format data_fmt, const ai_data_id data_id); #ifdef HAS_LITE_INSPECT #define LITE_INSPECT_CB(_node_id, _data, _data_size, _data_fmt, _data_id) { \ if (graph->cb) { \ graph->cb(graph->cb_cookie, \ (ai_data_id)(_node_id), (ai_handle)(_data), (ai_size)(_data_size), \ (ai_data_format)(_data_fmt), (ai_data_id)(_data_id)); \ } \ } #else #define LITE_INSPECT_CB(_node_id, _data, _data_size, _data_fmt, _data_id) { \ do { /* LITE_INSPECT_CB() */ } while (0); \ } #endif /* HAS_LITE_INSPECT */ AI_API_DECLARE_END #endif /* AI_LITE_INSPECT_H */
1,858
C
28.507936
82
0.545748
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/lite_maxpool_dqnn.h
/** ****************************************************************************** * @file lite_maxpool_dqnn.h * @author AIS * @brief header file of AI platform lite maxpool kernel datatypes ****************************************************************************** * @attention * * Copyright (c) 2021 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LITE_MAXPOOL_DQNN_H #define LITE_MAXPOOL_DQNN_H #pragma once #include "ai_lite_interface.h" /******************************************************************************/ /* Forward Functions Section */ /******************************************************************************/ /*! * @brief Handles maxpool with binary input and binary output - Lite I/F * @ingroup lite_maxpool_dqnn */ LITE_API_ENTRY void forward_lite_maxpool_is1os1(const ai_u32 *pDataIn_init, ai_u32 *pDataOut_init, const ai_i32 width_in, const ai_i32 width_out, const ai_i32 height_in, const ai_i32 height_out, const ai_u32 n_channel_in, const ai_u32 n_channel_out, const ai_i32 pool_width, const ai_i32 pool_height, const ai_i32 pool_pad_x, const ai_i32 pool_pad_y, const ai_i32 pool_stride_x, const ai_i32 pool_stride_y, const ai_u32 pool_pad_value, ai_float *pScratch_32); /*! 
* @brief Handles maxpool with 8 bits signed input and output with a positive scale of the input- Lite I/F * @ingroup lite_maxpool_dqnn */ LITE_API_ENTRY void forward_lite_maxpool_is8os8_scalepos(const ai_i8 *pDataIn, ai_i8 *pDataOut, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_float InOut_ScaleRatio, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint); /*! * @brief Handles maxpool with 8 bits signed input and output with a negative scale of the input- Lite I/F * @ingroup lite_maxpool_dqnn */ LITE_API_ENTRY void forward_lite_maxpool_is8os8_scaleneg(const ai_i8 *pDataIn, ai_i8 *pDataOut, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_float InOut_ScaleRatio, const ai_i8 In_ZeroPoint, const ai_i8 Out_ZeroPoint); /*! * @brief Handles maxpool with 8 bits unsigned input and output with a positive scale of the input- Lite I/F * @ingroup lite_maxpool_dqnn */ LITE_API_ENTRY void forward_lite_maxpool_iu8ou8_scalepos(const ai_u8 *pDataIn, ai_u8 *pDataOut, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_float InOut_ScaleRatio, const ai_u8 In_ZeroPoint, const ai_u8 Out_ZeroPoint); /*! 
* @brief Handles maxpool with 8 bits unsigned input and output with a negative scale of the input- Lite I/F * @ingroup lite_maxpool_dqnn */ LITE_API_ENTRY void forward_lite_maxpool_iu8ou8_scaleneg(const ai_u8 *pDataIn, ai_u8 *pDataOut, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_float InOut_ScaleRatio, const ai_u8 In_ZeroPoint, const ai_u8 Out_ZeroPoint); /*! * @brief Handles maxpool with 16 bits signed input and output with a positive scale of the input- Lite I/F * @ingroup lite_maxpool_dqnn */ LITE_API_ENTRY void forward_lite_maxpool_is16os16_scalepos(const ai_i16 *pApInput, ai_i16 *pApOutput, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_float InOut_ScaleRatio, const ai_i16 In_ZeroPoint, const ai_i16 Out_ZeroPoint); /*! * @brief Handles maxpool with 16 bits unsigned input and output with a positive scale of the input- Lite I/F * @ingroup lite_maxpool_dqnn */ LITE_API_ENTRY void forward_lite_maxpool_iu16ou16_scalepos(const ai_u16 *pApInput, ai_u16 *pApOutput, const ai_u16 dim_im_in_x, const ai_u16 dim_im_in_y, const ai_u16 ch_im_in, const ai_u16 dim_kernel_x, const ai_u16 dim_kernel_y, const ai_u16 padding_x, const ai_u16 padding_y, const ai_u16 stride_x, const ai_u16 stride_y, const ai_u16 dim_im_out_x, const ai_u16 dim_im_out_y, const ai_float InOut_ScaleRatio, const ai_u16 In_ZeroPoint, const ai_u16 Out_ZeroPoint); #endif /*LITE_MAXPOOL_DQNN_H*/
8,459
C
51.546584
109
0.422154
Tbarkin121/GuardDog/stm32/AnymalNet/Middlewares/ST/AI/Inc/layers_common.h
/** ****************************************************************************** * @file layers_common.h * @author AST Embedded Analytics Research Platform * @brief header file of AI platform layers datatypes ****************************************************************************** * @attention * * Copyright (c) 2017 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** @verbatim @endverbatim ****************************************************************************** */ #ifndef LAYERS_COMMON_H #define LAYERS_COMMON_H #pragma once // #include <stdlib.h> #ifdef USE_CYCLE_MEASUREMENTS #include "layers_cycles_estimation.h" #endif #include "ai_platform.h" #include "ai_common_config.h" #include "core_common.h" /* optimizations */ #define AI_OPTIM_DICT8_DOT_ARRAY_F32 (1) #define AI_OPTIM_DICT8_DTCM (1) #define AI_OPTIM_FUNC_MP_ARRAY_F32 (0) #define AI_LAYER_OBJ(obj_) \ ((ai_layer_base*)(obj_)) #define AI_LAYER_FUNC(func_) \ ((layer_func)(func_)) #define AI_LAYER_TYPE(type_) \ ( (ai_layer_type)((ai_u32)(type_)&0xFFFF) ) #define AI_LAYER_TYPE_ENTRY(type_) \ AI_CONCAT(AI_CONCAT(AI_LAYER_, type_), _TYPE) #define AI_LAYER_TYPE_NAME(type_) \ ai_layer_type_name(AI_LAYER_TYPE(type_)) #if (AI_TOOLS_API_VERSION <= AI_TOOLS_API_VERSION_1_3) #pragma message ("Including deprecated AI_LAYER_OBJ_INIT, AI_LAYER_OBJ_DECLARE") AI_DEPRECATED #define AI_LAYER_OBJ_INIT(type_, id_, network_, \ next_, forward_, ...) \ { \ AI_NODE_COMMON_INIT(AI_CONCAT(AI_LAYER_, type_), id_, 0x0, \ NULL, network_, next_, forward_), \ ## __VA_ARGS__ \ } AI_DEPRECATED #define AI_LAYER_OBJ_DECLARE(varname_, id_, type_, struct_, forward_func_, \ network_, next_, attr_, ...) 
\ AI_ALIGNED(4) \ attr_ AI_CONCAT(ai_layer_, struct_) varname_ = \ AI_LAYER_OBJ_INIT(type_, id_, network_, \ next_, forward_func_, \ ## __VA_ARGS__); #else #define AI_LAYER_OBJ_INIT(type_, id_, flags_, klass_, network_, \ next_, forward_, tensors_, ...) \ { \ AI_NODE_COMMON_INIT(AI_CONCAT(AI_LAYER_, type_), id_, flags_, \ klass_, network_, next_, forward_), \ .tensors = (tensors_), \ ## __VA_ARGS__ \ } #define AI_LAYER_OBJ_DECLARE( \ varname_, id_, \ type_, flags_, klass_obj_, \ struct_, forward_func_, \ tensors_chain_, \ network_, next_, attr_, ...) \ AI_ALIGNED(4) \ attr_ AI_CONCAT(ai_layer_, struct_) varname_ = \ AI_LAYER_OBJ_INIT(type_, id_, flags_, klass_obj_, network_, \ next_, forward_func_, tensors_chain_, ## __VA_ARGS__); #endif /* AI_TOOLS_API_VERSION_1_3 */ #ifdef HAS_AI_ASSERT #define AI_LAYER_IO_GET(layer_, in_, out_) \ ASSERT_LAYER_SANITY(layer_) \ const ai_tensor* in_ = GET_TENSOR_IN((layer_)->tensors, 0); \ ai_tensor* out_ = GET_TENSOR_OUT((layer_)->tensors, 0); \ ASSERT_TENSOR_DATA_SANITY(in_) \ ASSERT_TENSOR_DATA_SANITY(out_) #define AI_LAYER_TENSOR_LIST_IO_GET(layer_, tlist_in_, tlist_out_) \ ASSERT_LAYER_SANITY(layer_) \ const ai_tensor_list* tlist_in_ = GET_TENSOR_LIST_IN((layer_)->tensors); \ ai_tensor_list* tlist_out_ = GET_TENSOR_LIST_OUT((layer_)->tensors); \ ASSERT_TENSOR_LIST_SANITY(tlist_in_) \ ASSERT_TENSOR_LIST_SANITY(tlist_out_) #define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \ const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \ const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \ ? 
GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \ : NULL; \ ASSERT_TENSOR_DATA_SANITY(weights_) \ if (bias_) { ASSERT_TENSOR_DATA_SANITY(bias_) } #else #define AI_LAYER_IO_GET(layer_, in_, out_) \ const ai_tensor* in_ = GET_TENSOR_IN((layer_)->tensors, 0); \ ai_tensor* out_ = GET_TENSOR_OUT((layer_)->tensors, 0); #define AI_LAYER_TENSOR_LIST_IO_GET(layer_, tlist_in_, tlist_out_) \ const ai_tensor_list* tlist_in_ = GET_TENSOR_LIST_IN((layer_)->tensors); \ ai_tensor_list* tlist_out_ = GET_TENSOR_LIST_OUT((layer_)->tensors); #define AI_LAYER_WEIGHTS_GET(layer_, weights_, bias_) \ const ai_tensor* weights_ = GET_TENSOR_WEIGHTS((layer_)->tensors, 0); \ const ai_tensor* bias_ = (GET_TENSOR_LIST_SIZE(GET_TENSOR_LIST_WEIGTHS((layer_)->tensors))>1) \ ? GET_TENSOR_WEIGHTS((layer_)->tensors, 1) \ : NULL; \ #endif /*HAS_AI_ASSERT*/ AI_API_DECLARE_BEGIN /*! * @defgroup layers_common Layers Common * @brief Implementation of the common layers datastructures * This header enumerates the layers specific definition implemented in the * library toghether with the macros and datatypes used to manipulate them. */ /*! * @typedef (*func_copy_tensor) * @ingroup layers_common * @brief Fuction pointer for generic tensor copy routines * this function pointer abstracts a generic tensor copy routine. */ typedef ai_bool (*func_copy_tensor)(ai_tensor* dst, const ai_tensor* src); /*! * @enum ai_layer_type * @ingroup layers_common * @brief ai_tools supported layers type id */ typedef enum { #define LAYER_ENTRY(type_, id_, struct_, forward_func_, init_func_, destroy_func_) \ AI_LAYER_TYPE_ENTRY(type_) = id_, #include "layers_list.h" } ai_layer_type; #define AI_LAYER_COMMON_FIELDS_DECLARE \ AI_NODE_COMMON_FIELDS_DECLARE #define AI_LAYER_STATEFUL_FIELDS_DECLARE \ AI_NODE_STATEFUL_FIELDS_DECLARE /*! * @typedef void (*layer_func)(struct ai_layer_* layer) * @ingroup layers_common * @brief Callback signatures for all layers forward functions */ typedef node_func layer_func; /*! 
* @struct ai_layer_base * @ingroup layers_common * @brief Structure encoding a base layer in the network * */ typedef ai_node ai_layer_base; /*! * @struct ai_layer_stateful * @ingroup layers_common * @brief Structure encoding a stateful layer in the network * */ typedef ai_node_stateful ai_layer_stateful; /*! * @brief Check the custom network types against the internally compiled ones * Helper function to check if the private APIs where compiled with a different * `datatypes_network.h` than the one provided to the caller. * @ingroup layers_common * @param signatures list of type sizes signatures (first element is the number of types) * @return false if there is a type size mismatch */ AI_INTERNAL_API ai_bool ai_check_custom_types(const ai_custom_type_signature* signatures); /*! * @brief Helper API to retrieve a human readable layer type from enum * @ingroup layers_common * @param type in type of layer * @return string defining the type of the layer */ AI_INTERNAL_API const char* ai_layer_type_name(const ai_layer_type type); /*! * @brief Helper API to check if a node is a valid layer type * @ingroup layers_common * @param type in type of layer * @return true if the layer is one of the ones listed in the enum, * false otherwise */ AI_INTERNAL_API ai_bool ai_layer_type_is_valid(const ai_layer_type type); #ifdef HAS_AI_ASSERT /*! 
* @brief chack scratch size computed with actual scratch buffer size * @ingroup layers * @param layer_type the layer type * @param fmt buffers format * @param filt_width filter width (when relevant) * @param filt_height filter height (when relevant) * @param n_channel_in the number of channels in * @param n_channel_out the number of channels out * @param is_pointwise is pointwise convulation (conv2d) * @param is_rgb is rgb convolution (conv2d) * @param is depthwise is depthwise convolution (conv2d) * @param is_ch_wise has weights per channel * @param is_sssa is signed * @param p_tensor_scratch the scratch tensor * @param p_function_name the name of the function * @param line_nb the the line of the function */ AI_INTERNAL_API ai_size ai_layer_get_scratch_size( ai_layer_type layer_type, ai_array_format fmt, ai_size filt_width, ai_size filt_height, ai_u16 n_channel_in, ai_u16 n_channel_out, ai_bool is_pointwise, ai_bool is_rgb, ai_bool is_depthwise, ai_bool is_ch1st, ai_bool is_ch_wise, ai_bool is_sss); /*! 
* @brief chack scratch size computed with actual scratch buffer size * @ingroup layers * @param layer_type the layer type * @param fmt buffers format * @param filt_width filter width (when relevant) * @param filt_height filter height (when relevant) * @param n_channel_in the number of channels in * @param n_channel_out the number of channels out * @param is_pointwise is pointwise convulation (conv2d) * @param is_rgb is rgb convolution (conv2d) * @param is depthwise is depthwise convolution (conv2d) * @param is_ch_wise has weights per channel * @param is_sssa is signed * @param p_tensor_scratch the scratch tensor * @param p_function_name the name of the function * @param line_nb the the line of the function */ AI_INTERNAL_API void ai_layer_check_scratch_size( ai_layer_type layer_type, ai_array_format fmt, ai_size filt_width, ai_size filt_height, ai_u16 n_channel_in, ai_u16 n_channel_out, ai_bool is_pointwise, ai_bool is_rgb, ai_bool is_depthwise, ai_bool is_ch1st, ai_bool is_ch_wise, ai_bool is_sssa, ai_tensor *p_tensor_scratch, const char *p_function_name, int line_nb); #define CHECK_SCRATCH_BUFFER_SIZE( layer_type, fmt, \ filt_width, filt_height, \ n_channel_in, n_channel_out, \ is_pointwise, is_rgb, \ is_depthwise, is_ch1st, is_ch_wise, \ is_sssa_ch, p_tensor_scratch) \ ai_layer_check_scratch_size(layer_type, fmt, \ filt_width, filt_height, \ n_channel_in, n_channel_out, \ is_pointwise, is_rgb, \ is_depthwise, is_ch1st, is_ch_wise, \ is_sssa_ch, p_tensor_scratch,\ __FUNCTION__, __LINE__); #endif AI_API_DECLARE_END #endif /*LAYERS_COMMON_H*/
10,739
C
34.681063
99
0.607133
Tbarkin121/GuardDog/stm32/AnymalNet/X-CUBE-AI/constants_ai.h
/** ****************************************************************************** * @file constants.h * @author X-CUBE-AI C code generator * @brief AI constants definitions ****************************************************************************** * @attention * * Copyright (c) 2024 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __CONSTANTS_H #define __CONSTANTS_H #ifdef __cplusplus extern "C" { #endif /* Constants definitions ------------------------------------------------------------------*/ #ifdef __cplusplus } #endif #endif /*__constants_ai_h_H */
975
C
30.48387
93
0.44
Tbarkin121/GuardDog/stm32/AnymalNet/X-CUBE-AI/App/network_data.h
/** ****************************************************************************** * @file network_data.h * @author AST Embedded Analytics Research Platform * @date Sat Jan 6 20:35:01 2024 * @brief AI Tool Automatic Code Generator for Embedded NN computing ****************************************************************************** * Copyright (c) 2024 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ #ifndef NETWORK_DATA_H #define NETWORK_DATA_H #pragma once #include "network_config.h" #include "network_data_params.h" AI_DEPRECATED #define AI_NETWORK_DATA_ACTIVATIONS(ptr_) \ ai_network_data_activations_buffer_get(AI_HANDLE_PTR(ptr_)) AI_DEPRECATED #define AI_NETWORK_DATA_WEIGHTS(ptr_) \ ai_network_data_weights_buffer_get(AI_HANDLE_PTR(ptr_)) AI_API_DECLARE_BEGIN extern const ai_u64 s_network_weights_array_u64[7747]; /*! * @brief Get network activations buffer initialized struct. * @ingroup network_data * @param[in] ptr a pointer to the activations array storage area * @return an ai_buffer initialized struct */ AI_DEPRECATED AI_API_ENTRY ai_buffer ai_network_data_activations_buffer_get(const ai_handle ptr); /*! * @brief Get network weights buffer initialized struct. * @ingroup network_data * @param[in] ptr a pointer to the weights array storage area * @return an ai_buffer initialized struct */ AI_DEPRECATED AI_API_ENTRY ai_buffer ai_network_data_weights_buffer_get(const ai_handle ptr); /*! * @brief Get network weights array pointer as a handle ptr. * @ingroup network_data * @return a ai_handle pointer to the weights array */ AI_DEPRECATED AI_API_ENTRY ai_handle ai_network_data_weights_get(void); /*! * @brief Get network params configuration data structure. 
* @ingroup network_data * @return true if a valid configuration is present, false otherwise */ AI_API_ENTRY ai_bool ai_network_data_params_get(ai_network_params* params); AI_API_DECLARE_END #endif /* NETWORK_DATA_H */
2,262
C
26.26506
80
0.655172
Tbarkin121/GuardDog/stm32/AnymalNet/X-CUBE-AI/App/network_data.c
/** ****************************************************************************** * @file network_data.c * @author AST Embedded Analytics Research Platform * @date Sat Jan 6 20:35:01 2024 * @brief AI Tool Automatic Code Generator for Embedded NN computing ****************************************************************************** * @attention * * Copyright (c) 2024 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ #include "network_data.h" #include "ai_platform_interface.h" AI_API_DECLARE_BEGIN ai_buffer g_network_data_map_activations[AI_NETWORK_DATA_ACTIVATIONS_COUNT] = { AI_BUFFER_INIT(AI_FLAG_NONE, AI_BUFFER_FORMAT_U8, AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, 1, 1536, 1, 1), 1536, NULL, NULL), /* heap_overlay_pool */ }; ai_buffer g_network_data_map_weights[AI_NETWORK_DATA_WEIGHTS_COUNT] = { AI_BUFFER_INIT(AI_FLAG_NONE, AI_BUFFER_FORMAT_U8, AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, 1, 61976, 1, 1), 61976, NULL, s_network_weights_array_u64), /* weights_array */ }; /*! * @brief Get network activations buffer initialized struct. * @ingroup network_data * @param[in] ptr a pointer to the activations array storage area * @return an ai_buffer initialized struct */ AI_DEPRECATED AI_API_ENTRY ai_buffer ai_network_data_activations_buffer_get(const ai_handle ptr) { ai_buffer buf = AI_BUFFER_INIT( AI_FLAG_NONE, AI_BUFFER_FORMAT_U8, AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, 1, AI_NETWORK_DATA_ACTIVATIONS_SIZE, 1, AI_NETWORK_DATA_ACTIVATIONS_COUNT), AI_NETWORK_DATA_ACTIVATIONS_SIZE, NULL, ptr); return buf; } /*! * @brief Get network weights buffer initialized struct. 
* @ingroup network_data * @param[in] ptr a pointer to the weights array storage area * @return an ai_buffer initialized struct */ AI_DEPRECATED AI_API_ENTRY ai_buffer ai_network_data_weights_buffer_get(const ai_handle ptr) { ai_buffer buf = AI_BUFFER_INIT( AI_FLAG_NONE, AI_BUFFER_FORMAT_U8|AI_BUFFER_FMT_FLAG_CONST, AI_BUFFER_SHAPE_INIT(AI_SHAPE_BCWH, 4, 1, AI_NETWORK_DATA_WEIGHTS_SIZE, 1, AI_NETWORK_DATA_WEIGHTS_COUNT), AI_NETWORK_DATA_WEIGHTS_SIZE, NULL, ptr); return buf; } /*! * @brief Get network weights array pointer as a handle ptr. * @ingroup network_data * @return a ai_handle pointer to the weights array */ AI_DEPRECATED AI_API_ENTRY ai_handle ai_network_data_weights_get(void) { return AI_HANDLE_PTR(g_network_weights_table); } /*! * @brief Get network params configuration data structure. * @ingroup network_data * @return true if a valid configuration is present, false otherwise */ AI_API_ENTRY ai_bool ai_network_data_params_get(ai_network_params* params) { if (!params) return false; const ai_buffer_array map_activations = AI_BUFFER_ARRAY_OBJ_INIT(AI_FLAG_NONE, AI_NETWORK_DATA_ACTIVATIONS_COUNT, g_network_data_map_activations); const ai_buffer_array map_weights = AI_BUFFER_ARRAY_OBJ_INIT(AI_FLAG_NONE, AI_NETWORK_DATA_WEIGHTS_COUNT, g_network_data_map_weights); return ai_platform_bind_network_params(params, &map_weights, &map_activations); } AI_API_DECLARE_END
3,411
C
31.188679
118
0.661976
Tbarkin121/GuardDog/stm32/AnymalNet/X-CUBE-AI/App/network_data_params.h
/** ****************************************************************************** * @file network_data_params.h * @author AST Embedded Analytics Research Platform * @date Sat Jan 6 20:35:01 2024 * @brief AI Tool Automatic Code Generator for Embedded NN computing ****************************************************************************** * Copyright (c) 2024 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. ****************************************************************************** */ #ifndef NETWORK_DATA_PARAMS_H #define NETWORK_DATA_PARAMS_H #pragma once #include "ai_platform.h" /* #define AI_NETWORK_DATA_WEIGHTS_PARAMS \ (AI_HANDLE_PTR(&ai_network_data_weights_params[1])) */ #define AI_NETWORK_DATA_CONFIG (NULL) #define AI_NETWORK_DATA_ACTIVATIONS_SIZES \ { 1536, } #define AI_NETWORK_DATA_ACTIVATIONS_SIZE (1536) #define AI_NETWORK_DATA_ACTIVATIONS_COUNT (1) #define AI_NETWORK_DATA_ACTIVATION_1_SIZE (1536) #define AI_NETWORK_DATA_WEIGHTS_SIZES \ { 61976, } #define AI_NETWORK_DATA_WEIGHTS_SIZE (61976) #define AI_NETWORK_DATA_WEIGHTS_COUNT (1) #define AI_NETWORK_DATA_WEIGHT_1_SIZE (61976) #define AI_NETWORK_DATA_ACTIVATIONS_TABLE_GET() \ (&g_network_activations_table[1]) extern ai_handle g_network_activations_table[1 + 2]; #define AI_NETWORK_DATA_WEIGHTS_TABLE_GET() \ (&g_network_weights_table[1]) extern ai_handle g_network_weights_table[1 + 2]; #endif /* NETWORK_DATA_PARAMS_H */
1,719
C
27.196721
80
0.596277
Tbarkin121/GuardDog/stm32/AnymalNet/X-CUBE-AI/App/app_x-cube-ai.h
/* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __APP_AI_H #define __APP_AI_H #ifdef __cplusplus extern "C" { #endif /** ****************************************************************************** * @file app_x-cube-ai.h * @author X-CUBE-AI C code generator * @brief AI entry function definitions ****************************************************************************** * @attention * * Copyright (c) 2024 STMicroelectronics. * All rights reserved. * * This software is licensed under terms that can be found in the LICENSE file * in the root directory of this software component. * If no LICENSE file comes with this software, it is provided AS-IS. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "ai_platform.h" void MX_X_CUBE_AI_Init(void); void MX_X_CUBE_AI_Process(void); /* USER CODE BEGIN includes */ /* USER CODE END includes */ #ifdef __cplusplus } #endif #endif /*__STMicroelectronics_X-CUBE-AI_8_1_0_H */
1,132
C
30.472221
80
0.470848