text
stringlengths 1
2.05k
|
---|
dequantize<
Q, T, impl QIntoT: Into<Q, T>, impl TSub: Sub<T>, impl TMul: Mul<T>, impl TDrop: Drop<T>
>(
x: Q, x_scale: T, x_zero_point: T
) -> T {
(x.into() - x_zero_point) * x_scale
} |
use orion::numbers::NumberTrait;
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::utils::saturate;
/// Computes a scale and zero point from the input's own range, then quantizes it
/// (ONNX DynamicQuantizeLinear). `min`/`max` are the limits of the quantized type Q
/// expressed in T; `zero`/`one` are the T constants 0 and 1.
/// Returns (quantized tensor, 1-element scale tensor, 1-element zero-point tensor).
fn dynamic_quantize_linear<
    T,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TTryInto: TryInto<T, Q>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    x: @Tensor<T>, min: T, max: T, zero: T, one: T
) -> (Tensor<Q>, Tensor<T>, Tensor<T>) {
    let mut x_max: T = x.max_in_tensor();
    let mut x_min: T = x.min_in_tensor();
    // Extend the data range to include zero so that zero is exactly representable.
    if x_max < zero {
        x_max = zero;
    }
    if x_min > zero {
        x_min = zero;
    }
    // Scale maps the adjusted data range onto the quantized range. A constant
    // input (x_max == x_min) would produce a zero scale, so fall back to `one`;
    // using the fallback here also prevents the division by zero that the raw
    // scale would cause in the zero-point computation below.
    let y_scale_value: T = if x_max == x_min {
        one
    } else {
        (x_max - x_min) / (max - min)
    };
    let mut y_scale_values: Array<T> = array![];
    y_scale_values.append(y_scale_value);
    let mut y_scale_tensor_shape: Array<u32> = array![];
    y_scale_tensor_shape.append(y_scale_values.len());
    let y_scale = TensorTrait::<
        T
    >::new(shape: y_scale_tensor_shape.span(), data: y_scale_values.span(),);
    // zero_point = qmin - x_min / scale, saturated into [min, max].
    let intermediate_zero_point: T = min - x_min / y_scale_value;
    let y_zero_point_value: T = saturate(min, max, intermediate_zero_point);
    let mut y_zero_point_values: Array<T> = array![];
    y_zero_point_values.append(y_zero_point_value);
    let mut y_zero_point_tensor_shape: Array<u32> = array![];
    y_zero_point_tensor_shape.append(y_zero_point_values.len());
    let y_zero_point = TensorTrait::<
        T
    >::new(shape: y_zero_point_tensor_shape.span(), data: y_zero_point_values.span(),);
    (quantize_linear(x, @y_scale, @y_zero_point, min, max), y_scale, y_zero_point)
}
use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// QLinearAdd: dequantizes both operands with their respective scale/zero point,
/// adds them in T-space, and requantizes the sum with `y_scale`/`y_zero_point`.
/// `min`/`max` bound the quantized type Q.
fn qlinear_add<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    a: @Tensor<Q>,
    a_scale: @Tensor<T>,
    a_zero_point: @Tensor<T>,
    b: @Tensor<Q>,
    b_scale: @Tensor<T>,
    b_zero_point: @Tensor<T>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    min: T,
    max: T
) -> Tensor<Q> {
    let lhs = dequantize_linear(@(*a), a_scale, a_zero_point);
    let rhs = dequantize_linear(@(*b), b_scale, b_zero_point);
    let sum: Tensor<T> = (lhs + rhs).into();
    quantize_linear(@sum, y_scale, y_zero_point, min, max)
}
|
use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::math::concat::{
validate_shapes, compute_output_size, concatenate_data
};
/// QLinearConcat: dequantizes each input with its own scale/zero point,
/// concatenates the results along `axis`, then requantizes with
/// `y_scale`/`y_zero_point`. `min`/`max` bound the quantized type Q.
fn qlinear_concat<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    tensors: Span<Tensor<Q>>,
    scales: Span<Tensor<T>>,
    zero_points: Span<Tensor<T>>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    axis: usize,
    min: T,
    max: T
) -> Tensor<Q> {
    assert(tensors.len() == scales.len(), 'Each Tensors must have a scale');
    // This check is about zero points; the message previously duplicated the
    // scale message above (copy-paste error).
    assert(tensors.len() == zero_points.len(), 'Each tensor needs a zero point');
    let mut x = concat_dequantize(tensors, scales, zero_points, axis, min, max);
    quantize_linear(@x, y_scale, y_zero_point, min, max)
}
/// Dequantizes every input tensor with its matching scale/zero point and
/// concatenates the results along `axis`, returning a T-space tensor.
fn concat_dequantize<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    tensors: Span<Tensor<Q>>,
    scales: Span<Tensor<T>>,
    zero_points: Span<Tensor<T>>,
    axis: usize,
    min: T,
    max: T
) -> Tensor<T> {
    assert(tensors.len() >= 2, 'Input tensors must be > 1');
    let first = *tensors.at(0);
    let first_shape = first.shape;
    // `axis` must address an existing dimension of the shared input rank.
    assert(first_shape.len() > axis, 'Out of bounds for dimension');
    validate_shapes(tensors, first_shape, axis);
    let out_shape = compute_output_size(first_shape, tensors, axis);
    let dequantized = dequantize_tensors(tensors, scales, zero_points, min, max);
    let out_data: Array<T> = concatenate_data(dequantized, axis, first_shape);
    TensorTrait::<T>::new(out_shape.span(), out_data.span())
}
/// Maps dequantize_linear over `tensors`, pairing each with the scale and
/// zero point at the same index. `min`/`max` are accepted for interface
/// symmetry but are not used by the dequantization itself.
fn dequantize_tensors<
    Q,
    T,
    impl TTensor: TensorTrait<T>,
    impl QIntoT: Into<Q, T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TDrop: Drop<T>,
    impl TCopy: Copy<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    mut tensors: Span<Tensor<Q>>,
    scales: Span<Tensor<T>>,
    zero_points: Span<Tensor<T>>,
    min: T,
    max: T
) -> Span<Tensor<T>> {
    let mut result: Array<Tensor<T>> = array![];
    let mut idx = 0;
    while idx != tensors.len() {
        result
            .append(
                dequantize_linear(
                    @(*tensors.at(idx)), @(*scales.at(idx)), @(*zero_points.at(idx))
                )
            );
        idx += 1;
    };
    result.span()
}
use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// QLinearLeakyRelu: dequantizes `a`, applies leaky-ReLU in T-space
/// (negative elements are scaled by `alpha`), and requantizes with the
/// same `a_scale`/`a_zero_point`. `min`/`max` bound the quantized type Q.
fn qlinear_leakyrelu<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    a: @Tensor<Q>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, alpha: T, min: T, max: T
) -> Tensor<Q> {
    let mut deq = dequantize_linear(@(*a), a_scale, a_zero_point);
    let mut activated: Array<T> = array![];
    loop {
        match deq.data.pop_front() {
            Option::Some(elem) => {
                let value = *elem;
                if value < NumberTrait::zero() {
                    activated.append(value * alpha);
                } else {
                    activated.append(value);
                }
            },
            Option::None => { break; }
        };
    };
    quantize_linear(
        @TensorTrait::new(deq.shape, activated.span()), a_scale, a_zero_point, min, max
    )
}
|
use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// QLinearMatMul: dequantizes both operands, multiplies them in T-space, and
/// requantizes with `y_scale`/`y_zero_point`. Rank <= 2 inputs delegate to
/// Tensor::matmul; higher ranks are treated as batches of 2-D matrices of
/// equal rank, multiplied batch by batch.
fn qlinear_matmul<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    a: @Tensor<Q>,
    a_scale: @Tensor<T>,
    a_zero_point: @Tensor<T>,
    b: @Tensor<Q>,
    b_scale: @Tensor<T>,
    b_zero_point: @Tensor<T>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    min: T,
    max: T
) -> Tensor<Q> {
    let a_shape = *a.shape;
    let b_shape = *b.shape;
    let a_ndim = a_shape.len();
    let b_ndim = b_shape.len();
    // 1-D / 2-D case: use the library matmul directly.
    if a_ndim <= 2 && b_ndim <= 2 {
        let deq_a = dequantize_linear(@(*a), a_scale, a_zero_point);
        let deq_b = dequantize_linear(@(*b), b_scale, b_zero_point);
        let product = deq_a.matmul(@deq_b);
        return quantize_linear(@product, y_scale, y_zero_point, min, max);
    }
    assert(a_ndim == b_ndim, 'dim missmatch');
    let deq_a = dequantize_linear(@(*a), a_scale, a_zero_point);
    let deq_b = dequantize_linear(@(*b), b_scale, b_zero_point);
    // Inner dimensions must agree: a is (..., m, k), b is (..., k, n).
    assert(a_shape[a_ndim - 1] == b_shape[b_ndim - 2], 'incompatible dim for matmul');
    let m = *a_shape[a_ndim - 2];
    let k = *a_shape[a_ndim - 1];
    let n = *b_shape[b_ndim - 1];
    let a_sub_shape: Array<usize> = array![m, k];
    let b_sub_shape: Array<usize> = array![k, n];
    // Number of leading-dimension batches = total elements / elements per matrix.
    let batches = stride(a_shape) / (m * k);
    let mut out_data: Array<T> = array![];
    let mut batch = 0;
    while batch != batches {
        result_updates(
            @subtensor(@deq_a, batch * (m * k), a_sub_shape.span()),
            @subtensor(@deq_b, batch * (k * n), b_sub_shape.span()),
            ref out_data
        );
        batch += 1;
    };
    let mut out_shape: Array<usize> = array![];
    x_shape(ref out_shape, a_shape, m, n);
    let product = TensorTrait::new(out_shape.span(), out_data.span());
    quantize_linear(@product, y_scale, y_zero_point, min, max)
}
/// Builds the batched-matmul output shape into `x_data`: every leading (batch)
/// dimension of `shape` followed by the two result dimensions `m` and `n`.
fn x_shape(ref x_data: Array<usize>, mut shape: Span<usize>, m: usize, n: usize) {
    while shape.len() != 2 {
        match shape.pop_front() {
            Option::Some(batch_dim) => { x_data.append(*batch_dim); },
            Option::None => { break; }
        };
    };
    x_data.append(m);
    x_data.append(n);
}
/// Returns the product of all dimensions in `shape` (total element count).
/// Panics on an empty shape.
fn stride(mut shape: Span<usize>) -> usize {
    assert(shape.len() > 0, 'shape cannot be empty');
    let mut product: usize = 1;
    loop {
        match shape.pop_front() {
            Option::Some(dim) => { product *= *dim; },
            Option::None => { break; }
        };
    };
    product
}
/// Copies a contiguous slice of `x`'s data beginning at flat index `start`,
/// sized by the element count of `shape`, into a new tensor with that shape.
fn subtensor<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    x: @Tensor<T>, start: usize, shape: Span<usize>
) -> Tensor::<T> {
    let count = stride(shape);
    let mut data = ArrayTrait::<T>::new();
    let mut offset = 0;
    while offset != count {
        data.append(*x.data[start + offset]);
        offset += 1;
    };
    TensorTrait::new(shape, data.span())
}
/// Multiplies two 2-D tensors (mat1: m x n, mat2: n x p) with the classic
/// triple loop and appends the m*p products row-major onto `result_data`.
/// (An unused local `result_shape` array was removed as dead code.)
fn result_updates<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mat1: @Tensor<T>, mat2: @Tensor<T>, ref result_data: Array<T>
) {
    let m = *mat1.shape[0];
    let n = *mat1.shape[1];
    let p = *mat2.shape[1];
    let mat1 = *mat1.data;
    let mat2 = *mat2.data;
    let mut i = 0_usize;
    while i != m {
        let mut j = 0_usize;
        while j != p {
            // Dot product of row i of mat1 with column j of mat2.
            let mut sum: T = NumberTrait::zero();
            let mut k = 0_usize;
            while k != n {
                let mat1_index = i * n + k;
                let mat2_index = k * p + j;
                sum += *mat1[mat1_index] * *mat2[mat2_index];
                k += 1;
            };
            result_data.append(sum);
            j += 1;
        };
        i += 1;
    };
}
use orion::numbers::{NumberTrait};
use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear;
use orion::operators::tensor::quantization::quantize_linear::quantize_linear;
use orion::operators::tensor::{TensorTrait, Tensor};
/// QLinearMul: dequantizes both operands with their respective scale/zero point,
/// multiplies them element-wise in T-space, and requantizes the product with
/// `y_scale`/`y_zero_point`. `min`/`max` bound the quantized type Q.
fn qlinear_mul<
    T,
    MAG,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl QIntoT: Into<Q, T>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TDiv: Div<T>,
    impl TMul: Mul<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTryInto: TryInto<T, Q>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    a: @Tensor<Q>,
    a_scale: @Tensor<T>,
    a_zero_point: @Tensor<T>,
    b: @Tensor<Q>,
    b_scale: @Tensor<T>,
    b_zero_point: @Tensor<T>,
    y_scale: @Tensor<T>,
    y_zero_point: @Tensor<T>,
    min: T,
    max: T
) -> Tensor<Q> {
    let lhs = dequantize_linear(@(*a), a_scale, a_zero_point);
    let rhs = dequantize_linear(@(*b), b_scale, b_zero_point);
    let product: Tensor<T> = (lhs * rhs).into();
    quantize_linear(@product, y_scale, y_zero_point, min, max)
}
|
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::check_compatibility;
use orion::operators::tensor::math::arithmetic::saturated_add;
use orion::utils::saturate;
/// Quantizes `x` into type Q: y = saturate(x / y_scale + y_zero_point).
/// A single-element scale/zero point triggers the fast per-tensor path;
/// otherwise shapes are validated and quantization runs per axis.
fn quantize_linear<
    T,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl TAdd: Add<T>,
    impl TDiv: Div<T>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TTryInto: TryInto<T, Q>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
>(
    x: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>, min: T, max: T
) -> Tensor::<Q> {
    let per_tensor = (*y_scale.data).len() == 1 && (*y_zero_point.data).len() == 1;
    if per_tensor {
        return quantize_element_wise(*x, *y_scale.data[0], *y_zero_point.data[0], min, max);
    }
    check_compatibility(*x.shape, *y_scale.shape);
    check_compatibility(*x.shape, *y_zero_point.shape);
    check_compatibility(*y_scale.shape, *y_zero_point.shape);
    quantize_per_axis(x, y_scale, y_zero_point, min, max)
}
/// Per-axis quantization: divides `x` by the broadcast `y_scale`, then adds
/// `y_zero_point` with saturation into [min, max] and converts to Q.
fn quantize_per_axis<
    T,
    Q,
    impl TTensor: TensorTrait<T>,
    impl QTensor: TensorTrait<Q>,
    impl TAdd: Add<T>,
    impl TTensorDiv: Div<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TTryInto: TryInto<T, Q>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QDrop: Drop<Q>,
>(
    x: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>, min: T, max: T
) -> Tensor::<Q> {
    let scaled = *x / *y_scale;
    saturated_add(@scaled, y_zero_point, min, max)
}
/// Per-tensor quantization: applies `quantize` to every element of `x`
/// with a single scalar scale and zero point, preserving the shape.
fn quantize_element_wise<
    T,
    Q,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>,
    impl TPartialOrd: PartialOrd<T>,
    impl TDiv: Div<T>,
    impl TAdd: Add<T>,
    impl TTryIntoQ: TryInto<T, Q>,
    impl QTensor: TensorTrait<Q>
>(
    mut x: Tensor::<T>, y_scale: T, y_zero_point: T, min: T, max: T
) -> Tensor::<Q> {
    let mut quantized: Array<Q> = array![];
    loop {
        match x.data.pop_front() {
            Option::Some(value) => {
                quantized.append(quantize(*value, y_scale, y_zero_point, min, max));
            },
            Option::None => { break; }
        };
    };
    TensorTrait::new(x.shape, quantized.span())
}
/// Quantizes one scalar: x / y_scale + y_zero_point, saturated to [min, max],
/// then converted to Q. Panics if the saturated value cannot convert to Q.
fn quantize<
    T,
    Q,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TDiv: Div<T>,
    impl TAdd: Add<T>,
    impl TTryIntoQ: TryInto<T, Q>
>(
    x: T, y_scale: T, y_zero_point: T, min: T, max: T
) -> Q {
    let shifted = (x / y_scale) + y_zero_point;
    saturate(min, max, shifted).try_into().unwrap()
}
use core::box::BoxTrait;
use core::traits::Into;
use core::nullable::{Nullable, match_nullable, FromNullableResult, nullable_from_box};
use alexandria_data_structures::vec::{VecTrait};
use orion::numbers::NumberTrait;
// Growable vector backed by a Felt252Dict of Nullable values.
// Slots that were never written hold null and read back as zero (see get/at).
struct NullableVec<T> {
    items: Felt252Dict<Nullable<T>>,
    len: usize,
}
// A Felt252Dict must be squashed before going out of scope; this Destruct
// impl guarantees that for NullableVec.
impl DestructNullableVec<T, impl TDrop: Drop<T>> of Destruct<NullableVec<T>> {
    fn destruct(self: NullableVec<T>) nopanic {
        self.items.squash();
    }
}
// VecTrait implementation for the dict-backed NullableVec.
impl NullableVecImpl<
    T, MAG, impl TDrop: Drop<T>, impl TCopy: Copy<T>, +NumberTrait<T, MAG>
> of VecTrait<NullableVec<T>, T> {
    fn new() -> NullableVec<T> {
        NullableVec { items: Default::default(), len: 0 }
    }
    // Returns the element at `index`, or None when index >= len.
    // In-bounds slots that were never written are null in the dict and
    // read back as NumberTrait::zero().
    fn get(ref self: NullableVec<T>, index: usize) -> Option<T> {
        if (index < self.len()) {
            return match match_nullable(self.items.get(index.into())) {
                FromNullableResult::Null(()) => { Option::Some(NumberTrait::zero()) },
                FromNullableResult::NotNull(val) => { Option::Some(val.unbox()) },
            };
        } else {
            Option::<T>::None
        }
    }
    // Like `get`, but panics on out-of-bounds access.
    // The None arm is unreachable after the assert; it exists to satisfy the match.
    fn at(ref self: NullableVec<T>, index: usize) -> T {
        assert(index < self.len(), 'Index out of bounds');
        return match self.get(index) {
            Option::Some(val) => val,
            Option::None => NumberTrait::zero(),
        };
    }
    // Appends `value` at the end. Length uses wrapping add, so it silently
    // wraps instead of panicking at u32::MAX elements.
    fn push(ref self: NullableVec<T>, value: T) -> () {
        self.items.insert(self.len.into(), nullable_from_box(BoxTrait::new(value)));
        self.len = core::integer::u32_wrapping_add(self.len, 1_usize);
    }
    // Writes `value` at `index`, growing the vector when index >= len.
    // Any skipped slots remain null and will read back as zero via `get`/`at`.
    fn set(ref self: NullableVec<T>, index: usize, value: T) {
        if index >= self.len() {
            self.len = index + 1;
        }
        self.items.insert(index.into(), nullable_from_box(BoxTrait::new(value)));
    }
    fn len(self: @NullableVec<T>) -> usize {
        *self.len
    }
}
|
mod tensor;
|
mod fixed_point;
mod i32;
mod i8;
mod u32;
|
mod fp8x23;
mod fp16x16;
|
use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Builds a Tensor<FP16x16> with the given `shape` whose flat data is the
/// sequence 0, 1, ..., count-1 (unscaled). When `negate` is true, every
/// non-zero element carries a negative sign (zero stays unsigned).
fn fp16x16_sequence_tensor(shape: Span<u32>, count: u32, negate: bool) -> Tensor<FP16x16> {
    let mut data: Array<FP16x16> = array![];
    let mut i: u32 = 0;
    while i != count {
        data.append(FixedTrait::new_unscaled(i, negate && i != 0));
        i += 1;
    };
    TensorTrait::<FP16x16>::new(shape, data.span())
}
// 1-D tensor [0, 1, 2].
fn fp_tensor_1x3_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3].span(), 3, false)
}
// 1-D tensor [0, -1, -2].
fn fp_tensor_1x3_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3].span(), 3, true)
}
// 2x2 tensor holding 0..3.
fn fp_tensor_2x2_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![2, 2].span(), 4, false)
}
// 2x2 tensor holding 0, -1, -2, -3.
fn fp_tensor_2x2_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![2, 2].span(), 4, true)
}
// 3x3 tensor holding 0..8.
fn fp_tensor_3x3_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 3].span(), 9, false)
}
// 3x3 tensor holding 0, -1, ..., -8.
fn fp_tensor_3x3_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 3].span(), 9, true)
}
// 3x2 tensor holding 0..5.
fn fp_tensor_3x2_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 2].span(), 6, false)
}
// 3x2 tensor holding 0, -1, ..., -5.
fn fp_tensor_3x2_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 2].span(), 6, true)
}
// 3x1 tensor holding 0..2.
fn fp_tensor_3x1_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 1].span(), 3, false)
}
// 3x1 tensor holding 0, -1, -2.
fn fp_tensor_3x1_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 1].span(), 3, true)
}
// 2x3 tensor holding 0..5.
fn fp_tensor_2x3_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![2, 3].span(), 6, false)
}
// 2x3 tensor holding 0, -1, ..., -5.
fn fp_tensor_2x3_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![2, 3].span(), 6, true)
}
// 2x2x2 tensor holding 0..7.
fn fp_tensor_2x2x2_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![2, 2, 2].span(), 8, false)
}
// 2x2x2 tensor holding 0, -1, ..., -7.
fn fp_tensor_2x2x2_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![2, 2, 2].span(), 8, true)
}
// 3x2x2 tensor holding 0..11.
fn fp_tensor_3x2x2_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 2, 2].span(), 12, false)
}
// 3x2x2 tensor holding 0, -1, ..., -11.
fn fp_tensor_3x2x2_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 2, 2].span(), 12, true)
}
// 3x3x3 tensor holding 0..26.
fn fp_tensor_3x3x3_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 3, 3].span(), 27, false)
}
// 3x3x3 tensor holding 0, -1, ..., -26.
fn fp_tensor_3x3x3_neg_helper() -> Tensor<FP16x16> {
    fp16x16_sequence_tensor(array![3, 3, 3].span(), 27, true)
}
use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor;
use orion::operators::tensor::{TensorTrait, Tensor};
fn fp_tensor_1x3_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, false),
FixedTrait::new_unscaled(2, false)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_1x3_neg_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, true),
FixedTrait::new_unscaled(2, true)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_2x2_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![2, 2];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, false),
FixedTrait::new_unscaled(2, false),
FixedTrait::new_unscaled(3, false)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_2x2_neg_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![2, 2];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, true),
FixedTrait::new_unscaled(2, true),
FixedTrait::new_unscaled(3, true)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_3x3_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3, 3];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, false),
FixedTrait::new_unscaled(2, false),
FixedTrait::new_unscaled(3, false),
FixedTrait::new_unscale |
d(4, false),
FixedTrait::new_unscaled(5, false),
FixedTrait::new_unscaled(6, false),
FixedTrait::new_unscaled(7, false),
FixedTrait::new_unscaled(8, false)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_3x3_neg_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3, 3];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, true),
FixedTrait::new_unscaled(2, true),
FixedTrait::new_unscaled(3, true),
FixedTrait::new_unscaled(4, true),
FixedTrait::new_unscaled(5, true),
FixedTrait::new_unscaled(6, true),
FixedTrait::new_unscaled(7, true),
FixedTrait::new_unscaled(8, true)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_3x2_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3, 2];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, false),
FixedTrait::new_unscaled(2, false),
FixedTrait::new_unscaled(3, false),
FixedTrait::new_unscaled(4, false),
FixedTrait::new_unscaled(5, false)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_3x2_neg_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3, 2];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, true),
FixedTrait::new_unscaled(2, true),
FixedTrait::new_unscaled(3, true),
FixedTrait::new_unscaled(4, true),
FixedTrait::new_unscaled(5, true)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_3x1_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3, 1];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, false), |
FixedTrait::new_unscaled(2, false)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_3x1_neg_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![3, 1];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, true),
FixedTrait::new_unscaled(2, true)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_2x3_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![2, 3];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, false),
FixedTrait::new_unscaled(2, false),
FixedTrait::new_unscaled(3, false),
FixedTrait::new_unscaled(4, false),
FixedTrait::new_unscaled(5, false)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_2x3_neg_helper() -> Tensor<FP8x23> {
let mut sizes: Array<u32> = array![2, 3];
let mut data = array![
FixedTrait::new_unscaled(0, false),
FixedTrait::new_unscaled(1, true),
FixedTrait::new_unscaled(2, true),
FixedTrait::new_unscaled(3, true),
FixedTrait::new_unscaled(4, true),
FixedTrait::new_unscaled(5, true)
];
let tensor = TensorTrait::<FP8x23>::new(sizes.span(), data.span());
tensor
}
fn fp_tensor_2x2x2_helper() -> Tensor<FP8x23> {
    // 2x2x2 FP8x23 tensor with row-major values 0..=7.
    let shape: Array<u32> = array![2, 2, 2];
    let values: Array<FP8x23> = array![
        FixedTrait::new_unscaled(0, false),
        FixedTrait::new_unscaled(1, false),
        FixedTrait::new_unscaled(2, false),
        FixedTrait::new_unscaled(3, false),
        FixedTrait::new_unscaled(4, false),
        FixedTrait::new_unscaled(5, false),
        FixedTrait::new_unscaled(6, false),
        FixedTrait::new_unscaled(7, false)
    ];
    TensorTrait::<FP8x23>::new(shape.span(), values.span())
}
fn fp_tensor_2x2x2_neg_helper() -> Tensor<FP8x23> {
    // 2x2x2 FP8x23 tensor with row-major values 0, -1, ..., -7.
    let shape: Array<u32> = array![2, 2, 2];
    let values: Array<FP8x23> = array![
        FixedTrait::new_unscaled(0, false),
        FixedTrait::new_unscaled(1, true),
        FixedTrait::new_unscaled(2, true),
        FixedTrait::new_unscaled(3, true),
        FixedTrait::new_unscaled(4, true),
        FixedTrait::new_unscaled(5, true),
        FixedTrait::new_unscaled(6, true),
        FixedTrait::new_unscaled(7, true)
    ];
    TensorTrait::<FP8x23>::new(shape.span(), values.span())
}
fn fp_tensor_3x2x2_helper() -> Tensor<FP8x23> {
    // 3x2x2 FP8x23 tensor with row-major values 0..=11, built in a loop.
    let shape: Array<u32> = array![3, 2, 2];
    let mut values: Array<FP8x23> = array![];
    let mut v: u32 = 0;
    while v != 12 {
        values.append(FixedTrait::new_unscaled(v, false));
        v += 1;
    }
    TensorTrait::<FP8x23>::new(shape.span(), values.span())
}
fn fp_tensor_3x2x2_neg_helper() -> Tensor<FP8x23> {
    // 3x2x2 FP8x23 tensor with row-major values 0, -1, ..., -11, built in a loop.
    let shape: Array<u32> = array![3, 2, 2];
    let mut values: Array<FP8x23> = array![FixedTrait::new_unscaled(0, false)];
    let mut v: u32 = 1;
    while v != 12 {
        values.append(FixedTrait::new_unscaled(v, true));
        v += 1;
    }
    TensorTrait::<FP8x23>::new(shape.span(), values.span())
}
fn fp_tensor_3x3x3_helper() -> Tensor<FP8x23> {
    // 3x3x3 FP8x23 tensor with row-major values 0..=26, built in a loop.
    let shape: Array<u32> = array![3, 3, 3];
    let mut values: Array<FP8x23> = array![];
    let mut v: u32 = 0;
    while v != 27 {
        values.append(FixedTrait::new_unscaled(v, false));
        v += 1;
    }
    TensorTrait::<FP8x23>::new(shape.span(), values.span())
}
fn fp_tensor_3x3x3_neg_helper() -> Tensor<FP8x23> {
    // 3x3x3 FP8x23 tensor with row-major values 0, -1, ..., -26, built in a loop.
    let shape: Array<u32> = array![3, 3, 3];
    let mut values: Array<FP8x23> = array![FixedTrait::new_unscaled(0, false)];
    let mut v: u32 = 1;
    while v != 27 {
        values.append(FixedTrait::new_unscaled(v, true));
        v += 1;
    }
    TensorTrait::<FP8x23>::new(shape.span(), values.span())
}
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I32Tensor;
fn i32_tensor_1x3_helper() -> Tensor<i32> {
    // 1-D i32 tensor [0, 1, 2].
    let shape: Array<u32> = array![3];
    let values: Array<i32> = array![0, 1, 2];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_1x3_neg_helper() -> Tensor<i32> {
    // 1-D i32 tensor [0, -1, -2].
    let shape: Array<u32> = array![3];
    let values: Array<i32> = array![0, -1, -2];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_2x2_helper() -> Tensor<i32> {
    // 2x2 i32 tensor with row-major values 0..=3.
    let shape: Array<u32> = array![2, 2];
    let values: Array<i32> = array![0, 1, 2, 3];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_2x2_neg_helper() -> Tensor<i32> {
    // 2x2 i32 tensor with row-major values 0, -1, -2, -3.
    let shape: Array<u32> = array![2, 2];
    let values: Array<i32> = array![0, -1, -2, -3];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x3_helper() -> Tensor<i32> {
    // 3x3 i32 tensor with row-major values 0..=8.
    let shape: Array<u32> = array![3, 3];
    let values: Array<i32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x3_neg_helper() -> Tensor<i32> {
    // 3x3 i32 tensor with row-major values 0, -1, ..., -8.
    let shape: Array<u32> = array![3, 3];
    let values: Array<i32> = array![0, -1, -2, -3, -4, -5, -6, -7, -8];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x2_helper() -> Tensor<i32> {
    // 3x2 i32 tensor with row-major values 0..=5.
    let shape: Array<u32> = array![3, 2];
    let values: Array<i32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x2_neg_helper() -> Tensor<i32> {
    // 3x2 i32 tensor with row-major values 0, -1, ..., -5.
    let shape: Array<u32> = array![3, 2];
    let values: Array<i32> = array![0, -1, -2, -3, -4, -5];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x1_helper() -> Tensor<i32> {
    // 3x1 i32 tensor [[0], [1], [2]].
    let shape: Array<u32> = array![3, 1];
    let values: Array<i32> = array![0, 1, 2];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x1_neg_helper() -> Tensor<i32> {
    // 3x1 i32 tensor [[0], [-1], [-2]].
    let shape: Array<u32> = array![3, 1];
    let values: Array<i32> = array![0, -1, -2];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_2x3_helper() -> Tensor<i32> {
    // 2x3 i32 tensor with row-major values 0..=5.
    let shape: Array<u32> = array![2, 3];
    let values: Array<i32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_2x3_neg_helper() -> Tensor<i32> {
    // 2x3 i32 tensor with row-major values 0, -1, ..., -5.
    let shape: Array<u32> = array![2, 3];
    let values: Array<i32> = array![0, -1, -2, -3, -4, -5];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_2x2x2_helper() -> Tensor<i32> {
    // 2x2x2 i32 tensor with row-major values 0..=7.
    let shape: Array<u32> = array![2, 2, 2];
    let values: Array<i32> = array![0, 1, 2, 3, 4, 5, 6, 7];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_2x2x2_neg_helper() -> Tensor<i32> {
    // 2x2x2 i32 tensor with row-major values 0, -1, ..., -7.
    let shape: Array<u32> = array![2, 2, 2];
    let values: Array<i32> = array![0, -1, -2, -3, -4, -5, -6, -7];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x2x2_helper() -> Tensor<i32> {
    // 3x2x2 i32 tensor with row-major values 0..=11.
    let shape: Array<u32> = array![3, 2, 2];
    let values: Array<i32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x2x2_neg_helper() -> Tensor<i32> {
    // 3x2x2 i32 tensor with row-major values 0, -1, ..., -11.
    let shape: Array<u32> = array![3, 2, 2];
    let values: Array<i32> = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x3x3_helper() -> Tensor<i32> {
    // 3x3x3 i32 tensor with row-major values 0..=26.
    let shape: Array<u32> = array![3, 3, 3];
    let values: Array<i32> = array![
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
        25, 26
    ];
    TensorTrait::new(shape.span(), values.span())
}
fn i32_tensor_3x3x3_neg_helper() -> Tensor<i32> {
    // 3x3x3 i32 tensor with row-major values 0, -1, ..., -26.
    let shape: Array<u32> = array![3, 3, 3];
    let values: Array<i32> = array![
        0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18, -19,
        -20, -21, -22, -23, -24, -25, -26
    ];
    TensorTrait::new(shape.span(), values.span())
}
use orion::operators::tensor::{TensorTrait, Tensor};
use orion::operators::tensor::I8Tensor;
fn i8_tensor_1x3_helper() -> Tensor<i8> {
    // 1-D i8 tensor [0, 1, 2].
    let shape: Array<u32> = array![3];
    let values: Array<i8> = array![0, 1, 2];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_1x3_neg_helper() -> Tensor<i8> {
    // 1-D i8 tensor [0, -1, -2].
    // Fix: the last element was `2` (positive). Every other `*_neg_helper` in
    // this file — including the i32 counterpart `i32_tensor_1x3_neg_helper`
    // ([0, -1, -2]) — negates all non-zero entries, so `2` was a typo for `-2`.
    let mut sizes: Array<u32> = array![3];
    let mut data: Array<i8> = array![0, -1, -2];
    let tensor = TensorTrait::new(sizes.span(), data.span());
    tensor
}
fn i8_tensor_2x2_helper() -> Tensor<i8> {
    // 2x2 i8 tensor with row-major values 0..=3.
    let shape: Array<u32> = array![2, 2];
    let values: Array<i8> = array![0, 1, 2, 3];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_2x2_neg_helper() -> Tensor<i8> {
    // 2x2 i8 tensor with row-major values 0, -1, -2, -3.
    let shape: Array<u32> = array![2, 2];
    let values: Array<i8> = array![0, -1, -2, -3];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x3_helper() -> Tensor<i8> {
    // 3x3 i8 tensor with row-major values 0..=8.
    let shape: Array<u32> = array![3, 3];
    let values: Array<i8> = array![0, 1, 2, 3, 4, 5, 6, 7, 8];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x3_neg_helper() -> Tensor<i8> {
    // 3x3 i8 tensor with row-major values 0, -1, ..., -8.
    let shape: Array<u32> = array![3, 3];
    let values: Array<i8> = array![0, -1, -2, -3, -4, -5, -6, -7, -8];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x2_helper() -> Tensor<i8> {
    // 3x2 i8 tensor with row-major values 0..=5.
    let shape: Array<u32> = array![3, 2];
    let values: Array<i8> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x2_neg_helper() -> Tensor<i8> {
    // 3x2 i8 tensor with row-major values 0, -1, ..., -5.
    let mut sizes: Array<u32> = array![3, 2];
    let mut data: Array<i8> = array![0, -1, -2, -3, -4, -5];
    let tensor = TensorTrait::new(sizes.span(), data.span());
    // Tail expression instead of `return tensor;` — matches every sibling helper.
    tensor
}
fn i8_tensor_3x1_helper() -> Tensor<i8> {
    // 3x1 i8 tensor [[0], [1], [2]].
    let shape: Array<u32> = array![3, 1];
    let values: Array<i8> = array![0, 1, 2];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x1_neg_helper() -> Tensor<i8> {
    // 3x1 i8 tensor [[0], [-1], [-2]].
    let shape: Array<u32> = array![3, 1];
    let values: Array<i8> = array![0, -1, -2];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_2x3_helper() -> Tensor<i8> {
    // 2x3 i8 tensor with row-major values 0..=5.
    let shape: Array<u32> = array![2, 3];
    let values: Array<i8> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_2x3_neg_helper() -> Tensor<i8> {
    // 2x3 i8 tensor with row-major values 0, -1, ..., -5.
    let shape: Array<u32> = array![2, 3];
    let values: Array<i8> = array![0, -1, -2, -3, -4, -5];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_2x2x2_helper() -> Tensor<i8> {
    // 2x2x2 i8 tensor with row-major values 0..=7.
    let shape: Array<u32> = array![2, 2, 2];
    let values: Array<i8> = array![0, 1, 2, 3, 4, 5, 6, 7];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_2x2x2_neg_helper() -> Tensor<i8> {
    // 2x2x2 i8 tensor with row-major values 0, -1, ..., -7.
    let shape: Array<u32> = array![2, 2, 2];
    let values: Array<i8> = array![0, -1, -2, -3, -4, -5, -6, -7];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x2x2_helper() -> Tensor<i8> {
    // 3x2x2 i8 tensor with row-major values 0..=11.
    let shape: Array<u32> = array![3, 2, 2];
    let values: Array<i8> = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x2x2_neg_helper() -> Tensor<i8> {
    // 3x2x2 i8 tensor with row-major values 0, -1, ..., -11.
    let shape: Array<u32> = array![3, 2, 2];
    let values: Array<i8> = array![0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x3x3_helper() -> Tensor<i8> {
    // 3x3x3 i8 tensor with row-major values 0..=26.
    let shape: Array<u32> = array![3, 3, 3];
    let values: Array<i8> = array![
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
        25, 26
    ];
    TensorTrait::new(shape.span(), values.span())
}
fn i8_tensor_3x3x3_neg_helper() -> Tensor<i8> {
    // 3x3x3 i8 tensor with row-major values 0, -1, ..., -26.
    let shape: Array<u32> = array![3, 3, 3];
    let values: Array<i8> = array![
        0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -17, -18, -19,
        -20, -21, -22, -23, -24, -25, -26
    ];
    TensorTrait::new(shape.span(), values.span())
}
use orion::operators::tensor::U32Tensor;
use orion::operators::tensor::{TensorTrait, Tensor};
fn u32_tensor_1x3_helper() -> Tensor<u32> {
    // 1-D u32 tensor [0, 1, 2].
    let shape: Array<u32> = array![3];
    let values: Array<u32> = array![0, 1, 2];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_2x2_helper() -> Tensor<u32> {
    // 2x2 u32 tensor with row-major values 0..=3.
    let shape: Array<u32> = array![2, 2];
    let values: Array<u32> = array![0, 1, 2, 3];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_3x3_helper() -> Tensor<u32> {
    // 3x3 u32 tensor with row-major values 0..=8.
    let shape: Array<u32> = array![3, 3];
    let values: Array<u32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_3x2_helper() -> Tensor<u32> {
    // 3x2 u32 tensor with row-major values 0..=5.
    let shape: Array<u32> = array![3, 2];
    let values: Array<u32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_3x1_helper() -> Tensor<u32> {
    // 3x1 u32 tensor [[0], [1], [2]].
    let shape: Array<u32> = array![3, 1];
    let values: Array<u32> = array![0, 1, 2];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_2x3_helper() -> Tensor<u32> {
    // 2x3 u32 tensor with row-major values 0..=5.
    let shape: Array<u32> = array![2, 3];
    let values: Array<u32> = array![0, 1, 2, 3, 4, 5];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_2x2x2_helper() -> Tensor<u32> {
    // 2x2x2 u32 tensor with row-major values 0..=7.
    let shape: Array<u32> = array![2, 2, 2];
    let values: Array<u32> = array![0, 1, 2, 3, 4, 5, 6, 7];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_3x2x2_helper() -> Tensor<u32> {
    // 3x2x2 u32 tensor with row-major values 0..=11.
    let shape: Array<u32> = array![3, 2, 2];
    let values: Array<u32> = array![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
fn u32_tensor_3x3x3_helper() -> Tensor<u32> {
    // 3x3x3 u32 tensor with row-major values 0..=26.
    let shape: Array<u32> = array![3, 3, 3];
    let values: Array<u32> = array![
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
        25, 26
    ];
    TensorTrait::<u32>::new(shape.span(), values.span())
}
use orion::operators::tensor::{Tensor, TensorTrait};
fn u32_max(a: u32, b: u32) -> u32 {
    // Larger of the two operands (ties return `b`; the value is identical either way).
    if b >= a {
        b
    } else {
        a
    }
}
fn saturate<T, impl TCopy: Copy<T>, impl TDrop: Drop<T>, impl PartialOrdT: PartialOrd<T>>(
    min: T, max: T, x: T
) -> T {
    // Clamp `x` into [min, max]; the `min` check runs first, exactly as before.
    if x < min {
        min
    } else {
        if x > max {
            max
        } else {
            x
        }
    }
}
fn assert_eq<T, impl TPartialEq: PartialEq<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    lhs: T, rhs: T
) {
    // Panic with 'should be equal' unless the two values compare equal.
    let equal = lhs == rhs;
    assert(equal, 'should be equal');
}
fn assert_seq_eq<T, impl TPartialEq: PartialEq<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    lhs: Array<T>, rhs: Array<T>
) {
    // Lengths must match, then every element is compared pairwise via assert_eq.
    assert(lhs.len() == rhs.len(), 'should be equal');
    let total = lhs.len();
    let mut idx = 0;
    while idx != total {
        assert_eq(lhs[idx], rhs[idx]);
        idx += 1;
    }
}
fn get_row<T, +Drop<T>, +Copy<T>>(self: @Tensor<T>, row: usize) -> Span<T> {
    // Slice one row out of a 2D tensor: elements [row * cols, row * cols + cols).
    assert((*self).shape.len() == 2, 'Expected a 2D tensor');
    let cols = *self.shape[1];
    (*self).data.slice(row * cols, cols)
}
|
mod numbers;
mod performance;
mod tensor_core;
mod nodes;
mod ml;
mod operators;
|
mod tree_ensemble_classifier;
mod tree_ensemble_regressor;
mod linear_regressor_test;
mod linear_classifier_test;
mod svm_regressor_test;
mod svm_classifier_test;
mod normalizer_test;
|
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::operators::ml::linear::linear_classifier::{
LinearClassifierTrait, POST_TRANSFORM, LinearClassifier
};
use core::debug::PrintTrait; |
// 3-class linear classifier with no post-transform: raw scores are checked
// against exact FP16x16 constants, and each label is the index of that row's
// largest score (row 0: 2.41 > 0.59 > -2.12 -> class 0, etc.).
fn test_linear_classifier_multi_none() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::NONE);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 2, 'labels[1]');
    assert(*labels[2] == 2, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 157942, sign: false }, '*scores[0] == 2.41');
    assert(*scores.data[1] == FP16x16 { mag: 138936, sign: true }, '*scores[1] == -2.12');
    assert(*scores.data[2] == FP16x16 { mag: 38666, sign: false }, '*scores[2] == 0.59');
    assert(*scores.data[3] == FP16x16 { mag: 43910, sign: false }, '*scores[3] == 0.67');
    assert(*scores.data[4] == FP16x16 { mag: 74710, sign: true }, '*scores[4] == -1.14');
    assert(*scores.data[5] == FP16x16 { mag: 88472, sign: false }, '*scores[5] == 1.35');
    assert(*scores.data[6] == FP16x16 { mag: 70122, sign: true }, '*scores[6] == -1.07');
    assert(*scores.data[7] == FP16x16 { mag: 10484, sign: true }, '*scores[7] == -0.16');
    assert(*scores.data[8] == FP16x16 { mag: 138278, sign: false }, '*scores[8] == 2.11');
}
// 3-class classifier with SOFTMAX post-transform: each expected row of scores
// is a probability distribution (e.g. row 0: 0.852656 + 0.009192 + 0.138152 = 1),
// and labels remain the per-row argmax.
fn test_linear_classifier_multi_softmax() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 2, 'labels[1]');
    assert(*labels[2] == 2, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 55879, sign: false }, '*scores[0] == 0.852656');
    assert(*scores.data[1] == FP16x16 { mag: 602, sign: false }, '*scores[1] == 0.009192');
    assert(*scores.data[2] == FP16x16 { mag: 9053, sign: false }, '*scores[2] == 0.138152');
    assert(*scores.data[3] == FP16x16 { mag: 20888, sign: false }, '*scores[3] == 0.318722');
    assert(*scores.data[4] == FP16x16 { mag: 3418, sign: false }, '*scores[4] == 0.05216');
    assert(*scores.data[5] == FP16x16 { mag: 41229, sign: false }, '*scores[5] == 0.629118');
    assert(*scores.data[6] == FP16x16 { mag: 2380, sign: false }, '*scores[6] == 0.036323');
    assert(*scores.data[7] == FP16x16 { mag: 5914, sign: false }, '*scores[7] == 0.090237');
    assert(*scores.data[8] == FP16x16 { mag: 57241, sign: false }, '*scores[8] == 0.87344');
}
// SOFTMAXZERO variant: with no zero-valued raw scores in this fixture, the
// expected values are identical to the plain SOFTMAX test above.
fn test_linear_classifier_multi_softmax_zero() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 2, 'labels[1]');
    assert(*labels[2] == 2, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 55879, sign: false }, '*scores[0] == 0.852656');
    assert(*scores.data[1] == FP16x16 { mag: 602, sign: false }, '*scores[1] == 0.009192');
    assert(*scores.data[2] == FP16x16 { mag: 9053, sign: false }, '*scores[2] == 0.138152');
    assert(*scores.data[3] == FP16x16 { mag: 20888, sign: false }, '*scores[3] == 0.318722');
    assert(*scores.data[4] == FP16x16 { mag: 3418, sign: false }, '*scores[4] == 0.05216');
    assert(*scores.data[5] == FP16x16 { mag: 41229, sign: false }, '*scores[5] == 0.629118');
    assert(*scores.data[6] == FP16x16 { mag: 2380, sign: false }, '*scores[6] == 0.036323');
    assert(*scores.data[7] == FP16x16 { mag: 5914, sign: false }, '*scores[7] == 0.090237');
    assert(*scores.data[8] == FP16x16 { mag: 57241, sign: false }, '*scores[8] == 0.87344');
}
// LOGISTIC post-transform: each raw score is squashed independently through a
// sigmoid (all expected values lie in (0, 1)); labels stay the per-row argmax.
fn test_linear_classifier_multi_logistic() {
    let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::LOGISTIC);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 0, 'labels[0] == 0');
    assert(*labels[1] == 2, 'labels[1] == 2');
    assert(*labels[2] == 2, 'labels[2] == 2');
    assert(labels.len() == 3, 'len(labels) == 3');
    assert(*scores.data[0] == FP16x16 { mag: 60135, sign: false }, '*scores[0] == 0.917587');
    assert(*scores.data[1] == FP16x16 { mag: 7023, sign: false }, '*scores[1] == 0.107168');
    assert(*scores.data[2] == FP16x16 { mag: 42163, sign: false }, '*scores[2] == 0.643365');
    assert(*scores.data[3] == FP16x16 { mag: 43351, sign: false }, '*scores[3] == 0.661503');
    assert(*scores.data[4] == FP16x16 { mag: 15881, sign: false }, '*scores[4] == 0.24232');
    assert(*scores.data[5] == FP16x16 { mag: 52043, sign: false }, '*scores[5] == 0.79413');
    assert(*scores.data[6] == FP16x16 { mag: 16738, sign: false }, '*scores[6] == 0.255403');
    assert(*scores.data[7] == FP16x16 { mag: 30152, sign: false }, '*scores[7] == 0.460085');
    assert(*scores.data[8] == FP16x16 { mag: 58450, sign: false }, '*scores[8] == 0.891871');
}
// Binary classifier, no post-transform: each row carries a mirrored +/- score
// pair (e.g. -9.53 / +9.53); positive class-1 score yields label 1 for both rows.
fn test_linear_classifier_binary_none() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::NONE);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 624559, sign: true }, '*scores[0] == -9.53');
    assert(*scores.data[1] == FP16x16 { mag: 624559, sign: false }, '*scores[1] == 9.53');
    assert(*scores.data[2] == FP16x16 { mag: 435817, sign: true }, '*scores[2] == -6.65');
    assert(*scores.data[3] == FP16x16 { mag: 435817, sign: false }, '*scores[3] == 6.65');
}
// Binary classifier with LOGISTIC post-transform: the two per-row
// probabilities are sigmoids of the mirrored raw scores and sum to ~1.
fn test_linear_classifier_binary_logistic() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::LOGISTIC);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 4, sign: false }, '*scores[0] == 7.263436e-05');
    assert(*scores.data[1] == FP16x16 { mag: 65532, sign: false }, '*scores[1] == 9.999274e-01');
    assert(*scores.data[2] == FP16x16 { mag: 84, sign: false }, '*scores[2] == 1.292350e-03');
    assert(*scores.data[3] == FP16x16 { mag: 65452, sign: false }, '*scores[3] == 9.999983e-01');
}
// Binary classifier with SOFTMAX: the score gap is so large that the FP16x16
// probabilities saturate to 0 and 1 (mag 0 and mag 65535).
fn test_linear_classifier_binary_softmax() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAX);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 0, sign: false }, '*scores[0] == 5.276517e-09');
    assert(*scores.data[1] == FP16x16 { mag: 65535, sign: false }, '*scores[1] == 1.000000');
    assert(*scores.data[2] == FP16x16 { mag: 0, sign: false }, '*scores[2] == 1.674492e-06');
    assert(*scores.data[3] == FP16x16 { mag: 65535, sign: false }, '*scores[3] == 9.999983e-01');
}
// SOFTMAXZERO variant of the binary softmax test: expected values are
// identical to the plain SOFTMAX case for this fixture.
fn test_linear_classifier_binary_softmax_zero() {
    let (mut classifier, X) = linear_classifier_helper_binary(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 0, sign: false }, '*scores[0] == 5.276517e-09');
    assert(*scores.data[1] == FP16x16 { mag: 65535, sign: false }, '*scores[1] == 1.000000');
    assert(*scores.data[2] == FP16x16 { mag: 0, sign: false }, '*scores[2] == 1.674492e-06');
    assert(*scores.data[3] == FP16x16 { mag: 65535, sign: false }, '*scores[3] == 9.999983e-01');
}
// Single-class classifier, no post-transform: one raw score per sample;
// label is 1 when the score is positive (2.23) and 0 when negative (-0.65).
fn test_linear_classifier_unary_none() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::NONE);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 146146, sign: false }, '*scores[0] == 2.23');
    assert(*scores.data[1] == FP16x16 { mag: 42596, sign: true }, '*scores[1] == -0.65');
}
// Single-class classifier with LOGISTIC: sigmoid of each raw score;
// label is 1 when the probability exceeds 0.5, else 0.
fn test_linear_classifier_unary_logistic() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::LOGISTIC);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 59173, sign: false }, '*scores[0] == 0.902911');
    assert(*scores.data[1] == FP16x16 { mag: 22479, sign: false }, '*scores[1] == 0.34299');
}
// Single-class classifier with SOFTMAX: softmax over a single class is
// identically 1, so every score is 1.0 and every label is 1.
fn test_linear_classifier_unary_softmax() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAX);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 65536, sign: false }, '*scores[0] == 1');
    assert(*scores.data[1] == FP16x16 { mag: 65536, sign: false }, '*scores[1] == 1');
}
// SOFTMAXZERO over a single class behaves like SOFTMAX here: every
// probability is 1.0 and every label is 1.
fn test_linear_classifier_unary_softmax_zero() {
    let (mut classifier, X) = linear_classifier_helper_unary(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X);
    assert(*labels[0] == 1, 'labels[0]');
    assert(*labels[1] == 1, 'labels[1]');
    assert(labels.len() == 2, 'len(labels)');
    assert(*scores.data[0] == FP16x16 { mag: 65536, sign: false }, '*scores[0] == 1');
    assert(*scores.data[1] == FP16x16 { mag: 65536, sign: false }, '*scores[1] == 1');
}
// Builds a 3-class LinearClassifier fixture (2 features per class, FP16x16
// weights: 6 coefficients = 3 classes x 2 features, one intercept per class)
// together with a 3x2 input X whose rows are [0,1], [2,3], [4,5]
// (mag values are multiples of 65536 = 1.0 in FP16x16).
fn linear_classifier_helper(
    post_transform: POST_TRANSFORM
) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
    let classlabels: Span<usize> = array![0, 1, 2].span();
    let classlabels = Option::Some(classlabels);
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 38011, sign: true },
        FP16x16 { mag: 19005, sign: true },
        FP16x16 { mag: 5898, sign: true },
        FP16x16 { mag: 38011, sign: false },
        FP16x16 { mag: 19005, sign: false },
        FP16x16 { mag: 5898, sign: false },
    ]
    .span();
    let intercepts: Span<FP16x16> = array![
        FP16x16 { mag: 176947, sign: false },
        FP16x16 { mag: 176947, sign: true },
        FP16x16 { mag: 32768, sign: false },
    ]
    .span();
    let intercepts = Option::Some(intercepts);
    // multi_class = 0: scores are kept as-is rather than forced one-vs-rest.
    let multi_class: usize = 0;
    let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
        classlabels, coefficients, intercepts, multi_class, post_transform
    };
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
        .span()
    );
    (classifier, X)
}
// Builds a binary LinearClassifier fixture (class labels {0, 1}, 3 features,
// a single intercept of 10.0 = 655360/65536) plus a 2x3 input X whose rows
// are [0,1,2] and [3,4,5] in FP16x16.
fn linear_classifier_helper_binary(
    post_transform: POST_TRANSFORM
) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
    let classlabels: Span<usize> = array![0, 1].span();
    let classlabels = Option::Some(classlabels);
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 38011, sign: true },
        FP16x16 { mag: 19005, sign: true },
        FP16x16 { mag: 5898, sign: true },
    ]
    .span();
    let intercepts: Span<FP16x16> = array![FP16x16 { mag: 655360, sign: false },].span();
    let intercepts = Option::Some(intercepts);
    let multi_class: usize = 0;
    let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
        classlabels, coefficients, intercepts, multi_class, post_transform
    };
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![2, 3].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
        .span()
    );
    (classifier, X)
}
// Builds a single-class LinearClassifier fixture (class label {1}, 3 features,
// intercept 2.7 = 176947/65536) plus a 2x3 input X whose rows are [0,1,2]
// and [3,4,5] in FP16x16.
fn linear_classifier_helper_unary(
    post_transform: POST_TRANSFORM
) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
    let classlabels: Span<usize> = array![1].span();
    let classlabels = Option::Some(classlabels);
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 38011, sign: true },
        FP16x16 { mag: 19005, sign: true },
        FP16x16 { mag: 5898, sign: true },
    ]
    .span();
    let intercepts: Span<FP16x16> = array![FP16x16 { mag: 176947, sign: false },].span();
    let intercepts = Option::Some(intercepts);
    let multi_class: usize = 0;
    let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
        classlabels, coefficients, intercepts, multi_class, post_transform
    };
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![2, 3].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
        .span()
    );
    (classifier, X)
}
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor, FP16x16TensorAdd};
use orion::operators::ml::linear::linear_regressor::{
LinearRegressorTrait, POST_TRANSFORM, LinearRegressor
};
use orion::numbers::{FP16x16, FixedTrait};
use core::debug::PrintTrait;
use orion::operators::nn::{NNTrait, FP16x16NN}; |
// Single-target LinearRegressor: X is 3x2 with rows [0,1], [2,3], [4,5];
// coefficients ~= [0.3, -0.77] (19661/65536, -50463/65536) and intercept 0.5.
// Row 0 check: 0*0.3 + 1*(-0.77) + 0.5 = -0.27, matching the first assert.
fn test_linear_regressor() {
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
        .span()
    );
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 19661, sign: false }, FP16x16 { mag: 50463, sign: true },
    ]
    .span();
    let intercepts: Span<FP16x16> = array![FP16x16 { mag: 32768, sign: false },].span();
    let intercepts = Option::Some(intercepts);
    let target: usize = 1;
    let post_transform = POST_TRANSFORM::NONE;
    let mut regressor: LinearRegressor<FP16x16> = LinearRegressor {
        coefficients, intercepts, target, post_transform
    };
    let scores = LinearRegressorTrait::predict(regressor, X);
    assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27');
    assert(*scores.data[1] == FP16x16 { mag: 79299, sign: true }, '*scores[1] == -1.21');
    assert(*scores.data[2] == FP16x16 { mag: 140903, sign: true }, '*scores[2] == -2.15');
}
// Two-target LinearRegressor (target = 2): both targets reuse the same
// [0.3, -0.77] weights but different intercepts (0.5 and 0.7), so each input
// row yields two interleaved predictions in `scores`.
fn test_linear_regressor_2() {
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 2].span(),
        array![
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 65536, sign: false },
            FP16x16 { mag: 131072, sign: false },
            FP16x16 { mag: 196608, sign: false },
            FP16x16 { mag: 262144, sign: false },
            FP16x16 { mag: 327680, sign: false },
        ]
        .span()
    );
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 19661, sign: false },
        FP16x16 { mag: 50463, sign: true },
        FP16x16 { mag: 19661, sign: false },
        FP16x16 { mag: 50463, sign: true },
    ]
    .span();
    let intercepts: Span<FP16x16> = array![
        FP16x16 { mag: 32768, sign: false }, FP16x16 { mag: 45875, sign: false },
    ]
    .span();
    let intercepts = Option::Some(intercepts);
    let target = 2;
    let post_transform = POST_TRANSFORM::NONE;
    let mut regressor: LinearRegressor<FP16x16> = LinearRegressor {
        coefficients, intercepts, target, post_transform
    };
    let scores = LinearRegressorTrait::predict(regressor, X);
    assert(*scores.data[0] == FP16x16 { mag: 17695, sign: true }, '*scores[0] == -0.27');
    assert(*scores.data[1] == FP16x16 { mag: 4588, sign: true }, '*scores[1] == -0.07');
    assert(*scores.data[2] == FP16x16 { mag: 79299, sign: true }, '*scores[2] == -1.21');
    assert(*scores.data[3] == FP16x16 { mag: 66192, sign: true }, '*scores[3] == -1.01');
    assert(*scores.data[4] == FP16x16 { mag: 140903, sign: true }, '*scores[4] == -2.15');
    assert(*scores.data[5] == FP16x16 { mag: 127796, sign: true }, '*scores[5] == -1.95');
}
use orion::operators::ml::normalizer::normalizer::{NormalizerTrait, NORM};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::numbers::FP16x16;
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorPartialEq
}; |
fn test_normalizer_max() {
    // NORM::MAX divides each element by its row's maximum absolute value
    // (row 1: [-0.4, -0.2, 0] / 0.4 -> [-1, -0.5, 0]); the input is a 3x3
    // FP16x16 tensor with values -1.0 .. 0.6 in steps of 0.2.
    let X = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false }
        ]
            .span()
    );
    let expected_output = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 32768, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 21845, sign: false },
            FP16x16 { mag: 43690, sign: false },
            FP16x16 { mag: 65536, sign: false }
        ]
            .span()
    );
    let actual_output = NormalizerTrait::predict(X, NORM::MAX);
    assert_eq(actual_output, expected_output);
}
fn test_normalizer_l1() {
let mut shape = ArrayTrait::<usize>::new();
shape.append(3);
shape.append(3);
let mut data = ArrayTrait::new();
data.append(FP16x16 { mag: 65536, sign: true });
data.append(FP16x16 { mag: 52428, sign: true });
data.append(FP16x16 { mag: 39321, sign: true });
data.append(FP16x16 { mag: 26214, sign: true });
data.append(FP16x16 { mag: 13107, sign: true });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 13107, sign: false });
data.append(FP16x16 { mag: 26214, sign: false });
data.append(FP16x16 { mag: 39321, sign: false });
let X = TensorTrait::new(shape.span(), data.span());
let mut shape = ArrayTrait::<usize>::new();
shape.append(3);
shape.append(3);
let mut data = ArrayTrait::new();
data.append(FP16x16 { mag: 27306, sign: true });
data.append(FP16x16 { mag: 21845, sign: true });
data.append(FP16x16 { mag: 16384, sign: true });
data.append(FP16x16 { mag: 43690, sign: true });
data.append(FP16x16 { mag: 21845, sign: true });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 10922, sign: false });
data.append(FP16x16 { mag: 21845, sign: false });
data.append(FP16x16 { mag: 32768, sign: false });
let expected_output = TensorTrait::new(shape.span(), data.span());
let actual_output = NormalizerTrait::predict(X, NORM::L1);
assert_eq(actual_output, expected_output);
} |
fn test_normalizer_l2() {
let mut shape = ArrayTrait::<usize>::new();
shape.append(3);
shape.append(3);
let mut data = ArrayTrait::new();
data.append(FP16x16 { mag: 65536, sign: true });
data.append(FP16x16 { mag: 52428, sign: true });
data.append(FP16x16 { mag: 39321, sign: true });
data.append(FP16x16 { mag: 26214, sign: true });
data.append(FP16x16 { mag: 13107, sign: true });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 13107, sign: false });
data.append(FP16x16 { mag: 26214, sign: false });
data.append(FP16x16 { mag: 39321, sign: false });
let X = TensorTrait::new(shape.span(), data.span());
let mut shape = ArrayTrait::<usize>::new();
shape.append(3);
shape.append(3);
let mut data = ArrayTrait::new();
data.append(FP16x16 { mag: 46340, sign: true });
data.append(FP16x16 { mag: 37072, sign: true });
data.append(FP16x16 { mag: 27804, sign: true });
data.append(FP16x16 { mag: 58617, sign: true });
data.append(FP16x16 { mag: 29308, sign: true });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 17515, sign: false });
data.append(FP16x16 { mag: 35030, sign: false });
data.append(FP16x16 { mag: 52545, sign: false });
let expected_output = TensorTrait::new(shape.span(), data.span());
let actual_output = NormalizerTrait::predict(X, NORM::L2);
assert_eq(actual_output, expected_output);
} |
fn test_normalizer_max_avoid_div_zero() {
let mut shape = ArrayTrait::<usize>::new();
shape.append(3);
shape.append(3);
let mut data = ArrayTrait::new();
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
let X = TensorTrait::new(shape.span(), data.span());
let mut shape = ArrayTrait::<usize>::new();
shape.append(3);
shape.append(3);
let mut data = ArrayTrait::new();
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
data.append(FP16x16 { mag: 0, sign: false });
let expected_output = TensorTrait::new(shape.span(), data.span());
let actual_output = NormalizerTrait::predict(X, NORM::MAX);
assert_eq(actual_output, expected_output);
} |
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::numbers::FP64x64;
use orion::operators::tensor::implementations::tensor_fp64x64::{
FP64x64Tensor, FP64x64TensorPartialEq
};
use orion::operators::ml::svm::svm_classifier::{SVMClassifierTrait, POST_TRANSFORM, SVMClassifier};
use orion::operators::ml::svm::core::{KERNEL_TYPE}; |
fn test_svm_classifier_noprob_linear_sv_none() {
let post_transform = POST_TRANSFORM::NONE;
let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 0, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 2].span(),
array![
FP16x16 { mag: 174499, sign: true },
FP16x16 { mag: 174499, sign: false },
FP16x16 { mag: 145149, sign: true },
FP16x16 { mag: 145149, sign: false },
FP16x16 { mag: 115799, sign: true },
FP16x16 { mag: 115799, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_noprob_linear_sv_logistic() {
let post_transform = POST_TRANSFORM::LOGISTIC;
let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 0, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 2].span(),
array![
FP16x16 { mag: 4273, sign: false },
FP16x16 { mag: 61262, sign: false },
FP16x16 { mag: 6450, sign: false },
FP16x16 { mag: 59085, sign: false },
FP16x16 { mag: 9563, sign: false },
FP16x16 { mag: 55972, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_noprob_linear_sv_softmax() {
let post_transform = POST_TRANSFORM::SOFTMAX;
let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 0, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 2].span(),
array![
FP16x16 { mag: 317, sign: false },
FP16x16 { mag: 65218, sign: false },
FP16x16 { mag: 771, sign: false },
FP16x16 { mag: 64764, sign: false },
FP16x16 { mag: 1858, sign: false },
FP16x16 { mag: 63677, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_noprob_linear_sv_softmax_zero() {
let post_transform = POST_TRANSFORM::SOFTMAXZERO;
let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 0, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 2].span(),
array![
FP16x16 { mag: 317, sign: false },
FP16x16 { mag: 65218, sign: false },
FP16x16 { mag: 771, sign: false },
FP16x16 { mag: 64764, sign: false },
FP16x16 { mag: 1858, sign: false },
FP16x16 { mag: 63677, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_noprob_linear_none() {
let post_transform = POST_TRANSFORM::NONE;
let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 7738, sign: true },
FP16x16 { mag: 29929, sign: true },
FP16x16 { mag: 27248, sign: false },
FP16x16 { mag: 21922, sign: false },
FP16x16 { mag: 4021, sign: true },
FP16x16 { mag: 15167, sign: true },
FP16x16 { mag: 4843, sign: false },
FP16x16 { mag: 5979, sign: false },
FP16x16 { mag: 304, sign: true },
FP16x16 { mag: 406, sign: true },
FP16x16 { mag: 17562, sign: true },
FP16x16 { mag: 9962, sign: true },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_noprob_linear_logistic() {
let post_transform = POST_TRANSFORM::LOGISTIC;
let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 30835, sign: false },
FP16x16 { mag: 25413, sign: false },
FP16x16 { mag: 39483, sign: false },
FP16x16 { mag: 38197, sign: false },
FP16x16 { mag: 31762, sign: false },
FP16x16 { mag: 28992, sign: false },
FP16x16 { mag: 33978, sign: false },
FP16x16 { mag: 34261, sign: false },
FP16x16 { mag: 32691, sign: false },
FP16x16 { mag: 32666, sign: false },
FP16x16 { mag: 28403, sign: false },
FP16x16 { mag: 30282, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_noprob_linear_softmax() {
let post_transform = POST_TRANSFORM::SOFTMAX;
let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 13131, sign: false },
FP16x16 { mag: 9359, sign: false },
FP16x16 { mag: 22396, sign: false },
FP16x16 { mag: 20648, sign: false },
FP16x16 { mag: 15779, sign: false },
FP16x16 { mag: 13311, sign: false },
FP16x16 { mag: 18064, sign: false },
FP16x16 { mag: 18380, sign: false },
FP16x16 { mag: 18054, sign: false },
FP16x16 { mag: 18026, sign: false },
FP16x16 { mag: 13874, sign: false },
FP16x16 { mag: 15580, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_noprob_linear_softmax_zero() {
let post_transform = POST_TRANSFORM::SOFTMAXZERO;
let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 13131, sign: false },
FP16x16 { mag: 9359, sign: false },
FP16x16 { mag: 22396, sign: false },
FP16x16 { mag: 20648, sign: false },
FP16x16 { mag: 15779, sign: false },
FP16x16 { mag: 13311, sign: false },
FP16x16 { mag: 18064, sign: false },
FP16x16 { mag: 18380, sign: false },
FP16x16 { mag: 18054, sign: false },
FP16x16 { mag: 18026, sign: false },
FP16x16 { mag: 13874, sign: false },
FP16x16 { mag: 15580, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_linear_none() {
let post_transform = POST_TRANSFORM::NONE;
let (mut classifier, X) = svm_classifier_helper_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 7738, sign: true },
FP16x16 { mag: 29929, sign: true },
FP16x16 { mag: 27248, sign: false },
FP16x16 { mag: 21922, sign: false },
FP16x16 { mag: 4021, sign: true },
FP16x16 { mag: 15167, sign: true },
FP16x16 { mag: 4843, sign: false },
FP16x16 { mag: 5979, sign: false },
FP16x16 { mag: 304, sign: true },
FP16x16 { mag: 406, sign: true },
FP16x16 { mag: 17562, sign: true },
FP16x16 { mag: 9962, sign: true },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_linear_logistic() {
let post_transform = POST_TRANSFORM::LOGISTIC;
let (mut classifier, X) = svm_classifier_helper_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 30835, sign: false },
FP16x16 { mag: 25413, sign: false },
FP16x16 { mag: 39483, sign: false },
FP16x16 { mag: 38197, sign: false },
FP16x16 { mag: 31762, sign: false },
FP16x16 { mag: 28992, sign: false },
FP16x16 { mag: 33978, sign: false },
FP16x16 { mag: 34261, sign: false },
FP16x16 { mag: 32691, sign: false },
FP16x16 { mag: 32666, sign: false },
FP16x16 { mag: 28403, sign: false },
FP16x16 { mag: 30282, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_linear_softmax() {
let post_transform = POST_TRANSFORM::SOFTMAX;
let (mut classifier, X) = svm_classifier_helper_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 13131, sign: false },
FP16x16 { mag: 9359, sign: false },
FP16x16 { mag: 22396, sign: false },
FP16x16 { mag: 20648, sign: false },
FP16x16 { mag: 15779, sign: false },
FP16x16 { mag: 13311, sign: false },
FP16x16 { mag: 18064, sign: false },
FP16x16 { mag: 18380, sign: false },
FP16x16 { mag: 18054, sign: false },
FP16x16 { mag: 18026, sign: false },
FP16x16 { mag: 13874, sign: false },
FP16x16 { mag: 15580, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_linear_softmax_zero() {
let post_transform = POST_TRANSFORM::SOFTMAXZERO;
let (mut classifier, X) = svm_classifier_helper_linear(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 2, 'labels[0]');
assert(*labels[1] == 3, 'labels[1]');
assert(*labels[2] == 0, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 4].span(),
array![
FP16x16 { mag: 13131, sign: false },
FP16x16 { mag: 9359, sign: false },
FP16x16 { mag: 22396, sign: false },
FP16x16 { mag: 20648, sign: false },
FP16x16 { mag: 15779, sign: false },
FP16x16 { mag: 13311, sign: false },
FP16x16 { mag: 18064, sign: false },
FP16x16 { mag: 18380, sign: false },
FP16x16 { mag: 18054, sign: false },
FP16x16 { mag: 18026, sign: false },
FP16x16 { mag: 13874, sign: false },
FP16x16 { mag: 15580, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_binary_none_fp64x64() {
let post_transform = POST_TRANSFORM::NONE;
let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 1, 'labels[1]');
assert(*labels[2] == 1, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP64x64> = TensorTrait::new(
array![3, 2].span(),
array![
FP64x64 { mag: 18322911080742739968, sign: false },
FP64x64 { mag: 123832992966812224, sign: false },
FP64x64 { mag: 8658920114943337472, sign: false },
FP64x64 { mag: 9787823958766215168, sign: false },
FP64x64 { mag: 276645820873422144, sign: false },
FP64x64 { mag: 18170098252836128768, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_binary_logistic_fp64x64() {
let post_transform = POST_TRANSFORM::LOGISTIC;
let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 1, 'labels[1]');
assert(*labels[2] == 1, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP64x64> = TensorTrait::new(
array![3, 2].span(),
array![
FP64x64 { mag: 13461271680116586496, sign: false },
FP64x64 { mag: 9254325673410459648, sign: false },
FP64x64 { mag: 11349211717397211136, sign: false },
FP64x64 { mag: 11614494343921229824, sign: false },
FP64x64 { mag: 9292528880387112960, sign: false },
FP64x64 { mag: 13431074360067923968, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_binary_softmax_fp64x64() {
let post_transform = POST_TRANSFORM::SOFTMAX;
let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 1, 'labels[1]');
assert(*labels[2] == 1, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP64x64> = TensorTrait::new(
array![3, 2].span(),
array![
FP64x64 { mag: 13436811297474848768, sign: false },
FP64x64 { mag: 5009932776234703872, sign: false },
FP64x64 { mag: 8941229086247388160, sign: false },
FP64x64 { mag: 9505514987462162432, sign: false },
FP64x64 { mag: 5070622564237207552, sign: false },
FP64x64 { mag: 13376121509472344064, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_classifier_binary_softmax_zero_fp64x64() {
let post_transform = POST_TRANSFORM::SOFTMAXZERO;
let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform);
let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X);
assert(*labels[0] == 0, 'labels[0]');
assert(*labels[1] == 1, 'labels[1]');
assert(*labels[2] == 1, 'labels[2]');
assert(labels.len() == 3, 'len(labels)');
let mut expected_scores: Tensor<FP64x64> = TensorTrait::new(
array![3, 2].span(),
array![
FP64x64 { mag: 13436811297474848768, sign: false },
FP64x64 { mag: 5009932776234703872, sign: false },
FP64x64 { mag: 8941229086247388160, sign: false },
FP64x64 { mag: 9505514987462162432, sign: false },
FP64x64 { mag: 5070622564237207552, sign: false },
FP64x64 { mag: 13376121509472344064, sign: false }
]
.span()
);
assert_eq(scores, expected_scores);
}
fn svm_classifier_helper_linear(
post_transform: POST_TRANSFORM
) -> (SVMClassifier<FP16x16>, Tensor<FP16x16>) {
let coefficients: Span<FP16x16> = array![
FP16x16 { mag: 10169, sign: true },
FP16x16 { mag: 15905, sign: false },
FP16x16 { mag: 459, sign: false },
FP16x16 { mag: 26713, sign: false },
FP16x16 { mag: 2129, sign: true },
FP16x16 { mag: 18, sign: false },
FP16x16 { mag: 12830, sign: true },
FP16x16 { mag: 23097, sign: true },
FP16x16 { mag: 1415, sign: true },
FP16x16 { mag: 28717, sign: true },
FP16x16 { mag: 2994, sign: false },
FP16x16 { mag: 847, sign: true }
]
.span();
let kernel_params: Span<FP16x16> = array![
FP16x16 { mag: 65, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::LINEAR;
let prob_a: Span<FP16x16> = array![FP16x16 { mag: 336797, sign: true }].span |
();
let prob_b: Span<FP16x16> = array![FP16x16 { mag: 4194, sign: false }].span();
let rho: Span<FP16x16> = array![
FP16x16 { mag: 4908, sign: true },
FP16x16 { mag: 11563, sign: true },
FP16x16 { mag: 13872, sign: true },
FP16x16 { mag: 33829, sign: true }
]
.span();
let support_vectors: Span<FP16x16> = array![].span();
let classlabels: Span<usize> = array![0, 1, 2, 3].span();
let vectors_per_class = Option::None;
let mut classifier: SVMClassifier<FP16x16> = SVMClassifier {
classlabels,
coefficients,
kernel_params,
kernel_type,
post_transform,
prob_a,
prob_b,
rho,
support_vectors,
vectors_per_class,
};
let mut X: Tensor<FP16x16> = TensorTrait::new(
array![3, 3].span(),
array![
FP16x16 { mag: 65536, sign: true },
FP16x16 { mag: 52428, sign: true },
FP16x16 { mag: 39321, sign: true },
FP16x16 { mag: 26214, sign: true },
FP16x16 { mag: 13107, sign: true },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 13107, sign: false },
FP16x16 { mag: 26214, sign: false },
FP16x16 { mag: 39321, sign: false },
]
.span()
);
(classifier, X)
}
fn svm_classifier_binary_noprob_linear_sv(
post_transform: POST_TRANSFORM
) -> (SVMClassifier<FP16x16>, Tensor<FP16x16>) {
let coefficients: Span<FP16x16> = array![
FP16x16 { mag: 50226, sign: false },
FP16x16 { mag: 5711, sign: false },
FP16x16 { mag: 7236, sign: false },
FP16x16 { mag: 63175, sign: true }
]
.span();
let kernel_params: Span<FP16x16> = array![
FP16x16 { mag: 8025, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::LINEAR;
let prob_a: Span<FP16x16> = array![].span();
let prob_b: Span<FP16x1 |
6> = array![].span();
let rho: Span<FP16x16> = array![FP16x16 { mag: 146479, sign: false }].span();
let support_vectors: Span<FP16x16> = array![
FP16x16 { mag: 314572, sign: false },
FP16x16 { mag: 222822, sign: false },
FP16x16 { mag: 124518, sign: false },
FP16x16 { mag: 327680, sign: false },
FP16x16 { mag: 196608, sign: false },
FP16x16 { mag: 104857, sign: false },
FP16x16 { mag: 294912, sign: false },
FP16x16 { mag: 150732, sign: false },
FP16x16 { mag: 85196, sign: false },
FP16x16 { mag: 334233, sign: false },
FP16x16 { mag: 163840, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let classlabels: Span<usize> = array![0, 1].span();
let vectors_per_class = Option::Some(array![3, 1].span());
let mut classifier: SVMClassifier<FP16x16> = SVMClassifier {
classlabels,
coefficients,
kernel_params,
kernel_type,
post_transform,
prob_a,
prob_b,
rho,
support_vectors,
vectors_per_class,
};
let mut X: Tensor<FP16x16> = TensorTrait::new(
array![3, 3].span(),
array![
FP16x16 { mag: 65536, sign: true },
FP16x16 { mag: 52428, sign: true },
FP16x16 { mag: 39321, sign: true },
FP16x16 { mag: 26214, sign: true },
FP16x16 { mag: 13107, sign: true },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 13107, sign: false },
FP16x16 { mag: 26214, sign: false },
FP16x16 { mag: 39321, sign: false },
]
.span()
);
(classifier, X)
}
fn svm_classifier_helper_noprob_linear(
post_transform: POST_TRANSFORM
) -> (SVMClassifier<FP16x16>, Tensor<FP16x16>) {
let coefficients: Span<FP16x16> = array![
FP16x16 { mag: 10169, sign: true },
FP16x16 { mag: 15905, sign: false },
FP16x16 { mag: 459, sign: false },
FP16x16 { mag: 2671 |
3, sign: false },
FP16x16 { mag: 2129, sign: true },
FP16x16 { mag: 18, sign: false },
FP16x16 { mag: 12830, sign: true },
FP16x16 { mag: 23097, sign: true },
FP16x16 { mag: 1415, sign: true },
FP16x16 { mag: 28717, sign: true },
FP16x16 { mag: 2994, sign: false },
FP16x16 { mag: 847, sign: true }
]
.span();
let kernel_params: Span<FP16x16> = array![
FP16x16 { mag: 65, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::LINEAR;
let prob_a: Span<FP16x16> = array![].span();
let prob_b: Span<FP16x16> = array![].span();
let rho: Span<FP16x16> = array![
FP16x16 { mag: 4908, sign: true },
FP16x16 { mag: 11563, sign: true },
FP16x16 { mag: 13872, sign: true },
FP16x16 { mag: 33829, sign: true }
]
.span();
let support_vectors: Span<FP16x16> = array![].span();
let classlabels: Span<usize> = array![0, 1, 2, 3].span();
let vectors_per_class = Option::None;
let mut classifier: SVMClassifier<FP16x16> = SVMClassifier {
classlabels,
coefficients,
kernel_params,
kernel_type,
post_transform,
prob_a,
prob_b,
rho,
support_vectors,
vectors_per_class,
};
let mut X: Tensor<FP16x16> = TensorTrait::new(
array![3, 3].span(),
array![
FP16x16 { mag: 65536, sign: true },
FP16x16 { mag: 52428, sign: true },
FP16x16 { mag: 39321, sign: true },
FP16x16 { mag: 26214, sign: true },
FP16x16 { mag: 13107, sign: true },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 13107, sign: false },
FP16x16 { mag: 26214, sign: false },
FP16x16 { mag: 39321, sign: false },
]
.span()
);
(classifier, X)
}
fn svm_classifier_helper_fp64x64(
post_transform: |
POST_TRANSFORM
) -> (SVMClassifier<FP64x64>, Tensor<FP64x64>) {
let coefficients: Span<FP64x64> = array![
FP64x64 { mag: 18446744073709551616, sign: false },
FP64x64 { mag: 18446744073709551616, sign: false },
FP64x64 { mag: 18446744073709551616, sign: false },
FP64x64 { mag: 18446744073709551616, sign: false },
FP64x64 { mag: 18446744073709551616, sign: true },
FP64x64 { mag: 18446744073709551616, sign: true },
FP64x64 { mag: 18446744073709551616, sign: true },
FP64x64 { mag: 18446744073709551616, sign: true }
]
.span();
let kernel_params: Span<FP64x64> = array![
FP64x64 { mag: 7054933896252620800, sign: false },
FP64x64 { mag: 0, sign: false },
FP64x64 { mag: 55340232221128654848, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::RBF;
let prob_a: Span<FP64x64> = array![FP64x64 { mag: 94799998099962986496, sign: true }].span();
let prob_b: Span<FP64x64> = array![FP64x64 { mag: 1180576833385529344, sign: false }].span();
let rho: Span<FP64x64> = array![FP64x64 { mag: 3082192501545631744, sign: false }].span();
let support_vectors: Span<FP64x64> = array![
FP64x64 { mag: 3528081300248330240, sign: false },
FP64x64 { mag: 19594207602596118528, sign: true },
FP64x64 { mag: 9235613999318433792, sign: false },
FP64x64 { mag: 10869715877100519424, sign: true },
FP64x64 { mag: 5897111318564962304, sign: true },
FP64x64 { mag: 1816720038917308416, sign: false },
FP64x64 { mag: 4564890528671334400, sign: false },
FP64x64 { mag: 21278987070814027776, sign: true },
FP64x64 { mag: 7581529597213147136, sign: false },
FP64x64 { mag: 10953113834067329024, sign: true },
FP64x64 { mag: 24318984989010034688, sign: true },
FP64x64 { mag: 30296187483321270272, sign: true },
FP64x64 { mag: 10305112258191032320, sign: false },
FP64x64 { mag: 17005441559857987584, sign: true }, |
FP64x64 { mag: 11555205301925838848, sign: false },
FP64x64 { mag: 2962701975885447168, sign: true },
FP64x64 { mag: 11741665981322231808, sign: true },
FP64x64 { mag: 15376232508819505152, sign: false },
FP64x64 { mag: 13908474645692022784, sign: false },
FP64x64 { mag: 7323415394302033920, sign: true },
FP64x64 { mag: 3284258824352956416, sign: true },
FP64x64 { mag: 11374683084831064064, sign: true },
FP64x64 { mag: 9087138148126818304, sign: false },
FP64x64 { mag: 8247488946750095360, sign: false }
]
.span();
let classlabels: Span<usize> = array![0, 1].span();
let vectors_per_class = Option::Some(array![4, 4].span());
let mut classifier: SVMClassifier<FP64x64> = SVMClassifier {
classlabels,
coefficients,
kernel_params,
kernel_type,
post_transform,
prob_a,
prob_b,
rho,
support_vectors,
vectors_per_class,
};
let mut X: Tensor<FP64x64> = TensorTrait::new(
array![3, 3].span(),
array![
FP64x64 { mag: 18446744073709551616, sign: true },
FP64x64 { mag: 14757395258967642112, sign: true },
FP64x64 { mag: 11068046444225730560, sign: true },
FP64x64 { mag: 7378697629483821056, sign: true },
FP64x64 { mag: 3689348814741910528, sign: true },
FP64x64 { mag: 0, sign: false },
FP64x64 { mag: 3689348814741910528, sign: false },
FP64x64 { mag: 7378697629483821056, sign: false },
FP64x64 { mag: 11068046444225730560, sign: false }
]
.span()
);
(classifier, X)
} |
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::utils::{assert_eq, assert_seq_eq};
use orion::operators::tensor::FP16x16TensorPartialEq;
use orion::operators::ml::svm::svm_regressor::{SVMRegressorTrait, POST_TRANSFORM, SVMRegressor};
use orion::operators::ml::svm::core::{KERNEL_TYPE}; |
fn test_svm_regressor_linear() {
let kernel_params: Span<FP16x16> = array![
FP16x16 { mag: 27812, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::LINEAR;
let (mut regressor, X) = svm_regressor_helper(kernel_type, kernel_params);
let scores = SVMRegressorTrait::predict(ref regressor, X);
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 1].span(),
array![
FP16x16 { mag: 30684, sign: true },
FP16x16 { mag: 14908, sign: false },
FP16x16 { mag: 60501, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_regressor_poly() {
let kernel_params: Span<FP16x16> = array![
FP16x16 { mag: 22456, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::POLY;
let (mut regressor, X) = svm_regressor_helper(kernel_type, kernel_params);
let scores = SVMRegressorTrait::predict(ref regressor, X);
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 1].span(),
array![
FP16x16 { mag: 34542, sign: false },
FP16x16 { mag: 35623, sign: false },
FP16x16 { mag: 35815, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_regressor_rbf() {
let kernel_params: Span<FP16x16> = array![
FP16x16 { mag: 19848, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::RBF;
let (mut regressor, X) = svm_regressor_helper(kernel_type, kernel_params);
let scores = SVMRegressorTrait::predict(ref regressor, X);
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 1].span(),
array![
FP16x16 { mag: 19376, sign: false },
FP16x16 { mag: 31318, sign: false },
FP16x16 { mag: 45566, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_regressor_sigmoid() {
let kernel_params: Span<FP16x16> = array![
FP16x16 { mag: 20108, sign: false },
FP16x16 { mag: 0, sign: false },
FP16x16 { mag: 196608, sign: false }
]
.span();
let kernel_type = KERNEL_TYPE::SIGMOID;
let (mut regressor, X) = svm_regressor_helper(kernel_type, kernel_params);
let scores = SVMRegressorTrait::predict(ref regressor, X);
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 1].span(),
array![
FP16x16 { mag: 15683, sign: false },
FP16x16 { mag: 29421, sign: false },
FP16x16 { mag: 43364, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
fn test_svm_regressor_linear_one_class_0() {
let post_transform = POST_TRANSFORM::NONE;
let one_class = 0;
let (mut regressor, X) = svm_regressor_linear_helper(post_transform, one_class);
let scores = SVMRegressorTrait::predict(ref regressor, X);
let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
array![3, 1].span(),
array![
FP16x16 { mag: 63484, sign: false },
FP16x16 { mag: 74218, sign: false },
FP16x16 { mag: 84953, sign: false },
]
.span()
);
assert_eq(scores, expected_scores);
} |
// Linear-kernel SVMRegressor in one-class mode (one_class = 1): every sample is
// expected to score as one — FP16x16 mag 65536, i.e. ~1.0 in Q16.16.
fn test_svm_regressor_linear_one_class_1() {
    let post_transform = POST_TRANSFORM::NONE;
    let one_class = 1;
    let (mut regressor, X) = svm_regressor_linear_helper(post_transform, one_class);
    let scores = SVMRegressorTrait::predict(ref regressor, X);

    // Build the 3x1 expected tensor: a constant "one" for each of the 3 input rows.
    let one = FP16x16 { mag: 65536, sign: false };
    let mut expected_data: Array<FP16x16> = array![];
    expected_data.append(one);
    expected_data.append(one);
    expected_data.append(one);
    let mut expected_scores: Tensor<FP16x16> = TensorTrait::new(
        array![3, 1].span(), expected_data.span()
    );

    assert_eq(scores, expected_scores);
}
// Builds a 7-support-vector SVMRegressor fixture (3 features per vector, so the
// support_vectors span holds 7 * 3 = 21 FP16x16 values) plus a 3x3 input tensor X,
// for the kernel-specific tests above. post_transform is fixed to NONE; the caller
// supplies the kernel type and its [gamma, coef0, degree] parameters.
// NOTE(review): the `sign` field of one support-vector literal was split across two
// lines by the extraction process ("s" / "ign: false") — rejoined here.
fn svm_regressor_helper(
    kernel_type: KERNEL_TYPE, kernel_params: Span<FP16x16>
) -> (SVMRegressor<FP16x16>, Tensor<FP16x16>) {
    // One dual coefficient per support vector.
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 65536, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 54959, sign: false },
        FP16x16 { mag: 54959, sign: true },
        FP16x16 { mag: 29299, sign: false },
        FP16x16 { mag: 65536, sign: true },
        FP16x16 { mag: 36236, sign: false }
    ]
        .span();
    let n_supports: usize = 7;
    let one_class: usize = 0;
    // Bias term of the decision function.
    let rho: Span<FP16x16> = array![FP16x16 { mag: 35788, sign: false }].span();
    // 7 support vectors x 3 features, row-major.
    let support_vectors: Span<FP16x16> = array![
        FP16x16 { mag: 8421, sign: true },
        FP16x16 { mag: 5842, sign: false },
        FP16x16 { mag: 4510, sign: false },
        FP16x16 { mag: 5202, sign: true },
        FP16x16 { mag: 14783, sign: true },
        FP16x16 { mag: 17380, sign: true },
        FP16x16 { mag: 60595, sign: false },
        FP16x16 { mag: 1674, sign: true },
        FP16x16 { mag: 38669, sign: true },
        FP16x16 { mag: 63803, sign: false },
        FP16x16 { mag: 87720, sign: true },
        FP16x16 { mag: 22236, sign: false },
        FP16x16 { mag: 61816, sign: false },
        FP16x16 { mag: 34267, sign: true },
        FP16x16 { mag: 36418, sign: false },
        FP16x16 { mag: 27471, sign: false },
        FP16x16 { mag: 28421, sign: false },
        FP16x16 { mag: 69270, sign: true },
        FP16x16 { mag: 152819, sign: false },
        FP16x16 { mag: 4065, sign: false },
        FP16x16 { mag: 62274, sign: true }
    ]
        .span();
    let post_transform = POST_TRANSFORM::NONE;

    let mut regressor: SVMRegressor<FP16x16> = SVMRegressor {
        coefficients,
        kernel_params,
        kernel_type,
        n_supports,
        one_class,
        post_transform,
        rho,
        support_vectors,
    };

    // 3 samples x 3 features, evenly spaced values from -0.5 to +0.3 in Q16.16.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 32768, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 19660, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 6553, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 6553, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 19660, sign: false },
        ]
            .span()
    );

    (regressor, X)
}
// Builds a linear-kernel SVMRegressor fixture with no support vectors
// (n_supports = 0, coefficients act as direct linear weights over the 3 features)
// plus a 3x3 input tensor X. The caller chooses post_transform and one_class.
// NOTE(review): the `support_vectors` field name was split across two lines by the
// extraction process ("supp" / "ort_vectors") — rejoined here; the stray trailing `|`
// table-separator artifact after the closing brace was also removed.
fn svm_regressor_linear_helper(
    post_transform: POST_TRANSFORM, one_class: usize
) -> (SVMRegressor<FP16x16>, Tensor<FP16x16>) {
    // One weight per input feature in linear mode.
    let coefficients: Span<FP16x16> = array![
        FP16x16 { mag: 18540, sign: false },
        FP16x16 { mag: 1746, sign: true },
        FP16x16 { mag: 1097, sign: false }
    ]
        .span();
    // [gamma, coef0, degree] — unused by the LINEAR kernel beyond the convention,
    // but kept to match the regressor's attribute layout.
    let kernel_params: Span<FP16x16> = array![
        FP16x16 { mag: 65, sign: false },
        FP16x16 { mag: 0, sign: false },
        FP16x16 { mag: 196608, sign: false }
    ]
        .span();
    let kernel_type = KERNEL_TYPE::LINEAR;
    let n_supports: usize = 0;
    // Bias term of the decision function.
    let rho: Span<FP16x16> = array![FP16x16 { mag: 81285, sign: false }].span();
    let support_vectors: Span<FP16x16> = array![].span();

    let mut regressor: SVMRegressor<FP16x16> = SVMRegressor {
        coefficients,
        kernel_params,
        kernel_type,
        n_supports,
        one_class,
        post_transform,
        rho,
        support_vectors,
    };

    // 3 samples x 3 features, evenly spaced values from -1.0 to +0.6 in Q16.16.
    let mut X: Tensor<FP16x16> = TensorTrait::new(
        array![3, 3].span(),
        array![
            FP16x16 { mag: 65536, sign: true },
            FP16x16 { mag: 52428, sign: true },
            FP16x16 { mag: 39321, sign: true },
            FP16x16 { mag: 26214, sign: true },
            FP16x16 { mag: 13107, sign: true },
            FP16x16 { mag: 0, sign: false },
            FP16x16 { mag: 13107, sign: false },
            FP16x16 { mag: 26214, sign: false },
            FP16x16 { mag: 39321, sign: false },
        ]
            .span()
    );

    (regressor, X)
}
use orion::numbers::FP16x16;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
use orion::operators::ml::tree_ensemble::core::{NODE_MODES, TreeEnsembleAttributes, TreeEnsemble};
use orion::operators::ml::tree_ensemble::tree_ensemble_classifier::{
TreeEnsembleClassifier, POST_TRANSFORM, TreeEnsembleClassifierTrait
};
use orion::operators::tensor::implementations::tensor_fp16x16::relative_eq;
use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; |
// Multi-class TreeEnsembleClassifier with POST_TRANSFORM::NONE: checks the predicted
// label per sample and the raw (untransformed) per-class score matrix entry by entry.
// scores.get(row, class) indexes the 3x3 score matrix; relative_eq allows small
// FP16x16 rounding differences.
// NOTE(review): a `#[test]` attribute appears to have been stripped during extraction —
// restore it before running the suite. A stray trailing `|` table-separator artifact
// after the closing brace was removed.
fn test_tree_ensemble_classifier_multi_pt_none() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::NONE);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // Predicted class per sample.
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');

    // Raw per-class scores, row by row.
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 60075, sign: false }) == true,
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 0, sign: false }) == true,
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 5461, sign: false }) == true,
        'score[0, 2]'
    );
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 37329, sign: false }) == true,
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 12528, sign: false }) == true,
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 15677, sign: false }) == true,
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 19853, sign: false }) == true,
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 28257, sign: false }) == true,
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 17424, sign: false }) == true,
        'score[2, 2]'
    );
}
// Multi-class TreeEnsembleClassifier with POST_TRANSFORM::SOFTMAX: labels match the
// NONE case (softmax is monotonic) while scores become normalized probabilities —
// each row's three FP16x16 values sum to ~65536 (~1.0 in Q16.16).
// NOTE(review): a `#[test]` attribute appears to have been stripped during extraction —
// restore it before running the suite. A stray trailing `|` table-separator artifact
// after the closing brace was removed.
fn test_tree_ensemble_classifier_multi_pt_softmax() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // Predicted class per sample.
    assert(*labels[0] == 0, 'labels[0]');
    assert(*labels[1] == 0, 'labels[1]');
    assert(*labels[2] == 1, 'labels[2]');
    assert(labels.len() == 3, 'len(labels)');

    // Softmax-normalized per-class scores, row by row.
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 35725, sign: false }) == true,
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 14284, sign: false }) == true,
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 15526, sign: false }) == true,
        'score[0, 2]'
    );
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 27266, sign: false }) == true,
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 18675, sign: false }) == true,
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 19594, sign: false }) == true,
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 21137, sign: false }) == true,
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 24029, sign: false }) == true,
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 20368, sign: false }) == true,
        'score[2, 2]'
    );
}
// Multi-class TreeEnsembleClassifier with POST_TRANSFORM::SOFTMAXZERO.
// Labels match the SOFTMAX case; scores differ only in row 0, where the class-1
// score stays exactly 0 (SOFTMAXZERO presumably excludes zero-valued inputs from
// the normalization — confirm against the Orion post-transform docs) and the
// remaining mass is redistributed over classes 0 and 2. Rows 1 and 2 have no zero
// entries, so their expected values are identical to the plain-softmax test above.
// NOTE(review): a `#[test]` attribute appears to have been stripped during
// extraction — restore it before running the suite.
fn test_tree_ensemble_classifier_multi_pt_softmax_zero() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAXZERO);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // Predicted class per sample.
    assert(*labels[0] == 0, 'labels[0] == 0');
    assert(*labels[1] == 0, 'labels[1] == 0');
    assert(*labels[2] == 1, 'labels[2] == 1');
    assert(labels.len() == 3, 'len(labels) == 3');

    // Row 0: class-1 score is exactly zero and remains zero after the transform.
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 45682, sign: false }) == true,
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 0, sign: false }) == true,
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 19853, sign: false }) == true,
        'score[0, 2]'
    );
    // Rows 1 and 2: same expected values as the plain-softmax variant.
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 27266, sign: false }) == true,
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 18675, sign: false }) == true,
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 19594, sign: false }) == true,
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 21137, sign: false }) == true,
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 24029, sign: false }) == true,
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 20368, sign: false }) == true,
        'score[2, 2]'
    );
}
// Multi-class TreeEnsembleClassifier with POST_TRANSFORM::LOGISTIC: each raw class
// score is passed through a sigmoid independently (a raw 0 maps to mag 32768,
// i.e. ~0.5 in Q16.16, as seen at score[0, 1]); labels match the NONE case.
// NOTE(review): a `#[test]` attribute appears to have been stripped during extraction —
// restore it before running the suite. A stray trailing `|` table-separator artifact
// after the closing brace was removed.
fn test_tree_ensemble_classifier_multi_pt_logistic() {
    let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::LOGISTIC);
    let (labels, mut scores) = TreeEnsembleClassifierTrait::predict(classifier, X);

    // Predicted class per sample.
    assert(*labels[0] == 0, 'labels[0] == 0');
    assert(*labels[1] == 0, 'labels[1] == 0');
    assert(*labels[2] == 1, 'labels[2] == 1');
    assert(labels.len() == 3, 'len(labels) == 3');

    // Sigmoid-transformed per-class scores, row by row.
    assert(
        relative_eq(@scores.get(0, 0).unwrap(), @FP16x16 { mag: 46816, sign: false }) == true,
        'score[0, 0]'
    );
    assert(
        relative_eq(@scores.get(0, 1).unwrap(), @FP16x16 { mag: 32768, sign: false }) == true,
        'score[0, 1]'
    );
    assert(
        relative_eq(@scores.get(0, 2).unwrap(), @FP16x16 { mag: 34132, sign: false }) == true,
        'score[0, 2]'
    );
    assert(
        relative_eq(@scores.get(1, 0).unwrap(), @FP16x16 { mag: 41856, sign: false }) == true,
        'score[1, 0]'
    );
    assert(
        relative_eq(@scores.get(1, 1).unwrap(), @FP16x16 { mag: 35890, sign: false }) == true,
        'score[1, 1]'
    );
    assert(
        relative_eq(@scores.get(1, 2).unwrap(), @FP16x16 { mag: 36668, sign: false }) == true,
        'score[1, 2]'
    );
    assert(
        relative_eq(@scores.get(2, 0).unwrap(), @FP16x16 { mag: 37693, sign: false }) == true,
        'score[2, 0]'
    );
    assert(
        relative_eq(@scores.get(2, 1).unwrap(), @FP16x16 { mag: 39724, sign: false }) == true,
        'score[2, 1]'
    );
    assert(
        relative_eq(@scores.get(2, 2).unwrap(), @FP16x16 { mag: 37098, sign: false }) == true,
        'score[2, 2]'
    );
}
// NOTE(review): non-code text removed here — the extraction process captured a
// dataset-viewer UI footer ("Subsets and Splits / No saved queries yet / ...")
// that is not part of this source file.