    fn resize(
        self: @Tensor<FP8x23>,
        roi: Option<Tensor<FP8x23>>,
        scales: Option<Span<FP8x23>>,
        sizes: Option<Span<usize>>,
        antialias: Option<usize>,
        axes: Option<Span<usize>>,
        coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
        cubic_coeff_a: Option<FP8x23>,
        exclude_outside: Option<bool>,
        extrapolation_value: Option<FP8x23>,
        keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP8x23> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP8x23>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP8x23> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP8x23>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP8x23>, high: Option<FP8x23>, low: Option<FP8x23>, seed: Option<usize>
) -> Tensor<FP8x23> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP8x23, end: FP8x23, step: FP8x23) -> Tensor<FP8x23> {
math::range::range(start, end, step)
}
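    // The window functions below pass pi explicitly as a raw FP8x23 magnitude:
    // PI is the fixed-point representation of pi, i.e. round(pi * 2^23).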
fn hann_window(size: FP8x23, periodic: Option<usize>) -> Tensor<FP8x23> {
math::hann_window::hann_window(size, FP8x23 { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP8x23, periodic: Option<usize>) -> Tensor<FP8x23> {
math::hamming_window::hamming_window(size, FP8x23 { mag: PI, sign: false }, periodic)
}
fn blackman_window(size: FP8x23, periodic: Option<usize>) -> Tensor<FP8x23> {
math::blackman_window::blackman_window(size, FP8x23 { mag: PI, sign: false }, periodic)
}
fn split_to_sequence(
self: @Tensor<FP8x23>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23>> {
        manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP8x23>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP8x23> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP8x23>) -> Option<Tensor<FP8x23>> {
manipulation::optional::optional(self)
}
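    // dynamic_quantize_linear derives scale and zero-point from the tensor's
    // own min/max and quantizes to uint8; the constants below supply the
    // target range [0, 255] plus the literals 0 and 1 used in that
    // computation (per ONNX DynamicQuantizeLinear, which emits uint8).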
fn dynamic_quantize_linear(
self: @Tensor<FP8x23>
) -> (Tensor::<u32>, Tensor::<FP8x23>, Tensor<FP8x23>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP8x23>,
updates: Tensor<FP8x23>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP8x23>,
default_list: Option<Span<FP8x23>>,
default_tensor: Option<Tensor<FP8x23>>,
keys: Option<Span<FP8x23>>,
keys_tensor: Option<Tensor<FP8x23>>,
values: Option<Span<FP8x23>>,
values_tensor: Option<Tensor<FP8x23>>
) -> Tensor<FP8x23> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
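// The impls below wire Cairo's `+`, `-`, `*` and `/` operators for
// Tensor<FP8x23> to the element-wise kernels in math::arithmetic, so
// `a + b` is shorthand for TensorTrait::add(a, b).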
impl FP8x23TensorAdd<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
impl TAdd: Add<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Add<Tensor<FP8x23>> {
fn add(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl FP8x23TensorSub<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
    impl TSub: Sub<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Sub<Tensor<FP8x23>> {
fn sub(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl FP8x23TensorMul<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
impl TMul: Mul<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Mul<Tensor<FP8x23>> {
fn mul(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl FP8x23TensorDiv<
FP8x23,
impl FP8x23Tensor: TensorTrait<FP8x23>,
impl TDiv: Div<FP8x23>,
impl TCopy: Copy<FP8x23>,
impl TDrop: Drop<FP8x23>
> of Div<Tensor<FP8x23>> {
fn div(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl FP8x23TensorPartialEq of PartialEq<Tensor<FP8x23>> {
fn eq(lhs: @Tensor<FP8x23>, rhs: @Tensor<FP8x23>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP8x23>, rhs: @Tensor<FP8x23>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl TensorI8IntoTensorFP8x23 of Into<Tensor<i8>, Tensor<FP8x23>> {
fn into(self: Tensor<i8>) -> Tensor<FP8x23> {
tensor_i8_to_tensor_fp8x23(@self)
}
}
impl FP8x23TensorPartialOrd of PartialOrd<Tensor<FP8x23>> {
fn ge(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
fn lt(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
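// Relative tolerance used by the PartialEq impl above, expressed as a raw
// FP8x23 magnitude: 75497 / 2^23 is roughly 0.009, so values within ~0.9% of
// each other compare equal.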
const PRECISION: u32 = 75497;
fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
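// For example, with lhs = 1.0 (mag 2^23) and rhs = 1.005, the relative
// difference is 0.005 < 0.009, so relative_eq returns true; exact bitwise
// equality is deliberately not required for fixed-point results.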
fn tensor_eq(mut lhs: Tensor<FP8x23>, mut rhs: Tensor<FP8x23>,) -> bool {
let mut is_eq = true;
    while lhs.shape.len() != 0 && is_eq {
        is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
    };
if !is_eq {
return false;
}
    while lhs.data.len() != 0 && is_eq {
        is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
    };
is_eq
}
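// Illustrative only (not part of the original file): a minimal sketch of how
// the impls above compose. Assumes FixedTrait is imported in this module, as
// in the sibling implementations.
fn example_fp8x23_eq() {
    let shape: Span<usize> = array![2].span();
    let data: Span<FP8x23> = array![
        FixedTrait::new_unscaled(1, false), FixedTrait::new_unscaled(2, false)
    ]
        .span();
    let a = TensorTrait::new(shape, data);
    let b = TensorTrait::new(shape, data);
    // `==` dispatches to FP8x23TensorPartialEq::eq -> tensor_eq -> relative_eq,
    // so results that agree up to ~0.9% relative error compare as equal.
    assert(a == b, 'tensors should compare equal');
}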
fn tensor_i8_to_tensor_fp8x23(x: @Tensor<i8>) -> Tensor<FP8x23> {
let mut result_data = ArrayTrait::<FP8x23>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP8x23W};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp8x23wide::math::trig::PI;
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
impl FP8x23WTensor of TensorTrait<FP8x23W> {
fn new(shape: Span<usize>, data: Span<FP8x23W>) -> Tensor<FP8x23W> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP8x23W) -> Tensor<FP8x23W> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP8x23W>, indices: Span<usize>) -> FP8x23W {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP8x23W>) -> FP8x23W {
math::min_in_tensor::min_in_tensor::<FP8x23W, u64>(*self.data)
}
fn min(tensors: Span<Tensor<FP8x23W>>) -> Tensor<FP8x23W> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP8x23W>) -> FP8x23W {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP8x23W>>) -> Tensor<FP8x23W> {
math::max::max(tensors)
}
    fn stride(self: @Tensor<FP8x23W>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP8x23W>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP8x23W>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP8x23W>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP8x23W> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP8x23W>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23W> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP8x23W>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP8x23W>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP8x23W>, axes: Span<usize>) -> Tensor<FP8x23W> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
        math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP8x23W>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP8x23W> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP8x23W>, axis: usize) -> Tensor<FP8x23W> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::xor::xor(self, other)
}
    fn or(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP8x23W>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP8x23W>>, axis: usize,) -> Tensor<FP8x23W> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP8x23W>, y_scale: @Tensor<FP8x23W>, y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
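    // The two constants above are the int8 saturation bounds passed to the
    // kernel: new_unscaled(128, true) is -128 and new_unscaled(127, false) is
    // +127, matching ONNX QuantizeLinear's int8 output range.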
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP8x23W>, x_zero_point: @Tensor<FP8x23W>
) -> Tensor::<FP8x23W> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23W>,
a_zero_point: @Tensor<FP8x23W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23W>,
b_zero_point: @Tensor<FP8x23W>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23W>,
a_zero_point: @Tensor<FP8x23W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23W>,
b_zero_point: @Tensor<FP8x23W>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23W>,
a_zero_point: @Tensor<FP8x23W>,
        b: @Tensor<i8>,
b_scale: @Tensor<FP8x23W>,
b_zero_point: @Tensor<FP8x23W>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP8x23W>>,
zero_points: Span<Tensor<FP8x23W>>,
y_scale: @Tensor<FP8x23W>,
y_zero_point: @Tensor<FP8x23W>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP8x23W>, a_zero_point: @Tensor<FP8x23W>, alpha: FP8x23W
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<FP8x23W>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP8x23W> {
core_tensor::slice::<FP8x23W>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP8x23W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP8x23W> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP8x23W>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP8x23W>, axes: Option<Span<usize>>) -> Tensor<FP8x23W> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP8x23W>, axes: Span<usize>) -> Tensor<FP8x23W> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP8x23W>, min: Option<FP8x23W>, max: Option<FP8x23W>) -> Tensor<FP8x23W> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
core_tensor::identity(self)
}
    fn where(self: @Tensor<FP8x23W>, x: @Tensor<FP8x23W>, y: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(self: @Tensor<FP8x23W>, indices: Tensor<usize>) -> Tensor<FP8x23W> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP8x23W>, threshold: Option<FP8x23W>) -> Tensor<FP8x23W> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP8x23W>, upper: bool, k: i64) -> Tensor<FP8x23W> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP8x23W>,
updates: Tensor<FP8x23W>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23W> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn not(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn gather_elements(
        self: @Tensor<FP8x23W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP8x23W> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP8x23W>, bias: Option<FP8x23W>, lambd: Option<FP8x23W>
) -> Tensor<FP8x23W> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP8x23W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23W> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP8x23W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23W> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP8x23W>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP8x23W>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP8x23W>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP8x23W> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP8x23W>, axis: Option<usize>, sorted: Option<bool>
    ) -> (Tensor<FP8x23W>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP8x23W>,
scale: @Tensor<FP8x23W>,
B: Option<@Tensor<FP8x23W>>,
axis: Option<i32>,
epsilon: Option<FP8x23W>,
stash_type: Option<usize>,
) -> (Tensor<FP8x23W>, Tensor<FP8x23W>, Tensor<FP8x23W>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP8x23W>,
roi: Option<Tensor<FP8x23W>>,
scales: Option<Span<FP8x23W>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP8x23W>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP8x23W>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP8x23W> {
panic(array!['not supported!'])
}
fn compress(
self: @Tensor<FP8x23W>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP8x23W> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP8x23W>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23W>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP8x23W>, high: Option<FP8x23W>, low: Option<FP8x23W>, seed: Option<usize>
) -> Tensor<FP8x23W> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP8x23W, end: FP8x23W, step: FP8x23W) -> Tensor<FP8x23W> {
math::range::range(start, end, step)
}
fn hann_window(size: FP8x23W, periodic: Option<usize>) -> Tensor<FP8x23W> {
        math::hann_window::hann_window(size, FP8x23W { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP8x23W, periodic: Option<usize>) -> Tensor<FP8x23W> {
math::hamming_window::hamming_window(size, FP8x23W { mag: PI, sign: false }, periodic)
}
fn blackman_window(size: FP8x23W, periodic: Option<usize>) -> Tensor<FP8x23W> {
math::blackman_window::blackman_window(size, FP8x23W { mag: PI, sign: false }, periodic)
}
fn split_to_sequence(
self: @Tensor<FP8x23W>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP8x23W>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP8x23W>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP8x23W> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP8x23W>) -> Option<Tensor<FP8x23W>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP8x23W>
) -> (Tensor::<u32>, Tensor::<FP8x23W>, Tensor<FP8x23W>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP8x23W>,
updates: Tensor<FP8x23W>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23W> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP8x23W>,
default_list: Option<Span<FP8x23W>>,
default_tensor: Option<Tensor<FP8x23W>>,
keys: Option<Span<FP8x23W>>,
keys_tensor: Option<Tensor<FP8x23W>>,
        values: Option<Span<FP8x23W>>,
values_tensor: Option<Tensor<FP8x23W>>
) -> Tensor<FP8x23W> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl FP8x23WTensorAdd<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TAdd: Add<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Add<Tensor<FP8x23W>> {
fn add(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl FP8x23WTensorSub<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TSub: Sub<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Sub<Tensor<FP8x23W>> {
fn sub(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl FP8x23WTensorMul<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TMul: Mul<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Mul<Tensor<FP8x23W>> {
fn mul(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl FP8x23WTensorDiv<
FP8x23W,
impl FP8x23WTensor: TensorTrait<FP8x23W>,
impl TDiv: Div<FP8x23W>,
impl TCopy: Copy<FP8x23W>,
impl TDrop: Drop<FP8x23W>
> of Div<Tensor<FP8x23W>> {
fn div(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> Tensor<FP8x23W> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl FP8x23WTensorPartialEq of PartialEq<Tensor<FP8x23W>> {
fn eq(lhs: @Tensor<FP8x23W>, rhs: @Tensor<FP8x23W>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP8x23W>, rhs: @Tensor<FP8x23W>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl U64TryIntoU64 of TryInto<u64, u64> {
    fn try_into(self: u64) -> Option<u64> {
Option::Some(self)
}
}
impl FP8x23WTensorPartialOrd of PartialOrd<Tensor<FP8x23W>> {
fn ge(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
fn lt(lhs: Tensor<FP8x23W>, rhs: Tensor<FP8x23W>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
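// Same ~0.9% relative tolerance as the FP8x23 implementation above, typed u64
// because FP8x23W magnitudes are 64-bit.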
const PRECISION: u64 = 75497;
fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
fn tensor_eq(mut lhs: Tensor<FP8x23W>, mut rhs: Tensor<FP8x23W>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
use orion::numbers::{I32Div, I32DivEq};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::{
tensor_u32::U32Tensor, tensor_i8::I8Tensor, tensor_bool::BoolTensor
};
impl I32Tensor of TensorTrait<i32> {
fn new(shape: Span<usize>, data: Span<i32>) -> Tensor<i32> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: i32) -> Tensor<i32> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<i32>, indices: Span<usize>) -> i32 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<i32>) -> i32 {
math::min_in_tensor::min_in_tensor::<i32>(*self.data)
}
fn min(tensors: Span<Tensor<i32>>) -> Tensor<i32> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<i32>) -> i32 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<i32>>) -> Tensor<i32> {
math::max::max(tensors)
}
fn stride(self: @Tensor<i32>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<i32>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
    fn unravel_index(self: @Tensor<i32>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<i32>, target_shape: Span<i32>, allowzero: bool) -> Tensor<i32> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<i32>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i32> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<i32>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<i32>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<i32>, axes: Span<usize>) -> Tensor<i32> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::less::less(self, other)
}
    fn less_equal(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<i32>) -> Tensor<i32> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<i32>) -> Tensor<i32> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<i32>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<i32> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<i32>, axis: usize) -> Tensor<i32> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<i32>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
    fn sqrt(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn concat(tensors: Span<Tensor<i32>>, axis: usize,) -> Tensor<i32> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<i32>, y_scale: @Tensor<i32>, y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(self, y_scale, y_zero_point, -127, 127)
}
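    // Note: the i32 path passes its saturation bounds as plain literals
    // (-127, 127), a symmetric int8 range, where the fixed-point impls build
    // them via NumberTrait::new_unscaled.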
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<i32>, x_zero_point: @Tensor<i32>
) -> Tensor::<i32> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<i32>,
a_zero_point: @Tensor<i32>,
b: @Tensor<i8>,
b_scale: @Tensor<i32>,
b_zero_point: @Tensor<i32>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<i32>,
a_zero_point: @Tensor<i32>,
b: @Tensor<i8>,
b_scale: @Tensor<i32>,
b_zero_point: @Tensor<i32>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<i32>,
a_zero_point: @Tensor<i32>,
        b: @Tensor<i8>,
b_scale: @Tensor<i32>,
b_zero_point: @Tensor<i32>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<i32>>,
zero_points: Span<Tensor<i32>>,
y_scale: @Tensor<i32>,
y_zero_point: @Tensor<i32>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<i32>, a_zero_point: @Tensor<i32>, alpha: i32
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
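    // Every qlinear_* kernel above receives the same trailing pair of
    // arguments: the low/high saturation bounds applied when requantizing
    // results back to i8.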
fn slice(
self: @Tensor<i32>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<i32> {
core_tensor::slice::<i32>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<i32>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<i32> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<i32>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
    fn squeeze(self: @Tensor<i32>, axes: Option<Span<usize>>) -> Tensor<i32> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<i32>, axes: Span<usize>) -> Tensor<i32> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<i32>) -> Tensor<i32> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<i32>, min: Option<i32>, max: Option<i32>) -> Tensor<i32> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<i32>) -> Tensor<i32> {
core_tensor::identity(self)
}
fn where(self: @Tensor<i32>, x: @Tensor<i32>, y: @Tensor<i32>) -> Tensor<i32> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<i32>) -> Tensor<i32> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<i32>, upper: bool, k: i64) -> Tensor<i32> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<i32>,
updates: Tensor<i32>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<i32> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<i32>, indices: Tensor<usize>) -> Tensor<i32> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<i32>, threshold: Option<i32>) -> Tensor<i32> {
        panic(array!['not supported!'])
}
fn reduce_sum_square(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<i32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<i32> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<i32>, bias: Option<i32>, lambd: Option<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<i32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<i32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i32> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<i32>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<i32>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<i32>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<i32> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
panic(array!['not supported!'])
}
    fn reduce_log_sum_exp(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<i32>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<i32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn resize(
self: @Tensor<i32>,
roi: Option<Tensor<i32>>,
scales: Option<Span<i32>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<i32>,
exclude_outside: Option<bool>,
extrapolation_value: Option<i32>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn compress(self: @Tensor<i32>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<i32> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<i32>,
scale: @Tensor<i32>,
B: Option<@Tensor<i32>>,
axis: Option<i32>,
epsilon: Option<i32>,
stash_type: Option<usize>,
) -> (Tensor<i32>, Tensor<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<i32>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<i32>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<i32>, high: Option<i32>, low: Option<i32>, seed: Option<usize>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn range(start: i32, end: i32, step: i32) -> Tensor<i32> {
        math::range::range(start, end, step)
}
fn hann_window(size: i32, periodic: Option<usize>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn hamming_window(size: i32, periodic: Option<usize>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn blackman_window(size: i32, periodic: Option<usize>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<i32>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<i32>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<i32>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<i32> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<i32>) -> Option<Tensor<i32>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(self: @Tensor<i32>) -> (Tensor::<u32>, Tensor::<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<i32>, updates: Tensor<i32>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<i32> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<i32>,
default_list: Option<Span<i32>>,
default_tensor: Option<Tensor<i32>>,
keys: Option<Span<i32>>,
keys_tensor: Option<Tensor<i32>>,
values: Option<Span<i32>>,
values_tensor: Option<Tensor<i32>>
) -> Tensor<i32> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl I32TensorAdd of Add<Tensor<i32>> {
fn add(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
        math::arithmetic::add(@lhs, @rhs)
}
}
impl I32TensorSub of Sub<Tensor<i32>> {
fn sub(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl I32TensorMul of Mul<Tensor<i32>> {
fn mul(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl I32TensorDiv of Div<Tensor<i32>> {
fn div(lhs: Tensor<i32>, rhs: Tensor<i32>) -> Tensor<i32> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl I32TensorPartialEq of PartialEq<Tensor<i32>> {
fn eq(lhs: @Tensor<i32>, rhs: @Tensor<i32>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<i32>, rhs: @Tensor<i32>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl I32TryIntoI32 of TryInto<i32, i32> {
fn try_into(self: i32) -> Option<i32> {
Option::Some(self)
}
}
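// Trivial identity conversion, presumably provided to satisfy TryInto bounds
// in generic tensor helpers when the element type is already i32.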
impl TensorI8IntoTensorI32 of Into<Tensor<i8>, Tensor<i32>> {
fn into(self: Tensor<i8>) -> Tensor<i32> {
tensor_i8_to_tensor_i32(@self)
}
}
impl I32TensorPartialOrd of PartialOrd<Tensor<i32>> {
fn ge(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
fn lt(lhs: Tensor<i32>, rhs: Tensor<i32>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
fn tensor_eq(mut lhs: Tensor<i32>, mut rhs: Tensor<i32>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
    is_eq
}
fn tensor_i8_to_tensor_i32(x: @Tensor<i8>) -> Tensor<i32> {
let mut result_data = ArrayTrait::<i32>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
}
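// Example use of the Into impl above (illustrative sketch, not part of the
// original file):
//
//     let widened: Tensor<i32> = my_i8_tensor.into();
//
// Each i8 element is converted value-preservingly via tensor_i8_to_tensor_i32.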
use orion::numbers::{I8Div, I8DivEq};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::{tensor_u32::U32Tensor, tensor_bool::BoolTensor};
impl I8Tensor of TensorTrait<i8> {
fn new(shape: Span<usize>, data: Span<i8>) -> Tensor<i8> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: i8) -> Tensor<i8> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<i8>, indices: Span<usize>) -> i8 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<i8>) -> i8 {
math::min_in_tensor::min_in_tensor::<i8>(*self.data)
}
fn min(tensors: Span<Tensor<i8>>) -> Tensor<i8> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<i8>) -> i8 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<i8>>) -> Tensor<i8> {
math::max::max(tensors)
}
fn stride(self: @Tensor<i8>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<i8>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<i8>, index: usize) -> Span<usize> {
        unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<i8>, target_shape: Span<i32>, allowzero: bool) -> Tensor<i8> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<i8>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i8> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<i8>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<i8>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<i8>, axes: Span<usize>) -> Tensor<i8> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i32> {
        math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<i8>) -> Tensor<i8> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<i8>) -> Tensor<i8> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<i8>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<i8> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<i8>, axis: usize) -> Tensor<i8> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<i8>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
    fn concat(tensors: Span<Tensor<i8>>, axis: usize,) -> Tensor<i8> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<i8>, y_scale: @Tensor<i8>, y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<i8>, x_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<i8>,
a_zero_point: @Tensor<i8>,
b: @Tensor<i8>,
b_scale: @Tensor<i8>,
b_zero_point: @Tensor<i8>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<i8>,
a_zero_point: @Tensor<i8>,
b: @Tensor<i8>,
b_scale: @Tensor<i8>,
b_zero_point: @Tensor<i8>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<i8>,
a_zero_point: @Tensor<i8>,
        b: @Tensor<i8>,
b_scale: @Tensor<i8>,
b_zero_point: @Tensor<i8>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<i8>>,
zero_points: Span<Tensor<i8>>,
y_scale: @Tensor<i8>,
y_zero_point: @Tensor<i8>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<i8>, a_zero_point: @Tensor<i8>, alpha: i8
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(-127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<i8>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<i8> {
core_tensor::slice::<i8>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<i8>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<i8> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<i8>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<i8>, axes: Option<Span<usize>>) -> Tensor<i8> {
        core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<i8>, axes: Span<usize>) -> Tensor<i8> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<i8>) -> Tensor<i8> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<i8>, min: Option<i8>, max: Option<i8>) -> Tensor<i8> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<i8>) -> Tensor<i8> {
core_tensor::identity(self)
}
fn where(self: @Tensor<i8>, x: @Tensor<i8>, y: @Tensor<i8>) -> Tensor<i8> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<i8>) -> Tensor<i8> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<i8>, upper: bool, k: i64) -> Tensor<i8> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<i8>,
updates: Tensor<i8>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<i8> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<i8>, indices: Tensor<usize>) -> Tensor<i8> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<i8>, threshold: Option<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
    fn reduce_sum_square(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<i8>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<i8> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<i8>, bias: Option<i8>, lambd: Option<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<i8>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i8> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<i8>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i8> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<i8>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<i8>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<i8>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<i8> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn reduce_log_sum_exp(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
        panic(array!['not supported!'])
}
fn erf(self: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<i8>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<i8>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn resize(
self: @Tensor<i8>,
roi: Option<Tensor<i8>>,
scales: Option<Span<i8>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<i8>,
exclude_outside: Option<bool>,
extrapolation_value: Option<i8>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn compress(self: @Tensor<i8>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<i8> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<i8>,
scale: @Tensor<i8>,
B: Option<@Tensor<i8>>,
axis: Option<i32>,
epsilon: Option<i8>,
stash_type: Option<usize>,
) -> (Tensor<i8>, Tensor<i8>, Tensor<i8>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<i8>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<i8>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<i8>, high: Option<i8>, low: Option<i8>, seed: Option<usize>
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn range(start: i8, end: i8, step: i8) -> Tensor<i8> {
math::range::range(start, end, step)
}
fn hann_window(size: i8, periodic: Option<usize>) -> Tensor<i8> {
        panic(array!['not supported!'])
}
fn hamming_window(size: i8, periodic: Option<usize>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn blackman_window(size: i8, periodic: Option<usize>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<i8>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<i8>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<i8>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<i8> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<i8>) -> Option<Tensor<i8>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(self: @Tensor<i8>) -> (Tensor::<u32>, Tensor::<i8>, Tensor<i8>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<i8>, updates: Tensor<i8>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<i8> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<i8>,
default_list: Option<Span<i8>>,
default_tensor: Option<Tensor<i8>>,
keys: Option<Span<i8>>,
keys_tensor: Option<Tensor<i8>>,
values: Option<Span<i8>>,
values_tensor: Option<Tensor<i8>>
) -> Tensor<i8> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl I8TensorAdd of Add<Tensor<i8>> {
fn add(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl I8TensorSub of Sub<Tensor<i8>> {
    fn sub(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl I8TensorMul of Mul<Tensor<i8>> {
fn mul(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl I8TensorDiv of Div<Tensor<i8>> {
fn div(lhs: Tensor<i8>, rhs: Tensor<i8>) -> Tensor<i8> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl I8TensorPartialEq of PartialEq<Tensor<i8>> {
fn eq(lhs: @Tensor<i8>, rhs: @Tensor<i8>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<i8>, rhs: @Tensor<i8>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl I8TensorPartialOrd of PartialOrd<Tensor<i8>> {
fn ge(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
fn lt(lhs: Tensor<i8>, rhs: Tensor<i8>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
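/// Compares two i8 tensors for equality: shapes first, then element data.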
fn tensor_eq(mut lhs: Tensor<i8>, mut rhs: Tensor<i8>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
    while lhs.data.len() != 0 && is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
is_eq
} |
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::{tensor_i8::I8Tensor, tensor_bool::BoolTensor};
impl U32Tensor of TensorTrait<u32> {
fn new(shape: Span<usize>, data: Span<u32>) -> Tensor<u32> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: u32) -> Tensor<u32> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<u32>, indices: Span<usize>) -> u32 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<u32>) -> u32 {
math::min_in_tensor::min_in_tensor::<u32, u32>(*self.data)
}
fn min(tensors: Span<Tensor<u32>>) -> Tensor<u32> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<u32>) -> u32 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<u32>>) -> Tensor<u32> {
math::max::max(tensors)
}
fn stride(self: @Tensor<u32>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<u32>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<u32>, index: usize) -> Span<usize> {
        unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<u32>, target_shape: Span<i32>, allowzero: bool) -> Tensor<u32> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<u32>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<u32> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<u32>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<u32>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<u32>, axes: Span<usize>) -> Tensor<u32> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<i32> {
        math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<u32>) -> Tensor<u32> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<u32>) -> Tensor<u32> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<u32>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<u32> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<u32>, axis: usize) -> Tensor<u32> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<u32>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<u32>) -> Tensor<u32> {
        panic(array!['not supported!'])
}
fn concat(tensors: Span<Tensor<u32>>, axis: usize,) -> Tensor<u32> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<u32>, y_scale: @Tensor<u32>, y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<u32>, x_zero_point: @Tensor<u32>
) -> Tensor::<u32> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<u32>,
a_zero_point: @Tensor<u32>,
b: @Tensor<i8>,
b_scale: @Tensor<u32>,
b_zero_point: @Tensor<u32>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<u32>,
a_zero_point: @Tensor<u32>,
b: @Tensor<i8>,
b_scale: @Tensor<u32>,
b_zero_point: @Tensor<u32>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<u32>,
a_zero_point: @Tensor<u32>,
b: @Tensor<i8>,
b_scale: @Tensor<u32>,
b_zero_point: @Tensor<u32>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<u32>>,
zero_points: Span<Tensor<u32>>,
y_scale: @Tensor<u32>,
y_zero_point: @Tensor<u32>,
axis: usize,
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<u32>, a_zero_point: @Tensor<u32>, alpha: u32
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<u32>,
        starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<u32> {
core_tensor::slice::<u32>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<u32>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<u32> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<u32>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<u32>, axes: Option<Span<usize>>) -> Tensor<u32> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<u32>, axes: Span<usize>) -> Tensor<u32> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn clip(self: @Tensor<u32>, min: Option<u32>, max: Option<u32>) -> Tensor<u32> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<u32>) -> Tensor<u32> {
core_tensor::identity(self)
}
fn where(self: @Tensor<u32>, x: @Tensor<u32>, y: @Tensor<u32>) -> Tensor<u32> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<u32>) -> Tensor<u32> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<u32>, upper: bool, k: i64) -> Tensor<u32> {
linalg::trilu::trilu(self, upper, k)
}
    fn scatter(
self: @Tensor<u32>,
updates: Tensor<u32>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<u32> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<u32>, indices: Tensor<usize>) -> Tensor<u32> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<u32>, threshold: Option<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn reduce_sum_square(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<u32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<u32> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<u32>, bias: Option<u32>, lambd: Option<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<u32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<u32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<u32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<u32> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<u32>, other: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<u32>, detect_negative: Option<u8>, detect_positive: Option<u8>
    ) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<u32>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<u32>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<u32> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn reduce_log_sum_exp(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<u32>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<u32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn resize(
self: @Tensor<u32>,
roi: Option<Tensor<u32>>,
scales: Option<Span<u32>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<u32>,
exclude_outside: Option<bool>,
extrapolation_value: Option<u32>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn compress(self: @Tensor<u32>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<u32> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<u32>,
scale: @Tensor<u32>,
B: Option<@Tensor<u32>>,
axis: Option<i32>,
epsilon: Option<u32>,
stash_type: Option<usize>,
    ) -> (Tensor<u32>, Tensor<u32>, Tensor<u32>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<u32>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<u32>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<u32>, high: Option<u32>, low: Option<u32>, seed: Option<usize>
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn range(start: u32, end: u32, step: u32) -> Tensor<u32> {
math::range::range(start, end, step)
}
fn hann_window(size: u32, periodic: Option<usize>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn hamming_window(size: u32, periodic: Option<usize>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn blackman_window(size: u32, periodic: Option<usize>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<u32>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<u32>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<u32>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<u32> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<u32>) -> Option<Tensor<u32>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(self: @Tensor<u32>) -> (Tensor::<u32>, Tensor::<u32>, Tensor<u32>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<u32>, updates: Tensor<u32>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<u32> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<u32>,
default_list: Option<Span<u32>>,
        default_tensor: Option<Tensor<u32>>,
keys: Option<Span<u32>>,
keys_tensor: Option<Tensor<u32>>,
values: Option<Span<u32>>,
values_tensor: Option<Tensor<u32>>
) -> Tensor<u32> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl U32TensorAdd of Add<Tensor<u32>> {
fn add(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl U32TensorSub of Sub<Tensor<u32>> {
fn sub(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl U32TensorMul of Mul<Tensor<u32>> {
fn mul(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl U32TensorDiv of Div<Tensor<u32>> {
fn div(lhs: Tensor<u32>, rhs: Tensor<u32>) -> Tensor<u32> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl U32TensorPartialEq of PartialEq<Tensor<u32>> {
fn eq(lhs: @Tensor<u32>, rhs: @Tensor<u32>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<u32>, rhs: @Tensor<u32>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl U32TryIntoI8 of TryInto<u32, i8> {
fn try_into(self: u32) -> Option<i8> {
let number_felt: felt252 = self.into();
let number_i8: i8 = number_felt.try_into().unwrap();
Option::Some(number_i8)
}
}
impl U32TensorPartialOrd of PartialOrd<Tensor<u32>> {
fn ge(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
    fn lt(lhs: Tensor<u32>, rhs: Tensor<u32>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
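/// Compares two u32 tensors for equality: shapes first, then element data.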
fn tensor_eq(mut lhs: Tensor<u32>, mut rhs: Tensor<u32>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
is_eq
} |
mod matmul;
mod transpose;
mod trilu;
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
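/// Cf: TensorTrait::matmul docstring
/// Supports 1D and 2D operands only: 1D x 1D reduces to a dot product
/// returning a one-element tensor; otherwise a 1D operand is promoted to 2D
/// for the multiply and the promoted dimension is dropped from the result.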
fn matmul<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TMul: Mul<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let self_shape = *self.shape;
let other_shape = *other.shape;
let self_ndim = (self_shape).len();
let other_ndim = (other_shape).len();
    assert(self_ndim <= 2 && other_ndim <= 2, 'supports only 1D and 2D matmul');
if self_ndim == 1 && other_ndim == 1 {
let dot = dot_product((*self).data, (*other).data);
let mut result_shape = ArrayTrait::new();
let mut result_data = ArrayTrait::new();
result_shape.append(1);
result_data.append(dot);
return TensorTrait::new(result_shape.span(), result_data.span());
}
let self_shape = prepare_shape_for_matmul(self_shape, true);
let other_shape = prepare_shape_for_matmul(other_shape, false);
let result = matrix_multiply(*self.data, self_shape, *other.data, other_shape);
let result_shape = adjust_output_shape_after_matmul(result.shape, self_ndim, other_ndim);
TensorTrait::new(result_shape, result.data)
}
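/// Multiply-accumulates two equal-length vectors into a single value.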
fn dot_product<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TMul: Mul<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut vec1: Span<T>, mut vec2: Span<T>
) -> T {
assert(vec1.len() == vec2.len(), 'vector lengths do not match');
let mut result: T = NumberTrait::zero();
loop {
match vec1.pop_front() {
Option::Some(vec1_item) => {
let element_product = *vec1_item * *vec2.pop_front().unwrap();
result += element_product;
},
Option::None => { break; }
};
};
result
}
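/// Naive triple-loop matrix product: (m x n) * (n x p) -> (m x p).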
fn matrix_multiply<
T,
MAG,
impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
impl TMul: Mul<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mat1: Span<T>, mat1_shape: Span<usize>, mat2: Span<T>, mat2_shape: Span<usize>
) -> Tensor<T> {
let m = *mat1_shape[0];
let n = *mat1_shape[1];
let p = *mat2_shape[1];
let mut result_data: Array<T> = array![];
let mut result_shape: Array<usize> = array![m, p];
let mut i = 0_usize;
while i != m {
let mut j = 0_usize;
while j != p {
let mut sum: T = NumberTrait::zero();
let mut k = 0_usize;
while k != n {
let mat1_index = i * n + k;
let mat2_index = k * p + j;
sum += *mat1[mat1_index] * *mat2[mat2_index];
k += 1;
};
result_data.append(sum);
j += 1;
};
i += 1;
};
TensorTrait::new(result_shape.span(), result_data.span())
}
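/// Promotes a 1D shape to 2D for matmul: [n] becomes [1, n] for the first
/// operand and [n, 1] for the second; higher-rank shapes pass through as-is.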
fn prepare_shape_for_matmul(mut shape: Span<usize>, is_first_tensor: bool) -> Span<usize> {
let ndim = shape.len();
if ndim == 1 && is_first_tensor {
let mut shape_adjusted = ArrayTrait::new();
shape_adjusted.append(1);
loop {
match shape.pop_front() {
Option::Some(item) => { shape_adjusted.append(*item); },
Option::None => { break; }
};
};
return shape_adjusted.span();
} else if ndim == 1 && !is_first_tensor {
let mut shape_adjusted = ArrayTrait::new();
loop {
match shape.pop_front() {
Option::Some(item) => { shape_adjusted.append(*item) },
Option::None => { break; }
};
};
shape_adjusted.append(1);
return shape_adjusted.span();
}
shape
}
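/// Removes the dimensions added by prepare_shape_for_matmul for operands
/// that were originally 1D.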
fn adjust_output_shape_after_matmul(
mut output_shape: Span<usize>, self_dim: usize, other_dim: usize
) -> Span<usize> {
if self_dim == 1 { |
let _ = output_shape.pop_front().unwrap();
}
if other_dim == 1 {
let _ = output_shape.pop_back().unwrap();
}
output_shape
} |
use orion::operators::tensor::core::{
new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape
};
use orion::operators::tensor::helpers::{len_from_shape, find_axis, permutation_output_shape};
use orion::numbers::NumberTrait;
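/// Cf: TensorTrait::transpose docstring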
fn transpose<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>, axes: Span<usize>
) -> Tensor<T> {
if (*self.shape).len() == 1 {
return self.identity();
}
assert(axes.len() == (*self.shape).len(), 'shape and axes length unequal');
if (*self.shape).len() == 2 {
return transpose2D(@(*self));
}
let output_shape = permutation_output_shape(*self.shape, axes);
let output_data_len = len_from_shape(output_shape);
let mut output_data: Array<T> = array![];
let mut output_index: usize = 0;
while output_index != output_data_len {
let output_indices = unravel_index(output_index, output_shape);
let mut input_indices: Array<u32> = array![];
let mut output_axis: usize = 0;
while output_axis != axes.len() {
let input_axis = find_axis(axes, output_axis);
input_indices.append(*output_indices[input_axis]);
output_axis += 1;
};
let input_index = ravel_index(*self.shape, input_indices.span());
output_data.append(*(*self.data)[input_index]);
output_index += 1;
};
TensorTrait::new(output_shape, output_data.span())
}
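/// Fast path for rank-2 tensors: swaps rows and columns directly.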
fn transpose2D<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>
) -> Tensor<T> {
    assert((*self.shape).len() == 2, 'expected a 2D tensor');
let mut output_data: Array<T> = array![];
let n = *self.shape[0];
let m = *self.shape[1];
let mut output_shape: Array<u32> = array![m, n];
let mut j: usize = 0;
while j != m {
let mut i = 0;
while i != n {
output_data.append(*(*self.data)[i * m + j]);
i += 1;
};
j += 1;
};
    TensorTrait::new(output_shape.span(), output_data.span())
} |
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
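/// Cf: TensorTrait::trilu docstring
/// Returns the upper (or lower) triangular part relative to diagonal `k`,
/// applied to each matrix in a batch formed by the leading dimensions.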
fn trilu<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, upper: bool, k: i64
) -> Tensor<T> {
assert((*self.shape).len() >= 2, 'must have at least 2 dimensions');
let shape_len = (*self.shape).len();
let mut output_data: Array<T> = array![];
let mut output_size: Array<u32> = array![];
let mut batch_size = 1;
let mut n: u32 = 0;
let mut m: u32 = 0;
let mut self_shape = *self.shape;
let mut i = 0;
loop {
match self_shape.pop_front() {
Option::Some(val) => {
if i == shape_len - 2 {
n = *val;
} else if i == shape_len - 1 {
m = *val;
} else {
batch_size *= *val;
}
i += 1;
output_size.append(*val);
},
Option::None => { break; }
}
};
let mut self_data = *self.data;
let mut b = 0;
loop {
if b == batch_size {
break ();
}
let mut i = 0;
loop {
if i == n {
break ();
}
let mut j = 0;
loop {
if j == m {
break ();
}
let ii: felt252 = i.into();
let jj: felt252 = j.into();
let iii: i64 = ii.try_into().unwrap();
let jjj: i64 = jj.try_into().unwrap();
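                    // Keep the element if it lies on the retained side of
                    // diagonal k: at or above it when `upper`, at or below
                    // it otherwise; zero it out elsewhere.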
let result = match self_data.pop_front() {
Option::Some(val) => {
if (upper && (iii + k <= jjj)) || (!upper && (iii + k >= jjj)) {
*val
} else {
NumberTrait::zero()
}
},
                    Option::None => { break; }
};
output_data.append(result);
j += 1;
};
i += 1;
};
b += 1;
};
TensorTrait::new(*self.shape, output_data.span())
} |
mod unique;
mod split;
mod split_to_sequence;
mod reverse_sequence;
mod optional;
|
use orion::operators::tensor::{Tensor, TensorTrait};
/// Cf: TensorTrait::optional docstring
fn optional<T, +Copy<T>, +Drop<T>, impl TOption: OptionTrait<T>>(
self: @Tensor<T>
) -> Option<Tensor<T>> {
Option::Some(*self)
}
|
use orion::operators::tensor::{TensorTrait, Tensor};
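/// Cf: TensorTrait::reverse_sequence docstring
/// Reverses the first `sequence_lens[i]` time steps of batch entry `i`,
/// leaving the remaining steps in place.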
fn reverse_sequence<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<T> {
let shape = *self.shape;
let mut data: Array<T> = array![];
let has_batch_axis: usize = match batch_axis {
Option::Some(value) => {
assert!((value != 0) || (value != 1), "batch_axis must be one of 1 or 0.");
value
},
Option::None => 0,
};
let has_time_axis: usize = match time_axis {
Option::Some(value) => {
assert!((value != 0) || (value != 1), "time_axis must be one of 1 or 0.");
value
},
Option::None => 1,
};
assert!(has_batch_axis != has_time_axis, "batch_axis and time_axis cannot be equal");
    assert((*self.shape).len() >= 2, 'Tensor of rank r >= 2');
    let control: bool = has_batch_axis == 0 && has_time_axis == 1;
let mut index: Array<usize> = reverse_index(*self.shape, sequence_lens, control);
loop {
match index.pop_front() {
Option::Some(ele) => { data.append(*((*self).data).at(ele)); },
Option::None => { break; }
}
};
TensorTrait::<T>::new(shape, data.span())
}
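/// Builds the permutation of flat indices that realizes the reversal;
/// `control` is true when batch_axis == 0 and time_axis == 1.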
fn reverse_index(shape: Span<usize>, sequence_lens: Tensor<usize>, control: bool) -> Array<usize> {
let x: usize = *shape.at(0);
let y: usize = *shape.at(1);
let mut result: Array<usize> = array![];
if control {
assert!(
            sequence_lens.data.len() <= x, "The length of sequence_lens cannot exceed the batch size"
);
let mut i: usize = 0;
while i != x {
let reverse: usize = (*sequence_lens.data.at(i));
            assert!(
                reverse <= y && reverse >= 1,
                "sequence_lens values must be between 1 and the time dimension"
            );
let mut j: usize = reverse - 1;
loop {
if j == 0 {
result.append(i * y + j);
break;
}
result.append(i * y + j);
j -= 1;
};
let current_index_len: usize = (i + 1) * y - 1;
let mut j: usize = result.len();
while j != current_index_len + 1 {
result.append(j);
j += 1;
};
i += 1;
};
} else {
assert!(
            sequence_lens.data.len() <= y, "The length of sequence_lens cannot exceed the batch size"
);
let mut tmp = ArrayTrait::<usize>::new();
let mut i: usize = 0;
while i != y {
let reverse: usize = *sequence_lens.data.at(i);
            assert!(
                reverse <= x && reverse >= 1,
                "sequence_lens values must be between 1 and the time dimension"
            );
let mut j: usize = reverse - 1;
loop {
if j == 0 {
tmp.append(j * y + i);
break;
}
tmp.append(j * y + i);
j -= 1;
};
let mut j: usize = reverse;
while j != x {
tmp.append(j * y + i);
j += 1;
};
i += 1;
};
let tmp = tmp.span();
let mut i: usize = 0;
while i != x {
let mut j: usize = 0;
while j != y {
result.append((*tmp.at(j * x + i)));
j += 1;
};
i += 1;
};
}
result
} |
use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor};
use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl};
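/// Cf: TensorTrait::split docstring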
fn split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
self: @Tensor<T>, axis: usize, num_outputs: Option<usize>, split: Option<Tensor<usize>>
) -> Array<Tensor<T>> {
let has_num_outputs = match num_outputs {
Option::Some => true,
Option::None => false,
};
let has_split = match split {
Option::Some => true,
Option::None => false,
};
assert(!(has_num_outputs && has_split), 'split or num_outputs not both.');
    assert(has_num_outputs || has_split, 'split or num_outputs required');
let mut splited_t: Array<Tensor<T>> = array![];
let rank = (*self).shape.len();
assert(axis < rank, 'axis out of dimensions');
if (has_num_outputs) {
splited_t = split_num_outputs(self, axis, num_outputs.unwrap());
} else {
splited_t = split_has_split(self, axis, split.unwrap());
}
splited_t
}
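/// Splits along `axis` into `num_outputs` chunks of size
/// ceil(dim / num_outputs), adjusting the final chunk for the remainder.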
fn split_num_outputs<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, mut axis: usize, num_outputs: usize
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut div: usize = 0;
let mut split: Array<usize> = array![];
if (*(*t).shape.at(axis) % num_outputs == 0) {
div = *(*t).shape.at(axis) / num_outputs;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
} else {
div = *(*t).shape.at(axis) / num_outputs + 1;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
match split.pop_front() {
Option::Some(split_last_one) => {
split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1));
},
            Option::None => { assert(false, 'split array is empty'); }
}
}
    let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
while i != (*t).shape.len() {
let s: usize = *(*t).shape.at(i);
sli.set(i, 0, 0);
sli.set(i, 1, s);
i += 1;
};
let mut i: usize = 0;
while i != split.len() {
let spl = *split.at(i);
sli.set(axis, 0, pos);
pos += spl;
sli.set(axis, 1, pos);
let end_ele_0 = match sli.get(axis, 0) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'failed to get end_ele_0');
0
},
};
let end_ele_1 = match sli.get(axis, 1) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'failed to get end_ele_1');
0
},
};
let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
let axes: Option<Span<usize>> = Option::None(());
let steps: Option<Span<usize>> = Option::None(());
let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
splited_t.append(sub_t);
i += 1;
};
splited_t
}
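/// Splits along `axis` using the explicit chunk sizes given in `split`.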
fn split_has_split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, axis: usize, split: Tensor<u32>
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
while i != (*t).shape.len() {
let s: usize = *(*t).shape.at(i);
sli.set(i, 0, 0);
sli.set(i, 1, s);
i += 1;
};
let mut i: usize = 0;
while i != split.data.len() {
let spl: usize = split.at(indices: array![i].span());
sli.set(axis, 0, pos);
pos += spl;
sli.set(axis, 1, pos);
let end_ele_0 = match sli.get(axis, 0) {
Option::Some(res) => res,
Option::None => { |
                assert(false, 'failed to get end_ele_0');
0
},
};
let end_ele_1 = match sli.get(axis, 1) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'failed to get end_ele_1');
0
},
};
let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
let axes: Option<Span<usize>> = Option::None(());
let steps: Option<Span<usize>> = Option::None(());
let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
splited_t.append(sub_t);
i += 1;
};
splited_t
} |
use core::option::OptionTrait;
use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor};
use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl};
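/// Cf: TensorTrait::split_to_sequence docstring
/// Defaults to size-1 chunks when `split` is absent and, when keepdims == 0,
/// squeezes `axis` from each output in that default case.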
fn split_to_sequence<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
self: @Tensor<T>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<T>> {
let has_split = match split {
Option::Some => true,
Option::None => false,
};
let mut has_num_outputs = false;
let mut split_unwrap: Tensor<usize> = TensorTrait::new(array![1].span(), array![1].span());
if (!has_split) {
let split_length = *(*self.shape).at(axis);
let mut split_data: Array<usize> = array![];
let mut i = 0;
while i != split_length {
split_data.append(1);
i += 1;
};
split_unwrap = TensorTrait::new(array![split_length].span(), split_data.span());
} else if (split.unwrap().data.len() == 1 && *(split.unwrap().shape.at(0)) == 1) {
has_num_outputs = true;
split_unwrap = split.unwrap();
} else {
split_unwrap = split.unwrap();
}
let mut splited_t: Array<Tensor<T>> = array![];
let rank = (*self).shape.len();
assert(axis < rank, 'axis out of dimensions');
if (has_num_outputs) {
splited_t = split_num_outputs(self, axis, *(split_unwrap.data).at(0));
} else {
splited_t = split_has_split(self, axis, split_unwrap);
}
if (keepdims == 0 && !has_split) {
let mut splited_t_temp: Array<Tensor<T>> = array![];
let mut i = 0;
while i != splited_t
.len() {
let mut shape: Array<i32> = array![];
let mut j = 0;
let shape_in_splited: Span<usize> = *splited_t.at(i).shape;
while j != shape_in_splited
.len() {
if (j != axis) {
shape.append((*shape_in_splited.at(j)).try_into().unwrap())
} |
j += 1;
};
splited_t_temp.append(splited_t[i].reshape(shape.span(), false));
i += 1;
};
return splited_t_temp;
}
splited_t
}
fn split_num_outputs<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, mut axis: usize, num_outputs: usize
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut div: usize = 0;
let mut split: Array<usize> = array![];
if (*(*t).shape.at(axis) % num_outputs == 0) {
div = *(*t).shape.at(axis) / num_outputs;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
} else {
div = *(*t).shape.at(axis) / num_outputs + 1;
let mut i = 0;
while i != num_outputs {
split.append(div);
i += 1;
};
match split.pop_front() {
Option::Some(split_last_one) => {
split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1));
},
            Option::None => { assert(false, 'split array is empty'); }
}
}
let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
while i != (*t)
.shape
.len() {
let s: usize = *(*t).shape.at(i);
sli.set(i, 0, 0);
sli.set(i, 1, s);
i += 1;
};
let mut i: usize = 0;
while i != split
.len() {
let spl = *split.at(i);
sli.set(axis, 0, pos);
pos += spl;
sli.set(axis, 1, pos);
let end_ele_0 = match sli.get(axis, 0) {
Option::Some(res) => res,
Option::None => {
                assert(false, 'failed to get end_ele_0');
0
},
};
let end_ele_1 = match sli.get(axis, 1) {
Option::Some(res) => res, |
Option::None => {
                assert(false, 'failed to get end_ele_1');
0
},
};
let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
let axes: Option<Span<usize>> = Option::None(());
let steps: Option<Span<usize>> = Option::None(());
let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
splited_t.append(sub_t);
i += 1;
};
splited_t
}
fn split_has_split<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
t: @Tensor<T>, axis: usize, split: Tensor<u32>
) -> Array<Tensor<T>> {
let mut splited_t: Array<Tensor<T>> = array![];
let mut sli: MutMatrix<usize> = MutMatrixImpl::new((*t).shape.len(), 2);
let mut pos: usize = 0;
let mut i = 0;
while i != (*t)
.shape
.len() {
let s: usize = *(*t).shape.at(i);
sli.set(i, 0, 0);
sli.set(i, 1, s);
i += 1;
};
let mut i: usize = 0;
while i != split
.data
.len() {
let spl: usize = split.at(indices: array![i].span());
sli.set(axis, 0, pos);
pos += spl;
sli.set(axis, 1, pos);
let end_ele_0 = match sli.get(axis, 0) {
Option::Some(res) => { res },
Option::None => {
                assert(false, 'failed to get end_ele_0');
0
},
};
let end_ele_1 = match sli.get(axis, 1) {
Option::Some(res) => { res },
Option::None => {
                assert(false, 'failed to get end_ele_1');
0
},
};
let starts: Span<usize> = array![sli.get(0, 0).unwrap(), end_ele_0].span();
let ends: Span<usize> = array![sli.get(0, 1).unwrap(), end_ele_1].span();
            let axes: Option<Span<usize>> = Option::None(());
let steps: Option<Span<usize>> = Option::None(());
let sub_t: Tensor<T> = t.slice(starts, ends, axes, steps);
splited_t.append(sub_t);
i += 1;
};
splited_t
} |
use alexandria_data_structures::array_ext::{SpanTraitExt, ArrayTraitExt};
use alexandria_sorting::merge_sort::merge;
use orion::numbers::{NumberTrait, U32IntoI32};
use orion::operators::tensor::core::{Tensor, TensorTrait, stride};
use orion::operators::tensor::helpers::{as_tensors_array, flatten_array_of_tensors};
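/// Cf: TensorTrait::unique docstring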
fn unique<
T,
+Copy<T>,
+Drop<T>,
+TensorTrait<T>,
+PartialOrd<T>,
+PartialEq<T>,
+PartialEq<Tensor<T>>,
+PartialOrd<Tensor<T>>
>(
self: @Tensor<T>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<T>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
let sorted = match sorted {
Option::Some(sorted) => sorted,
Option::None => true,
};
let (unique_elements, new_shape, indices, inverse_indices, count) = if axis.is_none() {
unique_flatten(self, sorted)
} else {
unique_along_axis(self, axis.unwrap(), sorted)
};
let unique_elements = Tensor::<T> { shape: new_shape, data: unique_elements };
let indices = Tensor::<i32> { shape: array![indices.len()].span(), data: indices };
let inverse_indices = Tensor::<
i32
> { shape: array![inverse_indices.len()].span(), data: inverse_indices };
let count = Tensor::<i32> { shape: array![count.len()].span(), data: count };
(unique_elements, indices, inverse_indices, count)
}
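/// Deduplicates the flattened data; returns the unique values, the new
/// shape, first-occurrence indices, inverse indices and per-value counts.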
fn unique_flatten<T, +Copy<T>, +Drop<T>, +PartialOrd<T>, +PartialEq<T>,>(
t: @Tensor<T>, sorted: bool
) -> (Span<T>, Span<usize>, Span<i32>, Span<i32>, Span<i32>) {
let mut indices: Array<i32> = array![];
let mut inverse_indices: Array<i32> = array![];
let mut count: Array<i32> = array![];
let mut unique_elements = (*t.data).unique();
let mut new_shape: Array<usize> = array![unique_elements.len()];
if (sorted) {
unique_elements = merge(unique_elements);
}
let mut unique_elements_span = unique_elements.span();
let mut data_cpy = *(t.data);
loop {
match unique_elements_span.pop_front() {
            Option::Some(value) => {
                let occurrences = data_cpy.occurrences_of(*value);
                count.append(occurrences.into());
let idx_in_data = data_cpy.index_of(*value).unwrap();
indices.append(idx_in_data.into());
},
Option::None => { break; }
}
};
unique_elements_span = unique_elements.span();
loop {
match data_cpy.pop_front() {
Option::Some(value) => {
let idx_in_uniques = unique_elements_span.index_of(*value).unwrap();
inverse_indices.append(idx_in_uniques.into());
},
Option::None => { break; }
}
};
(unique_elements.span(), new_shape.span(), indices.span(), inverse_indices.span(), count.span())
}
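/// Deduplicates the rank-(r-1) sub-tensors taken along `axis`.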
fn unique_along_axis<
T,
+Copy<T>,
+Drop<T>,
+PartialOrd<T>,
+PartialEq<T>,
+TensorTrait<T>,
+PartialEq<Tensor<T>>,
+PartialOrd<Tensor<T>>
>(
t: @Tensor<T>, axis: usize, sorted: bool
) -> (Span<T>, Span<usize>, Span<i32>, Span<i32>, Span<i32>) {
let mut new_shape: Array<usize> = array![];
let mut indices: Array<i32> = array![];
let mut inverse_indices: Array<i32> = array![];
let mut count: Array<i32> = array![];
let rank = (*t).shape.len();
assert(axis < rank, 'axis out of dimensions');
let all_tensors = as_tensors_array(t, axis);
let mut unique_tensors = all_tensors.unique();
let mut unique_tensors_len = unique_tensors.len();
let mut i = 0;
while i != rank {
new_shape.append(if axis == i {
unique_tensors_len
} else {
*(*t).shape.at(i)
});
i += 1;
};
if (sorted) {
unique_tensors = merge(unique_tensors);
}
let mut all_tensors_span = all_tensors.span();
let mut unique_tensors_span = unique_tensors.span();
loop {
match unique_tensors_span.pop_front() {
Option::Some(t) => {
                let occurrences = all_tensors_span.occurrences_of(*t);
                count.append(occurrences.into());
let idx_in_all = all_tensors_span.index_of(*t).unwrap();
indices.append(idx_in_all.into());
},
Option::None => { break; }
}
};
unique_tensors_span = unique_tensors.span();
loop {
match all_tensors_span.pop_front() {
Option::Some(t) => {
let idx_in_uniques = unique_tensors_span.index_of(*t).unwrap();
inverse_indices.append(idx_in_uniques.into());
},
Option::None => { break; }
}
};
let new_shape_span = new_shape.span();
let unique_elements = flatten_array_of_tensors(unique_tensors, axis, new_shape_span);
(unique_elements, new_shape_span, indices.span(), inverse_indices.span(), count.span())
} |
mod min_in_tensor;
mod min;
mod max_in_tensor;
mod max;
mod reduce_sum;
mod reduce_prod;
mod argmax;
mod argmin;
mod exp;
mod log;
mod arithmetic;
mod equal;
mod greater;
mod greater_equal;
mod less;
mod less_equal;
mod abs;
mod ceil;
mod sin;
mod cos;
mod asin;
mod cumsum;
mod flatten;
mod sinh;
mod tanh;
mod cosh;
mod acosh;
mod asinh;
mod atan;
mod xor;
mod or;
mod acos;
mod onehot;
mod sqrt;
mod concat;
mod gather;
mod sign;
mod and;
mod neg;
mod where;
mod not;
mod round;
mod scatter;
mod binarizer;
mod reduce_l2;
mod reduce_l1;
mod reduce_sum_square;
mod bitwise_and;
mod bitwise_xor;
mod bitwise_or;
mod gather_elements;
mod reduce_min;
mod shrink;
mod reduce_mean;
mod pow;
mod is_nan;
mod is_inf;
mod gather_nd;
mod reduce_log_sum;
mod erf;
mod reduce_log_sum_exp;
mod layer_normalization;
mod resize;
mod compress;
mod random_uniform_like;
mod range;
mod hann_window;
mod hamming_window;
mod blackman_window;
mod scatter_nd;
|
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::abs docstring
fn abs<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => { data_result.append((*item).abs()); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(z.shape, data_result.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::acos docstring
fn acos<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).acos()); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(self.shape, result.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::acosh docstring
fn acosh<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).acosh()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::and docstring
fn and(y: @Tensor<bool>, z: @Tensor<bool>) -> Tensor<bool> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<bool> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
result.append(*(*y.data)[indices_self] && *(*z.data)[indices_other]);
n += 1;
};
TensorTrait::new(broadcasted_shape, result.span())
}
|
use core::option::OptionTrait;
use core::traits::TryInto;
use orion::operators::tensor::{core::{Tensor, TensorTrait, ravel_index, unravel_index}, I32Tensor};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::numbers::NumberTrait;
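/// Cf: TensorTrait::argmax docstring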
fn argmax<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
self: @Tensor<T>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
let keepdims = keepdims.unwrap_or(true);
let select_last_index = select_last_index.unwrap_or(false);
let axis = if axis < 0 {
((*self.shape).len().try_into().unwrap() + axis).try_into().unwrap()
} else {
axis.try_into().unwrap()
};
    assert(axis < (*self.shape).len(), 'axis out of dimensions');
if (*self.shape).len() == 1 {
return find_argmax_1D::<T>(*self, axis, true, select_last_index);
}
let mut output_data: Array<i32> = array![];
let output_shape = reduce_output_shape(*self.shape, axis, false);
let output_data_len = len_from_shape(output_shape);
let MIN = NumberTrait::min_value();
let mut index: usize = 0;
while index != output_data_len {
let output_indices = unravel_index(index, output_shape);
let current_argmax = find_argmax(self, output_indices, axis, 0, MIN, 0, select_last_index);
output_data.append(current_argmax);
index += 1;
};
TensorTrait::<i32>::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span())
}
fn find_argmax_1D<
T,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut input: Tensor<T>, axis: usize, keepdims: bool, select_last_index: bool
) -> Tensor<i32> {
let mut output_data = ArrayTrait::<i32>::new();
let mut max = match input.data.pop_front() {
        Option::Some(item) => *item,
Option::None => {
return TensorTrait::<
i32
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
};
let mut max_index = 0;
let mut count = 0;
loop {
match input.data.pop_front() {
Option::Some(item) => {
count += 1;
if *item > max {
max = *item;
max_index = count;
} else {
if select_last_index && item == @max {
max_index = count;
}
};
},
Option::None => { break; }
};
};
output_data.append(max_index);
return TensorTrait::<
i32
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
fn find_argmax<
T,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
input: @Tensor<T>,
output_indices: Span<usize>,
axis: usize,
axis_index: usize,
max_value: T,
argmax: usize,
select_last_index: bool
) -> i32 {
if axis_index == *(*input.shape)[axis] {
return argmax.try_into().unwrap();
}
let input_indices = combine_indices(output_indices, axis_index, axis);
let input_index = ravel_index(*input.shape, input_indices);
let ele = *(*input.data)[input_index];
let (new_max_value, new_argmax) = if ele > max_value {
(ele, axis_index)
} else {
if select_last_index && ele == max_value {
(ele, axis_index)
} else {
(max_value, argmax)
}
};
return find_argmax(
input,
output_indices,
axis,
axis_index + 1_usize,
new_max_value,
new_argmax,
select_last_index
);
} |
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{reduce_output_shape, combine_indices, len_from_shape};
use orion::numbers::NumberTrait;
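/// Cf: TensorTrait::argmin docstring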
fn argmin<
T,
MAG,
impl UsizeTensor: TensorTrait<usize>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
self: @Tensor<T>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
let keepdims = match keepdims {
Option::Some(val) => val,
Option::None => true,
};
let select_last_index = match select_last_index {
Option::Some(val) => val,
Option::None => false,
};
    assert(axis < (*self.shape).len(), 'axis out of dimensions');
if (*self.shape).len() == 1 {
return find_argmin_1D(*self, axis, true, select_last_index);
}
let mut output_data: Array<u32> = array![];
let output_shape = reduce_output_shape(*self.shape, axis, false);
let output_data_len = len_from_shape(output_shape);
let MAX = NumberTrait::max_value();
let mut index: usize = 0;
while index != output_data_len {
let output_indices = unravel_index(index, output_shape);
let current_argmin = find_argmin(self, output_indices, axis, 0, MAX, 0, select_last_index);
output_data.append(current_argmin);
index += 1;
};
TensorTrait::<usize>::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span())
}
fn find_argmin_1D<
T,
impl UsizeTensor: TensorTrait<usize>,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut input: Tensor<T>, axis: usize, keepdims: bool, select_last_index: bool
) -> Tensor<usize> {
let mut output_data = ArrayTrait::<usize>::new();
let mut min = match input.data.pop_front() {
Option::Some(item) => *item, |
Option::None => {
return TensorTrait::<
usize
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
};
let mut min_index = 0;
let mut count = 0;
loop {
match input.data.pop_front() {
Option::Some(item) => {
count += 1;
if *item < min {
min = *item;
min_index = count;
} else {
if select_last_index && item == @min {
min_index = count;
}
};
},
Option::None => { break; }
};
};
output_data.append(min_index);
return TensorTrait::<
usize
>::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span());
}
fn find_argmin<
T,
impl TPartialOrd: PartialOrd<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
input: @Tensor<T>,
output_indices: Span<usize>,
axis: usize,
axis_index: usize,
min_value: T,
argmin: usize,
select_last_index: bool
) -> usize {
if axis_index == *(*input.shape)[axis] {
return argmin;
}
let input_indices = combine_indices(output_indices, axis_index, axis);
let input_index = ravel_index(*input.shape, input_indices);
let ele = *(*input.data)[input_index];
let (new_min_value, new_argmin) = if ele < min_value {
(ele, axis_index)
} else {
if select_last_index && ele == min_value {
(ele, axis_index)
} else {
(min_value, argmin)
}
};
return find_argmin(
input,
output_indices,
axis,
axis_index + 1_usize,
new_min_value,
new_argmin,
select_last_index
);
} |
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index,};
use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape,};
use orion::utils::saturate;
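/// Cf: TensorTrait::add docstring
/// Broadcasts both operands to a common shape, then adds element-wise by
/// mapping every output index back to an index in each source tensor.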
fn add<
T, impl TTensor: TensorTrait<T>, impl TAdd: Add<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] + *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
fn add_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TAdd: Add<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::zero() {
return *self;
}
let mut input_data = *self.data;
let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele + val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_add<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TAdd: Add<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> { |
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] + *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
};
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
fn sub<
T, impl TTensor: TensorTrait<T>, impl TSub: Sub<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] - *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
fn sub_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TSub: Sub<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::zero() {
return *self;
}
let mut input_data = *self.data;
    let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele - val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_sub<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TSub: Sub<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] - *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
};
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
fn mul<
T, impl TTensor: TensorTrait<T>, impl TMul: Mul<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
        let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] * *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
fn mul_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TMul: Mul<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::one() {
return *self;
}
let mut input_data = *self.data;
let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele * val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_mul<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TMul: Mul<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] * *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
    };
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
fn div<
T, impl TTensor: TensorTrait<T>, impl TMul: Div<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result.append(*(*self.data)[indices_self] / *(*other.data)[indices_other]);
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
fn div_by_scalar<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TDiv: Div<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
self: @Tensor<T>, val: T
) -> Tensor<T> {
if val == NumberTrait::one() {
return *self;
}
let mut input_data = *self.data;
let mut data_result = array![];
loop {
match input_data.pop_front() {
Option::Some(ele) => { data_result.append(*ele / val); },
Option::None => { break; }
};
};
TensorTrait::<T>::new(*self.shape, data_result.span())
}
fn saturated_div<
T,
Q,
impl TTensor: TensorTrait<T>,
impl QTensor: TensorTrait<Q>,
impl TDiv: Div<T>,
impl TPartialOrd: PartialOrd<T>,
impl TTryInto: TryInto<T, Q>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl QDrop: Drop<Q>,
>(
self: @Tensor<T>, other: @Tensor<T>, min_saturation: T, max_saturation: T
) -> Tensor<Q> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
saturate(
min_saturation,
max_saturation,
*(*self.data)[indices_self] / *(*other.data)[indices_other]
)
.try_into()
.unwrap()
);
n += 1;
};
TensorTrait::<Q>::new(broadcasted_shape, result.span())
}
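/// Broadcasted elementwise division performed after downcasting each
/// operand from `T` to the (typically narrower) type `D`.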
fn div_downcast<
T,
D,
impl TTensor: TensorTrait<T>,
impl DTensor: TensorTrait<D>,
impl DDiv: Div<D>,
impl TTryIntoD: TryInto<T, D>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl DCopy: Copy<D>,
impl DDrop: Drop<D>
>(
self: @Tensor<T>, other: @Tensor<T>
) -> Tensor<D> {
let broadcasted_shape = broadcast_shape(*self.shape, *other.shape);
let mut result = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*self.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*other.shape, indices_broadcasted);
result
.append(
(*(*self.data)[indices_self]).try_into().unwrap()
/ (*(*other.data)[indices_other]).try_into().unwrap()
);
n += 1;
};
TensorTrait::<D>::new(broadcasted_shape, result.span())
}
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::asin docstring
fn asin<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).asin()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::asinh docstring
fn asinh<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).asinh()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
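/// Cf: TensorTrait::atan docstring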
fn atan<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).atan()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::binarizer docstring
fn binarizer<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut self: Tensor<T>, threshold: Option<T>
) -> Tensor<T> {
let threshold: T = if threshold.is_some() {
threshold.unwrap()
} else {
NumberTrait::zero()
};
let mut binarized_data: Array<T> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => {
if (*item) > threshold {
binarized_data.append(NumberTrait::one());
} else {
binarized_data.append(NumberTrait::zero());
}
},
Option::None => { break; }
};
};
TensorTrait::new(self.shape, binarized_data.span())
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::bitwise_and docstring
fn bitwise_and<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<T> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
let lhs = *(*y.data)[indices_self];
let rhs = *(*z.data)[indices_other];
result.append(NumberTrait::bitwise_and(lhs, rhs));
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::bitwise_or docstring
fn bitwise_or<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<T> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
let lhs = *(*y.data)[indices_self];
let rhs = *(*z.data)[indices_other];
result.append(NumberTrait::bitwise_or(lhs, rhs));
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::bitwise_xor docstring
fn bitwise_xor<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
let broadcasted_shape = broadcast_shape(*y.shape, *z.shape);
let mut result: Array<T> = array![];
let num_elements = len_from_shape(broadcasted_shape);
let mut n: usize = 0;
while n != num_elements {
let indices_broadcasted = unravel_index(n, broadcasted_shape);
let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted);
let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted);
let lhs = *(*y.data)[indices_self];
let rhs = *(*z.data)[indices_other];
result.append(NumberTrait::bitwise_xor(lhs, rhs));
n += 1;
};
TensorTrait::<T>::new(broadcasted_shape, result.span())
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
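/// Cf: TensorTrait::blackman_window docstring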
fn blackman_window<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TAdd: Add<T>,
impl TSub: Sub<T>,
impl TMul: Mul<T>,
impl TDiv: Div<T>,
impl TTensorAdd: Add<Tensor<T>>,
impl TPartialOrd: PartialOrd<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
size: T, PI: T, periodic: Option<usize>
) -> Tensor<T> {
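// Build the classic Blackman coefficients from integer arithmetic:
// n25 = 3^3 - 2 = 25, alpha = 21/50 = 0.42, beta = 2/25 = 0.08 and
// n_0_5 = -0.5, so the window computed below is
// w[n] = 0.42 - 0.5 * cos(2*PI*n / N_1) + 0.08 * cos(4*PI*n / N_1).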
let start: T = NumberTrait::zero();
let one_step: T = NumberTrait::one();
let two: T = one_step + one_step;
let three: T = two + one_step;
let n25: T = three.pow(three) - two;
let alpha: T = (n25 - two * two) / (n25 * two);
let beta: T = two / n25;
let n_0_5: T = (one_step - two) / two;
let ni = TensorTrait::range(start, size, one_step);
assert((ni.shape).len() == 1, 'Unexpected shape 1.');
let mut N_1 = size;
if periodic != Option::Some(1) {
N_1 = N_1 - one_step;
};
let len = *(ni.shape).at(0);
let mut arr1: Array<T> = array![];
let mut i: usize = 0;
while i != len {
let v = *(ni.data).at(i);
let r = (v * (PI * two)) / N_1;
arr1.append(r);
i += 1;
};
let window_cos = TensorTrait::<T>::new(ni.shape, arr1.span()).cos();
i = 0;
let mut a1: Array<T> = array![];
while i != len {
let v = *(window_cos.data).at(i);
let r = v * n_0_5;
a1.append(r);
i += 1;
};
let window1 = TensorTrait::<T>::new(ni.shape, a1.span());
let mut arr2: Array<T> = array![];
i = 0;
while i != len {
let v = *(ni.data).at(i);
let r = v * (PI * two * two) / N_1;
arr2.append(r);
i += 1;
};
let window_cos_2 = TensorTrait::<T>::new(ni.shape, arr2.span()).cos();
let mut a2: Array<T> = array![];
i = 0;
while i != len {
let v = *(window_cos_2.data).at(i);
let r = v * beta + alpha;
a2.append(r);
i += 1;
};
let window2 = TensorTrait::<T>::new(ni.shape, a2.span());
let mut arr: Array<T> = array![];
i = 0;
while i != len {
let v1 = *(window1.data).at(i);
let v2 = *(window2.data).at(i);
let r = v1 + v2;
arr.append(r);
i += 1;
};
TensorTrait::<T>::new(ni.shape, arr.span())
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::ceil docstring
fn ceil<
T,
MAG,
impl FFixedTrait: FixedTrait<T, MAG>,
impl FTensor: TensorTrait<T>,
impl FCopy: Copy<T>,
impl FDrop: Drop<T>
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => { data_result.append((*item).ceil()); },
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::U32TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
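/// Cf: TensorTrait::compress docstring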
fn compress<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
self: @Tensor<T>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<T> {
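// `999` is used as a sentinel meaning "no axis was given": the input is
// treated as flattened and elements are selected by linear index.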
let axis = match axis {
Option::Some(val) => val,
Option::None => 999
};
let data_rank = (*self.shape).len();
let condition_rank = (condition.shape).len();
assert((data_rank >= 1), 'data rank must be >= 1');
assert((condition_rank == 1), 'condition rank must be 1');
let mut data_shape = *self.shape;
if (axis != 999) {
assert(*data_shape.at(axis) >= condition.data.len(), 'index out of bound');
}
let mut output_shape = array![];
let mut index_data = array![];
let mut output_data = array![];
let mut condition_data = condition.data;
let mut ind = 0;
let mut condition_data_clone = condition_data.clone();
let mut output = 0;
loop {
match condition_data_clone.pop_front() {
Option::Some(val) => {
if (*val != 0) {
output += 1;
}
ind += 1;
},
Option::None => { break; }
};
};
if (axis == 999) {
output_shape.append(output);
let mut total_shape = 1;
loop {
match data_shape.pop_front() {
Option::Some(val) => { total_shape *= *val; },
Option::None => { break; }
};
};
let mut ind = 0;
loop {
match condition_data.pop_front() {
Option::Some(val) => {
if (ind == total_shape) {
break;
}
if (*val != 0) {
output_data.append(*self.data[ind]);
}
ind += 1;
},
Option::None => { break; }
};
};
} else {
let mut ind = 0;
let mut loop_breaker = 1;
let mut other_loop_breaker = 1;
let mut multiplier = 1;
let mut data_shape_clone = data_shape.clone();
loop {
match data_shape_clone.pop_front() {
Option::Some(val) => {
if (ind == axis) {
output_shape.append(output);
} else {
output_shape.append(*val);
if (ind > axis) {
loop_breaker *= *val;
}
if (ind >= axis) {
multiplier *= *val;
}
if (ind < axis) {
other_loop_breaker *= *val;
}
}
ind += 1;
},
Option::None => { break; }
};
};
let mut ind = 0;
let mut inner_index: usize = 0;
loop {
if (ind == other_loop_breaker) {
break;
}
let mut condition_data_clone = condition_data.clone();
inner_index = *data_shape.at(axis) * ind;
loop {
match condition_data_clone.pop_front() {
Option::Some(val) => {
if (*val != 0) {
let result = inner_index * loop_breaker;
let mut data_ind: usize = result;
loop {
if data_ind == result + loop_breaker {
break;
}
index_data.append(data_ind);
data_ind += 1;
};
}
inner_index += 1;
},
Option::None => { break; }
};
};
ind += 1;
};
loop {
match index_data.pop_front() {
Option::Some(val) => { output_data.append(*self.data[val]); },
Option::None => { break; }
};
};
}
TensorTrait::<T>::new(output_shape.span(), output_data.span())
}
use orion::operators::tensor::helpers::replace_index;
use orion::operators::tensor::{TensorTrait, Tensor};
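/// Cf: TensorTrait::concat docstring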
fn concat<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
mut tensors: Span<Tensor<T>>, axis: usize
) -> Tensor<T> {
assert(tensors.len() >= 2, 'Input tensors must be > 1');
let base_tensor = *tensors.at(0);
let base_shape = base_tensor.shape;
let dimension = base_shape.len();
assert(dimension > axis, 'Out of bounds for dimension');
validate_shapes(tensors, base_shape, axis);
let output_size = compute_output_size(base_shape, tensors, axis);
let output_data: Array<T> = concatenate_data(tensors, axis, base_shape);
TensorTrait::<T>::new(output_size.span(), output_data.span())
}
fn validate_shapes<T>(mut tensors: Span<Tensor<T>>, mut base_shape: Span<usize>, axis: usize) {
loop {
match tensors.pop_front() {
Option::Some(tensor) => {
assert(base_shape.len() == (*tensor.shape).len(), 'Dimension not the same');
let mut axis_index = 0;
let mut tensor_shape = *tensor.shape;
let mut base_shape_copy = base_shape;
loop {
match tensor_shape.pop_front() {
Option::Some(tensor_shape_i) => {
let base_shape_i = base_shape_copy.pop_front().unwrap();
if axis_index != axis {
assert(base_shape_i == tensor_shape_i, 'Shape is not the same');
}
axis_index += 1;
},
Option::None => { break; }
};
};
},
Option::None => { break; }
};
};
}
fn compute_output_size<T>(
mut base_shape: Span<usize>, mut tensors: Span<Tensor<T>>, axis: usize
) -> Array<u32> {
let mut output_size: Array<usize> = array![];
let mut axis_size = 0;
loop {
match tensors.pop_front() {
Option::Some(tensor) => { axis_size += *(*tensor.shape).at(axis); },
Option::None => { break; }
};
};
let mut shape_index = 0;
loop {
match base_shape.pop_front() {
Option::Some(item) => {
if shape_index == axis {
output_size.append(axis_size);
} else {
output_size.append(*item);
}
shape_index += 1;
},
Option::None => { break; }
};
};
output_size
}
fn concatenate_data<T, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
mut tensors: Span<Tensor<T>>, axis: usize, base_shape: Span<usize>
) -> Array<T> {
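// For each outer block (the product of the dimensions before `axis`),
// append the matching contiguous slice from every input tensor in turn.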
let mut output_data: Array<T> = array![];
let total_loops = product_upto(base_shape, axis);
let mut outer_loop_index = 0;
while outer_loop_index != total_loops {
let mut tensors_copy = tensors;
loop {
match tensors_copy.pop_front() {
Option::Some(tensor) => {
let slice_len = (*tensor.data).len() / total_loops;
let mut inner_index = 0;
while inner_index != slice_len {
output_data
.append(*(*tensor.data).at(slice_len * outer_loop_index + inner_index));
inner_index += 1;
};
},
Option::None => { break; }
};
};
outer_loop_index += 1;
};
output_data
}
fn product_upto(mut shape: Span<usize>, upto: usize) -> usize {
let mut total = 1;
let mut index = 0;
loop {
match shape.pop_front() {
Option::Some(val) => {
if index == upto {
break;
}
total *= *val;
index += 1;
},
Option::None => { break; }
};
};
total
}
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::cos docstring
fn cos<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).cos()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::cosh docstring
fn cosh<
T,
MAG,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut self: Tensor<T>
) -> Tensor<T> {
let mut result = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => { result.append((*item).cosh()); },
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}