fn squeeze(self: @Tensor<T>, axes: Option<Span<u32>>) -> Tensor<T>;
fn clip(self: @Tensor<T>, min: Option<T>, max: Option<T>) -> Tensor<T>;
fn sign(self: @Tensor<T>) -> Tensor<T>;
fn identity(self: @Tensor<T>) -> Tensor<T>;
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool>;
fn where(self: @Tensor<T>, x: @Tensor<T>, y: @Tensor<T>) -> Tensor<T>;
fn resize(
self: @Tensor<T>,
roi: Option<Tensor<T>>,
scales: Option<Span<T>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<
orion::operators::tensor::math::resize::TRANSFORMATION_MODE
>,
cubic_coeff_a: Option<T>,
exclude_outside: Option<bool>,
extrapolation_value: Option<T>,
keep_aspect_ratio_policy: Option<
orion::operators::tensor::math::resize::KEEP_ASPECT_RATIO_POLICY
>,
mode: Option<orion::operators::tensor::math::resize::MODE>,
nearest_mode: Option<orion::operators::tensor::math::resize::NEAREST_MODE>,
) -> Tensor<T>;
fn round(self: @Tensor<T>) -> Tensor<T>;
fn scatter(
self: @Tensor<T>,
updates: Tensor<T>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<T>;
fn trilu(self: @Tensor<T>, upper: bool, k: i64) -> Tensor<T>;
fn bitwise_and(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
fn bitwise_or(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
fn bitwise_xor(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
fn reduce_l1(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
fn reduce_l2(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
fn reduce_sum_square(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
fn constant_of_shape(shape: Span<usize>, value: T) -> Tensor<T>;
fn gather_elements(self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<T>;
fn binarizer(self: @Tensor<T>, threshold: Option<T>) -> Tensor<T>;
fn array_feature_extractor(self: @Tensor<T>, indices: Tensor<usize>) -> Tensor<T>;
fn shrink(self: Tensor<T>, bias: Option<T>, lambd: Option<T>) -> Tensor<T>;
fn reduce_mean(
self: @Tensor<T>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
fn reduce_min(
self: @Tensor<T>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
fn pow(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
fn reduce_prod(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
fn is_inf(
self: @Tensor<T>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool>;
fn is_nan(self: @Tensor<T>) -> Tensor<bool>;
fn not(self: @Tensor<T>) -> Tensor<T>;
fn reduce_log_sum(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
fn reduce_log_sum_exp(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
fn erf(self: @Tensor<T>) -> Tensor<T>;
fn unique(
self: @Tensor<T>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<T>, Tensor<i32>, Tensor<i32>, Tensor<i32>);
fn gather_nd(self: @Tensor<T>, indices: Tensor<usize>, batch_dims: Option<usize>) -> Tensor<T>;
fn compress(self: @Tensor<T>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<T>;
fn layer_normalization(
self: @Tensor<T>,
scale: @Tensor<T>,
B: Option<@Tensor<T>>,
axis: Option<i32>,
epsilon: Option<T>,
stash_type: Option<usize>,
) -> (Tensor<T>, Tensor<T>, Tensor<T>);
fn split(
self: @Tensor<T>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<T>>;
fn reverse_sequence(
self: @Tensor<T>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<T>;
fn scatter_nd(
self: @Tensor<T>, updates: Tensor<T>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<T>;
fn dynamic_quantize_linear(self: @Tensor<T>) -> (Tensor<u32>, Tensor<T>, Tensor<T>);
fn optional(self: @Tensor<T>) -> Option<Tensor<T>>;
fn split_to_sequence(
self: @Tensor<T>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<T>>;
fn range(start: T, end: T, step: T) -> Tensor<T>;
fn hann_window(size: T, periodic: Option<usize>) -> Tensor<T>;
fn hamming_window(size: T, periodic: Option<usize>) -> Tensor<T>;
fn blackman_window(size: T, periodic: Option<usize>) -> Tensor<T>;
fn random_uniform_like(
tensor: @Tensor<T>, high: Option<T>, low: Option<T>, seed: Option<usize>
) -> Tensor<T>;
fn label_encoder(
self: @Tensor<T>,
default_list: Option<Span<T>>,
default_tensor: Option<Tensor<T>>,
keys: Option<Span<T>>,
keys_tensor: Option<Tensor<T>>,
values: Option<Span<T>>,
values_tensor: Option<Tensor<T>>
) -> Tensor<T>;
}
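// Constructs a `Tensor` from a shape and a flat data span, after checking that
// the element count implied by `shape` matches `data.len()`.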
fn new_tensor<T>(shape: Span<usize>, data: Span<T>) -> Tensor<T> {
check_shape::<T>(shape, data);
Tensor::<T> { shape, data }
}
fn constant_of_shape<T, impl FCopy: Copy<T>, impl FDrop: Drop<T>,>(
shape: Span<usize>, value: T
) -> Tensor<T> {
let mut data = ArrayTrait::new();
let mut length = len_from_shape(shape);
loop {
match length.into() {
0 => { break (); },
_ => {
data.append(value.clone());
length -= 1;
}
}
};
Tensor::<T> { shape, data: data.span() }
}
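// Converts multi-dimensional `indices` into a single flat, row-major offset for
// a tensor of the given `shape`, accumulating index * stride from the last
// dimension to the first.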
fn ravel_index(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
assert(shape.len() == indices.len(), 'shape & indices length unequal');
let mut raveled_index: usize = 0;
let mut stride: usize = 1;
loop {
match shape.pop_back() {
Option::Some(i) => {
let index = *indices.pop_back().unwrap();
raveled_index += index * stride;
stride *= *i;
},
Option::None => { break; }
};
};
raveled_index
}
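// Inverse of `ravel_index`: turns a flat offset back into per-dimension
// coordinates for `shape`, dividing out the remaining stride dimension by dimension.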
fn unravel_index(index: usize, mut shape: Span<usize>) -> Span<usize> {
assert(shape.len() > 0, 'shape cannot be empty');
let mut result = ArrayTrait::new();
let mut remainder = index;
let mut stride = len_from_shape(shape);
loop {
match shape.pop_front() {
Option::Some(i) => {
stride /= *i;
let coord = remainder / stride;
remainder = remainder % stride;
result.append(coord);
},
Option::None => { break; }
};
};
return result.span();
}
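// Computes row-major strides: each dimension's stride is the product of the
// sizes of all dimensions that follow it.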
fn stride(mut shape: Span<usize>) -> Span<usize> {
let mut strides = ArrayTrait::new();
let mut stride = 1;
loop {
match shape.pop_back() {
Option::Some(size) => {
strides.append(stride);
stride *= *size;
},
Option::None => { break; }
};
};
strides.reverse().span()
}
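// Reshapes the tensor to `target_shape` without copying its data. A `-1` entry
// is inferred from the remaining dimensions (only one is allowed); a `0` entry
// reuses the corresponding input dimension unless `allowzero` is set, in which
// case the dimension stays zero.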
fn reshape<T, +Copy<Tensor<T>>>(
self: @Tensor<T>, target_shape: Span<i32>, allowzero: bool
) -> Tensor<T> {
let mut total_elements = 1;
let mut shape = *self.shape;
loop {
match shape.pop_front() {
Option::Some(val) => total_elements *= *val,
Option::None => { break; }
};
};
let mut elements_so_far = 1;
let mut inferred_index = Option::None;
let mut target_shape_clone = target_shape.clone();
let mut i: usize = 0;
loop {
match target_shape_clone.pop_front() {
Option::Some(dim) => {
if *dim == -1 {
if inferred_index.is_none() {
inferred_index = Option::Some(i);
} else {
panic!("Only one dimension can be inferred"); |
}
} else if *dim == 0 && allowzero == false {
if i >= (*self.shape).len() {
panic!("Dimension out of bounds for using original dimension value");
}
elements_so_far *= *(*self).shape.at(i);
} else if *dim >= 0 {
elements_so_far *= (*dim).try_into().unwrap();
} else {
panic!("Invalid dimension size");
};
},
Option::None => { break; }
};
i += 1;
};
let mut target_shape_clone = target_shape.clone();
let mut inferred_shape = ArrayTrait::<u32>::new();
i = 0;
loop {
match target_shape_clone.pop_front() {
Option::Some(dim) => {
if *dim == -1 {
inferred_shape.append(total_elements / elements_so_far)
} else if *dim == 0 {
if allowzero == true {
inferred_shape
.append(
0
)
} else if i < (*self.shape).len() {
inferred_shape
.append(
*(*self).shape.at(i)
)
} else {
panic!("Dimension out of bounds for using original dimension value");
}
} else {
inferred_shape
.append((*dim).try_into().unwrap())
};
},
Option::None => { break; }
}
i += 1;
};
new_tensor(inferred_shape.span(), *self.data)
}
fn at_tensor<T>(self: @Tensor<T>, indices: Span<usize>) -> @T {
assert(indices.len() == (*self.shape).len(), 'indices not match dimensions');
let data = *self.data;
return data.at(ravel_index(*self.shape, indices));
}
fn tensor_eq<T, impl TPartialEq: PartialEq<T>>(mut lhs: Tensor<T>, mut rhs: Tensor<T>,) -> bool {
let mut is_eq = true;
loop {
if lhs.shape.len() == 0 || !is_eq {
break;
}
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
loop {
if lhs.data.len() == 0 || !is_eq {
break;
}
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
return is_eq;
}
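// Returns a slice of the tensor. `starts`/`ends` select a range per listed
// axis (defaulting to all axes with step 1 when `axes`/`steps` are None); starts
// and ends are clamped to each dimension's size before the data is gathered.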
fn slice<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: @Tensor<T>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<T> {
let axes = match axes {
Option::Some(axes) => axes,
Option::None => {
let mut ret: Array<usize> = ArrayTrait::new();
let mut i: usize = 0;
let stop_i = starts.len() - 1;
loop {
ret.append(i);
if i == stop_i {
break ();
}
i += 1;
};
ret.span()
},
};
let steps = match steps {
Option::Some(steps) => steps,
Option::None => {
let mut ret: Array<usize> = ArrayTrait::new();
let mut i: usize = 0;
let stop_i = starts.len() - 1;
loop {
ret.append(1);
if i == stop_i {
break ();
}
i += 1;
};
ret.span()
},
};
assert(starts.len() == ends.len(), 'Ends and starts len unequal');
assert(starts.len() == axes.len(), 'Axes and starts len unequal');
assert(starts.len() == steps.len(), 'Steps and starts len unequal');
let mut is_empty: bool = false;
let mut output_shape: Array<usize> = ArrayTrait::new();
let mut processed_starts: Array<usize> = ArrayTrait::new();
let mut processed_ends: Array<usize> = ArrayTrait::new();
let mut processed_steps: Array<usize> = ArrayTrait::new();
let mut shape = *self.shape;
let mut i: usize = 0;
loop {
match shape.pop_front() {
Option::Some(ele) => {
let (axis_index, is_found) = match axes.index_of(i) {
Option::Some(axis_index) => (axis_index, true),
Option::None => (0, false),
};
let mut processed_params = (0, 0, 0, 0);
if is_found {
let mut start: usize = *ele;
let mut end: usize = *ele;
if *starts.at(axis_index) < *ele {
start = *starts.at(axis_index);
}
if *ele > *ends.at(axis_index) {
end = *ends.at(axis_index);
};
if start > *ele {
start = *ele;
};
if end > *ele {
end = *ele;
};
if start >= end {
is_empty = true;
} else {
let dim = (end - start + (*steps.at(axis_index) - 1))
/ *steps.at(axis_index);
if dim == 0 {
is_empty = true;
} else {
processed_params = (start, end, *steps.at(axis_index), dim);
};
};
} else {
processed_params = (0, *ele, 1, *ele);
}
let (start, end, step, shape) = processed_params;
processed_starts.append(start);
processed_ends.append(end);
processed_steps.append(step);
output_shape.append(shape);
i += 1;
},
Option::None => { break; }
};
};
let mut output_data: Array<T> = ArrayTrait::new();
if is_empty {
return Tensor::<T> { shape: output_shape.span(), data: output_data.span() };
}
let mut data = *self.data;
let mut j: usize = 0;
loop {
match data.pop_front() {
Option::Some(ele) => {
let mut indices = unravel_index(j, *self.shape);
let mut is_included = false;
let mut shape = *self.shape;
let mut starts = processed_starts.span();
let mut ends = processed_ends.span();
let mut steps = processed_steps.span();
loop {
match shape.pop_front() {
Option::Some => {
let start = *starts.pop_front().unwrap();
let end = *ends.pop_front().unwrap();
let step = *steps.pop_front().unwrap();
let index = *indices.pop_front().unwrap();
if index < start || index >= end {
is_included = false;
break ();
}
if (index - start) % step == 0 {
is_included = true;
} else {
is_included = false;
break ();
}
},
Option::None => { break; }
};
};
if is_included {
output_data.append(*ele);
}
j += 1;
},
Option::None => { break; }
};
};
return TensorTrait::new(output_shape.span(), output_data.span());
}
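// Returns the indices of all non-zero elements as a tensor of shape
// [rank, n_non_zero], with one row of coordinates per input dimension.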
fn nonzero<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TPartialEq: PartialEq<T>,
impl TDrop: Drop<T>,
impl TCopy: Copy<T>,
impl TNumber: NumberTrait<T, MAG>
>(
self: @Tensor<T>
) -> Tensor<usize> {
let mut indexes_of_dimensions: Array<usize> = ArrayTrait::new();
let mut self_data_copy = *self.data;
let mut j: usize = 0;
loop {
match self_data_copy.pop_front() {
Option::Some(val) => {
if *val != NumberTrait::zero() {
let indices = unravel_index(j, *self.shape);
let mut i: usize = 0;
let mut self_shape_copy = *self.shape;
loop {
match self_shape_copy.pop_front() {
Option::Some => {
indexes_of_dimensions.append(*indices.at(i));
i += 1;
},
Option::None => { break (); }
};
};
}
j += 1;
},
Option::None => { break (); }
};
};
let indexes_of_dimensions_span = indexes_of_dimensions.span();
let mut output_data: Array<usize> = ArrayTrait::new();
if indexes_of_dimensions_span.len() == 0 {
return Tensor::<
usize
> { shape: array![(*self.shape).len(), 0].span(), data: output_data.span() };
}
let stop_k = (indexes_of_dimensions_span.len() / (*self.shape).len()) - 1;
let mut self_shape_copy = *self.shape;
let mut i: usize = 0;
loop {
match self_shape_copy.pop_front() {
Option::Some => {
let mut k: usize = 0;
loop {
output_data.append(*indexes_of_dimensions_span.at((*self.shape).len() * k + i));
if k == stop_k {
break ();
}
k += 1;
};
i += 1;
},
Option::None => { break (); }
};
};
return Tensor::<
usize
> { shape: array![(*self.shape).len(), stop_k + 1].span(), data: output_data.span() };
}
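// Removes size-1 dimensions. When `axes` is provided only the listed axes are
// removed (each is asserted to have size 1); otherwise every size-1 dimension is dropped.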
fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<u32>>) -> Tensor<T> {
let target_shape = match axes {
Option::Some(mut axes) => {
let mut axis_squeezed = 0;
let mut shape = *self.shape;
loop {
match axes.pop_front() {
Option::Some(axis) => {
let mut reshape: Array<usize> = ArrayTrait::new();
let mut index = 0;
let axis = if *axis < 0 {
assert(
*axis <= (*self.shape).len().into(), 'axis out of accepted range'
);
(*self.shape).len().into() - *axis
} else {
assert(
*axis < (*self.shape).len().into(), 'axis out of accepted range'
);
*axis
};
loop {
match shape.pop_front() {
Option::Some(shape) => {
let squeezed = if axis >= axis_squeezed {
axis - axis_squeezed
} else {
axis
};
if index == squeezed {
assert(*shape == 1, 'shape entry not equal to one');
axis_squeezed += 1;
} else {
reshape.append(*shape);
}
},
Option::None => { break; },
};
index += 1;
};
shape = reshape.span();
},
Option::None => { break shape; },
};
}
},
Option::None => {
let mut reshape: Array<usize> = ArrayTrait::new();
let mut shape = *self.shape;
loop {
match shape.pop_front() {
Option::Some(shape) => { if *shape != 1 {
reshape.append(*shape);
} },
Option::None => { break reshape.span(); },
};
}
},
};
return Tensor::<T> { shape: target_shape, data: *self.data };
}
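// Inserts size-1 dimensions at the positions listed in `axes`; the output rank
// equals the input rank plus the number of inserted axes.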
fn unsqueeze<T>(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T> {
let dedupped_array = axes.dedup();
assert(dedupped_array.len() == axes.len(), 'Duplicated input axes');
let mut self_shape_copy = *self.shape;
let mut i: usize = 0;
let mut added_axes_count: usize = 0;
let mut output_shape: Array<usize> = ArrayTrait::new();
loop {
if axes.contains(i + added_axes_count) {
output_shape.append(1);
added_axes_count += 1;
} else {
match self_shape_copy.pop_front() {
Option::Some(val) => {
output_shape.append(*val);
i += 1;
},
Option::None => { break (); }
};
};
};
let mut j: usize = output_shape.len();
loop {
if axes.contains(j) {
output_shape.append(1);
} else {
break ();
}
j += 1;
};
assert(output_shape.len() == axes.len() + (*self.shape).len(), 'Invalid input axes');
return Tensor::<T> { shape: output_shape.span(), data: *self.data };
}
fn sign<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TDrop: Drop<T>,
impl TCopy: Copy<T>,
>(
self: @Tensor<T>
) -> Tensor<T> {
let mut sign_data_array: Array<T> = ArrayTrait::new();
let mut data = *self.data;
loop {
match data.pop_front() {
Option::Some(data) => {
let sign_data = if *data == NumberTrait::zero() {
NumberTrait::zero()
} else if NumberTrait::is_neg(*data) {
NumberTrait::neg_one()
} else {
NumberTrait::one()
};
sign_data_array.append(sign_data);
},
Option::None => {
break Tensor::<T> { shape: *self.shape, data: sign_data_array.span() };
}
};
}
}
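// Clamps every element to the range [min, max]; a missing bound defaults to
// the type's minimum or maximum value.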
fn clip<
T,
MAG,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl TTensor: TensorTrait<T>,
impl TPartialOrd: PartialOrd<T>,
impl TNumber: NumberTrait<T, MAG>
>(
self: @Tensor<T>, min: Option<T>, max: Option<T>
) -> Tensor<T> {
let min = match min {
Option::Some(min) => min,
Option::None => { NumberTrait::min_value() },
};
let max = match max {
Option::Some(max) => max,
Option::None => { NumberTrait::max_value() },
};
let mut return_data: Array<T> = ArrayTrait::new();
let mut self_data_copy = *self.data;
loop {
match self_data_copy.pop_front() {
Option::Some(val) => {
if *val < min {
return_data.append(min);
} else if *val > max {
return_data.append(max);
} else {
return_data.append(*val);
}
},
Option::None => { break (); }
};
};
return Tensor::<T> { shape: *self.shape, data: return_data.span() };
}
fn identity<T>(self: @Tensor<T>) -> Tensor<T> {
Tensor::<T> { shape: *self.shape, data: *self.data }
}
use alexandria_data_structures::array_ext::ArrayTraitExt;
use orion::utils::u32_max;
use orion::operators::tensor::{core::{Tensor, TensorTrait, stride}, BoolTensor};
fn len_from_shape(mut shape: Span<usize>) -> usize {
let mut result: usize = 1;
loop {
match shape.pop_front() {
Option::Some(item) => { result *= *item; },
Option::None => { break; }
};
};
result
}
fn check_shape<T>(shape: Span<usize>, data: Span<T>) {
assert(len_from_shape(shape) == data.len(), 'wrong tensor shape');
}
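// Checks that two shapes are broadcast-compatible: comparing from the trailing
// dimension, each pair must be equal or contain a 1, otherwise this panics.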
fn check_compatibility(mut shape_1: Span<usize>, mut shape_2: Span<usize>) {
let mut iter_1 = shape_1.len();
let mut iter_2 = shape_2.len();
while iter_1 > 0 || iter_2 > 0 {
let dim_1 = if iter_1 > 0 {
*shape_1[iter_1 - 1]
} else {
1
};
let dim_2 = if iter_2 > 0 {
*shape_2[iter_2 - 1]
} else {
1
};
if dim_1 != dim_2 && dim_1 != 1 && dim_2 != 1 {
panic(array!['tensors shape must match']);
}
if iter_1 > 0 {
iter_1 -= 1;
}
if iter_2 > 0 {
iter_2 -= 1;
}
}
}
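// Maps an index tuple from a broadcasted result back to a flat offset into a
// tensor of `shape`, dispatching on whether the ranks already match.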
fn broadcast_index_mapping(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
if shape.len() == indices.len() {
broadcast_index_mapping_equal_shape(shape, indices)
} else {
broadcast_index_mapping_non_equal_shape(shape, indices)
}
}
fn broadcast_index_mapping_equal_shape(mut shape: Span<usize>, mut indices: Span<usize>) -> usize {
let mut result = 0_usize;
let mut stride = stride(shape);
loop {
match shape.pop_front() {
Option::Some(shape_val) => {
let indices_val = *indices.pop_front().unwrap();
let stride_val = *stride.pop_front().unwrap();
let index = (indices_val % *shape_val) * stride_val;
result += index;
},
Option::None => { break; }
};
};
result
}
fn broadcast_index_mapping_non_equal_shape(
mut shape: Span<usize>, mut indices: Span<usize>
) -> usize {
let mut result = 0_usize;
let mut stride = stride(shape.clone());
let mut offset = if shape.len() > indices.len() {
shape.len() - indices.len()
} else {
0
};
loop {
match shape.pop_back() {
Option::Some(_) => {
let stride_val = stride
.pop_back()
.unwrap_or(@1);
let index_val = if offset > 0 {
offset -= 1;
0
} else {
*indices
.pop_back()
.unwrap_or(@0)
};
let index = index_val * *stride_val;
result += index;
},
Option::None => { break; }
};
};
result
}
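// Builds the output shape of a reduction over `axis`: the reduced dimension is
// dropped, or kept with size 1 when `keepdims` is true.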
fn reduce_output_shape(mut input_shape: Span<usize>, axis: usize, keepdims: bool) -> Span<usize> {
assert(axis < input_shape.len(), 'axis out of dimensions');
let mut output_shape: Array<u32> = array![];
let mut n: usize = 0;
loop {
match input_shape.pop_front() {
Option::Some(current_dim) => {
if n == axis {
if keepdims {
output_shape.append(1);
}
} else {
output_shape.append(*current_dim);
}
n += 1;
},
Option::None => { break; }
};
};
output_shape.span()
}
fn permutation_output_shape(input_shape: Span<usize>, mut axes: Span<usize>) -> Span<usize> {
let axes_len = axes.len();
assert(input_shape.len() == axes_len, 'input_shape/indices len unequal');
let mut output_shape: Array<u32> = array![];
loop {
match axes.pop_front() {
Option::Some(item) => { output_shape.append(*input_shape[*item]); },
Option::None => { break; }
};
};
output_shape.span()
}
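// Re-inserts `axis_index` at position `axis` into a reduced index tuple,
// yielding the full index into the original (un-reduced) tensor.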
fn combine_indices(mut output_indices: Span<usize>, axis_index: usize, axis: usize) -> Span<usize> {
assert(axis <= output_indices.len(), 'axis value is out of range');
let mut result: Array<u32> = array![];
let mut n: usize = 0;
while n != output_indices.len() + 1 {
if n == axis {
result.append(axis_index);
} else if n > axis {
result.append(*output_indices[n - 1_usize]);
} else {
result.append(*output_indices[n]);
}
n += 1;
};
result.span()
}
fn find_axis(mut axes: Span<usize>, target_axis: usize) -> usize {
assert(target_axis < axes.len(), 'target_axis is out of range');
let mut axis: usize = 0;
loop {
match axes.pop_front() {
Option::Some(item) => {
if *item == target_axis {
break ();
}
axis += 1;
},
Option::None => { break; }
};
};
axis
}
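// Computes the broadcasted shape of two compatible shapes by taking the larger
// of each trailing dimension pair (missing dimensions count as 1).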
fn broadcast_shape(mut shape1: Span<usize>, mut shape2: Span<usize>) -> Span<usize> {
check_compatibility(shape1, shape2);
let mut result: Array<usize> = array![];
while !shape1.is_empty() || !shape2.is_empty() {
let dim1 = *shape1.pop_back().unwrap_or(@1);
let dim2 = *shape2.pop_back().unwrap_or(@1);
let broadcasted_dim = u32_max(dim1, dim2);
result.append(broadcasted_dim);
};
result.reverse().span()
}
fn replace_index(mut shape: Span<usize>, index: usize, value: usize) -> Span<usize> {
let mut output: Array<u32> = array![];
let mut i = 0;
loop {
match shape.pop_front() {
Option::Some(item) => {
if i == index {
output.append(value);
} else {
output.append(*item);
};
i += 1;
},
Option::None => { break; }
};
};
output.span()
}
fn get_all_axes(shape: Span<usize>) -> Span<usize> {
let mut ret: Array<usize> = array![];
let mut i: usize = 0;
let stop_i = shape.len() - 1;
loop {
ret.append(i);
if i == stop_i {
break ();
}
i += 1;
};
ret.span()
}
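// Flattens an array of tensors into one data span laid out for `new_shape`:
// for each leading row it appends a block of `stride(new_shape)[axis]`
// elements from each tensor in turn.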
fn flatten_array_of_tensors<T, +Copy<T>, +Drop<T>,>(
tensors: Array<Tensor<T>>, axis: usize, new_shape: Span<usize>
) -> Span<T> {
let mut new_stride = stride(new_shape);
let mut flattened: Array<T> = array![];
let stride_lim: usize = *new_stride.at(axis);
let max_row = (*(*tensors.at(0).shape).at(0));
let mut row = 0;
while row != max_row {
let mut tensors_span = tensors.span();
loop {
let mut i = 0;
match tensors_span.pop_front() {
Option::Some(mut t) => {
let mut data = *t.data;
while i != stride_lim {
let idx = i + (row * stride_lim);
flattened.append(*data.at(idx));
i += 1;
}
},
Option::None => { break; },
}
};
row += 1;
};
flattened.span()
}
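// Splits a tensor into an array of width-1 sub-tensors along `axis`, one per
// index of that axis, each produced with `slice`.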
fn as_tensors_array<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
tensor: @Tensor<T>, axis: usize
) -> Array<Tensor<T>> {
let shape = *tensor.shape;
let rank = shape.len();
let mut as_tensors: Array<Tensor<T>> = array![];
let mut axes: Array<usize> = array![];
let mut idx: usize = 0;
while idx != rank {
axes.append(idx);
idx += 1;
};
idx = 0;
let axis_len: usize = *shape.at(axis);
while idx != axis_len {
let mut starts: Array<usize> = array![];
let mut ends: Array<usize> = array![];
let mut i: usize = 0;
while i != rank {
starts.append(if i == axis {
idx
} else {
0
});
ends.append(if i == axis {
idx + 1
} else {
*shape.at(i)
});
i += 1;
};
let sub_tensor: Tensor<T> = tensor
.slice(
starts: starts.span(),
ends: ends.span(),
axes: Option::Some(axes.span()),
steps: Option::None(())
);
as_tensors.append(sub_tensor);
idx += 1;
};
as_tensors
}
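// Lexicographic comparison of two spans, element by element; the sign of the
// result drives the `SpanPartialOrd` implementation below.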
fn span_cmp<T, +Drop<T>, +Copy<T>, +PartialEq<T>, +PartialOrd<T>>(
lhs: Span<T>, rhs: Span<T>
) -> i8 {
let mut rhs = rhs;
let mut lhs = lhs;
let mut ret: i8 = 0;
loop {
match lhs.pop_front() {
Option::Some(l) => {
match rhs.pop_front() {
Option::Some(r) => { if l != r {
ret = if *l > *r {
1
} else {
-1
};
break;
} },
Option::None => {
ret = 1;
break;
},
}
},
Option::None => {
ret = -1;
break;
}
};
};
ret
}
impl SpanPartialOrd<T, +Drop<T>, +Copy<T>, +PartialEq<T>, +PartialOrd<T>> of PartialOrd<Span<T>> {
fn ge(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) >= 0
}
fn gt(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) > 0
}
fn le(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) <= 0
}
fn lt(lhs: Span<T>, rhs: Span<T>) -> bool {
span_cmp(lhs, rhs) < 0
}
}
fn optional_has_element<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
x: Option<Tensor<T>>
) -> Tensor<bool> {
match x {
Option::Some => {
let mut shape: Array<usize> = array![];
shape.append(1);
let mut data: Array<bool> = array![];
data.append(true);
TensorTrait::new(shape.span(), data.span())
},
Option::None => {
let mut shape: Array<usize> = array![];
shape.append(1);
let mut data: Array<bool> = array![];
data.append(false);
TensorTrait::new(shape.span(), data.span())
}
}
}
fn optional_get_element<T, +Copy<T>, +Drop<T>, +TensorTrait<T>,>(
x: Option<Tensor<T>>
) -> Tensor<T> {
match x {
Option::Some(ele) => { ele },
Option::None => { panic(array!['The input is an empty', 'optional-type.']) }
}
}
mod tensor_bool;
mod tensor_u32;
mod tensor_i8;
mod tensor_i32;
mod tensor_fp8x23;
mod tensor_fp16x16;
mod tensor_fp64x64;
mod tensor_fp32x32;
mod tensor_fp16x16wide;
mod tensor_fp8x23wide;
mod tensor_complex64;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{
constant_of_shape, new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_ops, ml, manipulation};
use orion::numbers::{NumberTrait};
use orion::operators::tensor::implementations::tensor_u32::U32Tensor;
impl BoolTensor of TensorTrait<bool> {
fn new(shape: Span<usize>, data: Span<bool>) -> Tensor<bool> {
new_tensor(shape, data)
}
fn at(self: @Tensor<bool>, indices: Span<usize>) -> bool {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sub(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn mul(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn div(lhs: Tensor<bool>, rhs: Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn min_in_tensor(self: @Tensor<bool>) -> bool {
panic(array!['not supported!'])
}
fn min(tensors: Span<Tensor<bool>>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn max_in_tensor(self: @Tensor<bool>) -> bool {
panic(array!['not supported!'])
}
fn max(tensors: Span<Tensor<bool>>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn stride(self: @Tensor<bool>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<bool>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<bool>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<bool>, target_shape: Span<i32>, allowzero: bool) -> Tensor<bool> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<bool>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_prod(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn argmax(
self: @Tensor<bool>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn argmin(
self: @Tensor<bool>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn transpose(self: @Tensor<bool>, axes: Span<usize>) -> Tensor<bool> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn exp(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn log(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn equal(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn greater_equal(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn less(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn less_equal(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn abs(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn neg(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn ceil(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn cos(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn asin(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn cumsum(
self: @Tensor<bool>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn flatten(self: @Tensor<bool>, axis: usize) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sinh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn tanh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn cosh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn acosh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn asinh(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn atan(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn xor(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn or(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn acos(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn onehot(
self: @Tensor<bool>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn concat(tensors: Span<Tensor<bool>>, axis: usize,) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn quantize_linear(
self: @Tensor<bool>, y_scale: @Tensor<bool>, y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<bool>, x_zero_point: @Tensor<bool>
) -> Tensor::<bool> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<bool>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<bool> {
core_ops::slice::<bool>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<bool>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<bool> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<bool>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn squeeze(self: @Tensor<bool>, axes: Option<Span<usize>>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn unsqueeze(self: @Tensor<bool>, axes: Span<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn sign(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn clip(self: @Tensor<bool>, min: Option<bool>, max: Option<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<bool>) -> Tensor<bool> {
core_ops::identity(self)
}
fn where(self: @Tensor<bool>, x: @Tensor<bool>, y: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<bool>,
a_zero_point: @Tensor<bool>,
b: @Tensor<i8>,
b_scale: @Tensor<bool>,
b_zero_point: @Tensor<bool>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<bool>) -> Tensor<bool> {
math::not::not(*self)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<bool>,
a_zero_point: @Tensor<bool>,
b: @Tensor<i8>,
b_scale: @Tensor<bool>,
b_zero_point: @Tensor<bool>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<bool>,
a_zero_point: @Tensor<bool>,
b: @Tensor<i8>,
b_scale: @Tensor<bool>,
b_zero_point: @Tensor<bool>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<bool>>,
zero_points: Span<Tensor<bool>>,
y_scale: @Tensor<bool>,
y_zero_point: @Tensor<bool>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<bool>, a_zero_point: @Tensor<bool>, alpha: bool,
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn round(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn scatter(
self: @Tensor<bool>,
updates: Tensor<bool>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn trilu(self: @Tensor<bool>, upper: bool, k: i64) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn bitwise_and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn bitwise_xor(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn bitwise_or(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_l1(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_l2(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_sum_square(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn constant_of_shape(shape: Span<usize>, value: bool) -> Tensor<bool> {
constant_of_shape(shape, value)
}
fn gather_elements(
self: @Tensor<bool>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<bool> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<bool>, bias: Option<bool>, lambd: Option<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<bool>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn binarizer(self: @Tensor<bool>, threshold: Option<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn array_feature_extractor(self: @Tensor<bool>, indices: Tensor<usize>) -> Tensor<bool> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn reduce_min(
self: @Tensor<bool>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn pow(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn is_inf(
self: @Tensor<bool>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn is_nan(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_log_sum(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_log_sum_exp(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<bool>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<bool>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn gather_nd(
self: @Tensor<bool>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<bool> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn compress(
self: @Tensor<bool>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<bool> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<bool>,
scale: @Tensor<bool>,
B: Option<@Tensor<bool>>,
axis: Option<i32>,
epsilon: Option<bool>,
stash_type: Option<usize>,
) -> (Tensor<bool>, Tensor<bool>, Tensor<bool>) {
panic(array!['not supported!'])
}
fn resize(
self: @Tensor<bool>,
roi: Option<Tensor<bool>>,
scales: Option<Span<bool>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<bool>,
exclude_outside: Option<bool>,
extrapolation_value: Option<bool>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<bool>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<bool>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn split_to_sequence(
self: @Tensor<bool>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<bool>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<bool>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<bool> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<bool>) -> Option<Tensor<bool>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<bool>
) -> (Tensor::<u32>, Tensor::<bool>, Tensor<bool>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<bool>, updates: Tensor<bool>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn range(start: bool, end: bool, step: bool) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn hann_window(size: bool, periodic: Option<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn hamming_window(size: bool, periodic: Option<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn blackman_window(size: bool, periodic: Option<usize>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn random_uniform_like(
tensor: @Tensor<bool>, high: Option<bool>, low: Option<bool>, seed: Option<usize>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn label_encoder(
self: @Tensor<bool>,
default_list: Option<Span<bool>>,
default_tensor: Option<Tensor<bool>>,
keys: Option<Span<bool>>,
keys_tensor: Option<Tensor<bool>>,
values: Option<Span<bool>>,
values_tensor: Option<Tensor<bool>>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
}
impl BoolTensorPartialEq of PartialEq<Tensor<bool>> {
fn eq(lhs: @Tensor<bool>, rhs: @Tensor<bool>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<bool>, rhs: @Tensor<bool>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl BoolTryIntobool of TryInto<bool, bool> {
fn try_into(self: bool) -> Option<bool> {
Option::Some(self)
}
}
fn tensor_eq(mut lhs: Tensor<bool>, mut rhs: Tensor<bool>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0
&& is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0
&& is_eq {
is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap();
};
is_eq
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP64x64, FP64x64Impl};
use orion::numbers::fixed_point::implementations::fp64x64::core::ONE;
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::complex_number::complex_trait::ComplexTrait;
use orion::numbers::complex_number::complex64::{Complex64Impl, complex64};
impl Complex64Tensor of TensorTrait<complex64> {
fn new(shape: Span<usize>, data: Span<complex64>) -> Tensor<complex64> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: complex64) -> Tensor<complex64> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<complex64>, indices: Span<usize>) -> complex64 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<complex64>) -> complex64 {
panic(array!['not supported!'])
}
fn min(tensors: Span<Tensor<complex64>>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn max_in_tensor(self: @Tensor<complex64>) -> complex64 {
panic(array!['not supported!'])
}
fn max(tensors: Span<Tensor<complex64>>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn stride(self: @Tensor<complex64>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<complex64>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<complex64>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(
self: @Tensor<complex64>, target_shape: Span<i32>, allowzero: bool
) -> Tensor<complex64> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<complex64>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<complex64> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<complex64>,
axis: i32,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn argmin(
self: @Tensor<complex64>,
axis: usize,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn transpose(self: @Tensor<complex64>, axes: Span<usize>) -> Tensor<complex64> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<complex64>) -> Tensor<complex64> {
math::exp::exp(*self)
}
fn log(self: @Tensor<complex64>) -> Tensor<complex64> {
math::log::log(*self)
}
fn equal(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn greater_equal(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn less(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn less_equal(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn abs(self: @Tensor<complex64>) -> Tensor<complex64> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn ceil(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn sin(self: @Tensor<complex64>) -> Tensor<complex64> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<complex64>) -> Tensor<complex64> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<complex64>) -> Tensor<complex64> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<complex64>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<complex64> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<complex64>, axis: usize) -> Tensor<complex64> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<complex64>) -> Tensor<complex64> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<complex64>) -> Tensor<complex64> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn or(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<usize> {
panic(array!['not supported!'])
}
fn acos(self: @Tensor<complex64>) -> Tensor<complex64> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<complex64>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<complex64>) -> Tensor<complex64> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<complex64>>, axis: usize,) -> Tensor<complex64> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<complex64>, y_scale: @Tensor<complex64>, y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<complex64>, x_zero_point: @Tensor<complex64>
) -> Tensor::<complex64> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
b: @Tensor<i8>,
b_scale: @Tensor<complex64>,
b_zero_point: @Tensor<complex64>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
b: @Tensor<i8>,
b_scale: @Tensor<complex64>,
b_zero_point: @Tensor<complex64>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
b: @Tensor<i8>,
b_scale: @Tensor<complex64>,
b_zero_point: @Tensor<complex64>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<complex64>>,
zero_points: Span<Tensor<complex64>>,
y_scale: @Tensor<complex64>,
y_zero_point: @Tensor<complex64>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>,
a_scale: @Tensor<complex64>,
a_zero_point: @Tensor<complex64>,
alpha: complex64
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<complex64>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<complex64> {
core_tensor::slice::<complex64>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<complex64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<complex64> {
math::gather::gather(self, indices, axis)
}
fn gather_nd(
self: @Tensor<complex64>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<complex64> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn nonzero(self: @Tensor<complex64>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<complex64>, axes: Option<Span<usize>>) -> Tensor<complex64> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<complex64>, axes: Span<usize>) -> Tensor<complex64> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn clip(
self: @Tensor<complex64>, min: Option<complex64>, max: Option<complex64>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<complex64>) -> Tensor<complex64> {
core_tensor::identity(self)
}
fn where(
self: @Tensor<complex64>, x: @Tensor<complex64>, y: @Tensor<complex64>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn bitwise_and(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn bitwise_xor(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn bitwise_or(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn round(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn reduce_l1(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(
self: @Tensor<complex64>, indices: Tensor<usize>
) -> Tensor<complex64> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<complex64>, threshold: Option<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn reduce_sum_square(
self: @Tensor<complex64>, axis: usize, keepdims: bool
) -> Tensor<complex64> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_l2::reduce_l2_complex(self, axis, keepdims)
}
fn trilu(self: @Tensor<complex64>, upper: bool, k: i64) -> Tensor<complex64> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<complex64>,
updates: Tensor<complex64>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn not(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<complex64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<complex64> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<complex64>, bias: Option<complex64>, lambd: Option<complex64>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn reduce_mean(
self: @Tensor<complex64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<complex64> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<complex64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn pow(self: @Tensor<complex64>, other: @Tensor<complex64>) -> Tensor<complex64> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<complex64>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn is_nan(self: @Tensor<complex64>) -> Tensor<bool> {
panic(array!['not supported!'])
}
fn reduce_log_sum(self: @Tensor<complex64>, axis: usize, keepdims: bool) -> Tensor<complex64> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn erf(self: @Tensor<complex64>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn unique(
self: @Tensor<complex64>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<complex64>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
panic(array!['not supported!'])
}
fn compress(
self: @Tensor<complex64>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<complex64> {
math::compress::compress(self, condition, axis)
}
fn reduce_log_sum_exp(
self: @Tensor<complex64>, axis: usize, keepdims: bool
) -> Tensor<complex64> {
math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims)
}
fn layer_normalization(
self: @Tensor<complex64>,
scale: @Tensor<complex64>,
B: Option<@Tensor<complex64>>,
axis: Option<i32>,
epsilon: Option<complex64>,
stash_type: Option<usize>,
) -> (Tensor<complex64>, Tensor<complex64>, Tensor<complex64>) {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<complex64>,
axis: usize,
num_outputs: Option<usize>,
spl: Option<Tensor<usize>>
) -> Array<Tensor<complex64>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn reverse_sequence(
self: @Tensor<complex64>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<complex64> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn resize(
self: @Tensor<complex64>,
roi: Option<Tensor<complex64>>,
scales: Option<Span<complex64>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<complex64>,
exclude_outside: Option<bool>,
extrapolation_value: Option<complex64>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn random_uniform_like(
tensor: @Tensor<complex64>,
high: Option<complex64>,
low: Option<complex64>,
seed: Option<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn range(start: complex64, end: complex64, step: complex64) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn hann_window(size: complex64, periodic: Option<usize>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn hamming_window(size: complex64, periodic: Option<usize>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn blackman_window(size: complex64, periodic: Option<usize>) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn split_to_sequence(
self: @Tensor<complex64>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<complex64>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn optional(self: @Tensor<complex64>) -> Option<Tensor<complex64>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<complex64>
) -> (Tensor::<u32>, Tensor::<complex64>, Tensor<complex64>) {
panic(array!['not supported!'])
}
fn scatter_nd(
self: @Tensor<complex64>,
updates: Tensor<complex64>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
fn label_encoder(
self: @Tensor<complex64>,
default_list: Option<Span<complex64>>,
default_tensor: Option<Tensor<complex64>>,
keys: Option<Span<complex64>>,
keys_tensor: Option<Tensor<complex64>>,
values: Option<Span<complex64>>,
values_tensor: Option<Tensor<complex64>>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}
}
impl Complex64TensorAdd of Add<Tensor<complex64>> {
fn add(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl Complex64TensorSub of Sub<Tensor<complex64>> {
fn sub(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl Complex64TensorMul of Mul<Tensor<complex64>> {
fn mul(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl Complex64TensorDiv of Div<Tensor<complex64>> {
fn div(lhs: Tensor<complex64>, rhs: Tensor<complex64>) -> Tensor<complex64> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl Complex64TensorPartialEq of PartialEq<Tensor<complex64>> {
fn eq(lhs: @Tensor<complex64>, rhs: @Tensor<complex64>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<complex64>, rhs: @Tensor<complex64>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
fn eq(lhs: @complex64, rhs: @complex64) -> bool {
let eq = (*lhs.real == *rhs.real) && (*lhs.img == *rhs.img);
eq
}
fn tensor_eq(mut lhs: Tensor<complex64>, mut rhs: Tensor<complex64>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0
&& is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0
&& is_eq {
is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP16x16, I8IntoFP16x16};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp16x16::math::trig::PI;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W;
impl FP16x16Tensor of TensorTrait<FP16x16> {
fn new(shape: Span<usize>, data: Span<FP16x16>) -> Tensor<FP16x16> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP16x16) -> Tensor<FP16x16> {
constant_of_shape(shape, value)
}
fn add(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::div(@lhs, @rhs)
}
fn at(self: @Tensor<FP16x16>, indices: Span<usize>) -> FP16x16 {
*at_tensor(self, indices)
}
fn min_in_tensor(self: @Tensor<FP16x16>) -> FP16x16 {
math::min_in_tensor::min_in_tensor::<FP16x16, u32>(*self.data)
}
fn min(tensors: Span<Tensor<FP16x16>>) -> Tensor<FP16x16> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP16x16>) -> FP16x16 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP16x16>>) -> Tensor<FP16x16> {
        math::max::max(tensors)
}
fn stride(self: @Tensor<FP16x16>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP16x16>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP16x16>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP16x16>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP16x16> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP16x16>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP16x16>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP16x16>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP16x16>, axes: Span<usize>) -> Tensor<FP16x16> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> { |
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP16x16>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP16x16> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP16x16>, axis: usize) -> Tensor<FP16x16> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::xor::xor(self, other) |
}
fn or(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP16x16>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP16x16> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP16x16>>, axis: usize,) -> Tensor<FP16x16> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP16x16>, y_scale: @Tensor<FP16x16>, y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(127, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP16x16>, x_zero_point: @Tensor<FP16x16>
) -> Tensor::<FP16x16> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16>,
a_zero_point: @Tensor<FP16x16>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16>,
b_zero_point: @Tensor<FP16x16>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16>,
a_zero_point: @Tensor<FP16x16>,
        b: @Tensor<i8>,
b_scale: @Tensor<FP16x16>,
b_zero_point: @Tensor<FP16x16>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16>,
a_zero_point: @Tensor<FP16x16>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16>,
b_zero_point: @Tensor<FP16x16>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP16x16>>,
zero_points: Span<Tensor<FP16x16>>,
y_scale: @Tensor<FP16x16>,
y_zero_point: @Tensor<FP16x16>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP16x16>, a_zero_point: @Tensor<FP16x16>, alpha: FP16x16
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha, |
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP16x16>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP16x16> {
core_tensor::slice::<FP16x16>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP16x16>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP16x16>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP16x16>, axes: Option<Span<usize>>) -> Tensor<FP16x16> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP16x16>, axes: Span<usize>) -> Tensor<FP16x16> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP16x16>, min: Option<FP16x16>, max: Option<FP16x16>) -> Tensor<FP16x16> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
core_tensor::identity(self)
}
fn where(self: @Tensor<FP16x16>, x: @Tensor<FP16x16>, y: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
        math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(self: @Tensor<FP16x16>, indices: Tensor<usize>) -> Tensor<FP16x16> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP16x16>, threshold: Option<FP16x16>) -> Tensor<FP16x16> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP16x16>, upper: bool, k: i64) -> Tensor<FP16x16> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP16x16>,
updates: Tensor<FP16x16>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn not(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP16x16>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP16x16>, bias: Option<FP16x16>, lambd: Option<FP16x16>
) -> Tensor<FP16x16> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP16x16>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min( |
self: @Tensor<FP16x16>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP16x16>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP16x16>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP16x16>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP16x16> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP16x16>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP16x16>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP16x16>,
scale: @Tensor<FP16x16>,
B: Option<@Tensor<FP16x16>>,
axis: Option<i32>,
epsilon: Option<FP16x16>,
stash_type: Option<usize>,
) -> (Tensor<FP16x16>, Tensor<FP16x16>, Tensor<FP16x16>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP16x16>,
roi: Option<Tensor<FP16x16>>,
scales: Option<Span<FP16x16>>,
        sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP16x16>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP16x16>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP16x16> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP16x16>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP16x16> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP16x16>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP16x16>, high: Option<FP16x16>, low: Option<FP16x16>, seed: Option<usize>
) -> Tensor<FP16x16> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP16x16, end: FP16x16, step: FP16x16) -> Tensor<FP16x16> {
math::range::range(start, end, step)
}
fn hann_window(size: FP16x16, periodic: Option<usize>) -> Tensor<FP16x16> {
math::hann_window::hann_window(size, FP16x16 { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP16x16, periodic: Option<usize>) -> Tensor<FP16x16> {
math::hamming_window::hamming_window(size, FP16x16 { mag: PI, sign: false }, periodic)
}
    fn blackman_window(size: FP16x16, periodic: Option<usize>) -> Tensor<FP16x16> {
math::blackman_window::blackman_window(size, FP16x16 { mag: PI, sign: false }, periodic)
}
fn split_to_sequence(
self: @Tensor<FP16x16>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP16x16>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP16x16> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP16x16>) -> Option<Tensor<FP16x16>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP16x16>
) -> (Tensor::<u32>, Tensor::<FP16x16>, Tensor<FP16x16>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP16x16>,
updates: Tensor<FP16x16>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP16x16>,
default_list: Option<Span<FP16x16>>,
default_tensor: Option<Tensor<FP16x16>>,
keys: Option<Span<FP16x16>>,
keys_tensor: Option<Tensor<FP16x16>>,
values: Option<Span<FP16x16>>,
values_tensor: Option<Tensor<FP16x16>>
) -> Tensor<FP16x16> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl FP16x16TensorAdd of Add<Tensor<FP16x16>> { |
fn add(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl FP16x16TensorSub of Sub<Tensor<FP16x16>> {
fn sub(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl FP16x16TensorMul of Mul<Tensor<FP16x16>> {
fn mul(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl FP16x16TensorDiv of Div<Tensor<FP16x16>> {
fn div(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> Tensor<FP16x16> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl FP16x16TensorPartialEq of PartialEq<Tensor<FP16x16>> {
fn eq(lhs: @Tensor<FP16x16>, rhs: @Tensor<FP16x16>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP16x16>, rhs: @Tensor<FP16x16>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl TensorI8IntoTensorFP16x16 of Into<Tensor<i8>, Tensor<FP16x16>> {
fn into(self: Tensor<i8>) -> Tensor<FP16x16> {
tensor_i8_to_tensor_fp16x16(@self)
}
}
impl FP16x16TensorPartialOrd of PartialOrd<Tensor<FP16x16>> {
fn ge(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
fn lt(lhs: Tensor<FP16x16>, rhs: Tensor<FP16x16>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
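// PRECISION is the relative tolerance used by relative_eq below: with FP16x16's 16
// fractional bits (ONE = 65536), 589 allows roughly 0.9% relative error.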
const PRECISION: u32 = 589;
fn relative_eq(lhs: @FP16x16, rhs: @FP16x16) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
    rel_diff <= PRECISION
}
fn tensor_eq(mut lhs: Tensor<FP16x16>, mut rhs: Tensor<FP16x16>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
fn tensor_i8_to_tensor_fp16x16(x: @Tensor<i8>) -> Tensor<FP16x16> {
let mut result_data = ArrayTrait::<FP16x16>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
} |
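// Usage sketch (illustrative only, not part of the library; assumes this module's imports
// are in scope and uses an arbitrary shape and values): TensorI8IntoTensorFP16x16 above
// promotes an i8 tensor element-wise to FP16x16, and FP16x16TensorPartialEq then compares
// the result within the PRECISION tolerance.
fn example_i8_into_fp16x16() {
    // a 1-D i8 tensor with two elements
    let x = TensorTrait::<i8>::new(array![2].span(), array![1_i8, 2_i8].span());
    // element-wise conversion via the Into impl above
    let y: Tensor<FP16x16> = x.into();
    // 1 and 2 in Q16.16 representation (ONE = 65536)
    let expected = TensorTrait::<FP16x16>::new(
        array![2].span(),
        array![FP16x16 { mag: 65536, sign: false }, FP16x16 { mag: 131072, sign: false }].span()
    );
    assert(y == expected, 'i8 -> FP16x16 mismatch');
}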
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP16x16W};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::trig::PI;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16IntoFP16x16W
};
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
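// FP16x16W is the "wide" counterpart of FP16x16: the same 16.16 layout but carried in a
// wider (u64) magnitude (see min_in_tensor below), which leaves extra headroom for
// intermediate results. Operators that are not implemented for the wide type simply
// panic with 'not supported!'.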
impl FP16x16WTensor of TensorTrait<FP16x16W> {
fn new(shape: Span<usize>, data: Span<FP16x16W>) -> Tensor<FP16x16W> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP16x16W) -> Tensor<FP16x16W> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP16x16W>, indices: Span<usize>) -> FP16x16W {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP16x16W>) -> FP16x16W {
math::min_in_tensor::min_in_tensor::<FP16x16W, u64>(*self.data)
}
fn min(tensors: Span<Tensor<FP16x16W>>) -> Tensor<FP16x16W> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP16x16W>) -> FP16x16W { |
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP16x16W>>) -> Tensor<FP16x16W> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP16x16W>) -> Span<usize> {
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP16x16W>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP16x16W>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP16x16W>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP16x16W> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP16x16W>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16W> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP16x16W>,
axis: i32,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP16x16W>,
axis: usize,
keepdims: Option<bool>,
select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP16x16W>, axes: Span<usize>) -> Tensor<FP16x16W> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::log::log(*self) |
}
fn equal(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP16x16W>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP16x16W> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP16x16W>, axis: usize) -> Tensor<FP16x16W> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::acosh::acosh(*self)
}
    fn asinh(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP16x16W>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn sqrt(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP16x16W>>, axis: usize,) -> Tensor<FP16x16W> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP16x16W>, y_scale: @Tensor<FP16x16W>, y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP16x16W>, x_zero_point: @Tensor<FP16x16W>
) -> Tensor::<FP16x16W> {
panic(array!['not supported!'])
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16W>,
b_zero_point: @Tensor<FP16x16W>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16W>,
        b_zero_point: @Tensor<FP16x16W>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
b: @Tensor<i8>,
b_scale: @Tensor<FP16x16W>,
b_zero_point: @Tensor<FP16x16W>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP16x16W>>,
zero_points: Span<Tensor<FP16x16W>>,
y_scale: @Tensor<FP16x16W>,
y_zero_point: @Tensor<FP16x16W>,
axis: usize
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn qlinear_leakyrelu(
self: @Tensor<i8>,
a_scale: @Tensor<FP16x16W>,
a_zero_point: @Tensor<FP16x16W>,
alpha: FP16x16W
) -> Tensor::<i8> {
panic(array!['not supported!'])
}
fn slice(
self: @Tensor<FP16x16W>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP16x16W> {
core_tensor::slice::<FP16x16W>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP16x16W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16W> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP16x16W>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP16x16W>, axes: Option<Span<usize>>) -> Tensor<FP16x16W> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP16x16W>, axes: Span<usize>) -> Tensor<FP16x16W> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::sign::sign(*self)
}
fn clip(
        self: @Tensor<FP16x16W>, min: Option<FP16x16W>, max: Option<FP16x16W>
) -> Tensor<FP16x16W> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
core_tensor::identity(self)
}
fn where(
self: @Tensor<FP16x16W>, x: @Tensor<FP16x16W>, y: @Tensor<FP16x16W>
) -> Tensor<FP16x16W> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP16x16W>, upper: bool, k: i64) -> Tensor<FP16x16W> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP16x16W>,
updates: Tensor<FP16x16W>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16W> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(
self: @Tensor<FP16x16W>, indices: Tensor<usize>
) -> Tensor<FP16x16W> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP16x16W>, threshold: Option<FP16x16W>) -> Tensor<FP16x16W> {
math::binarizer::binarizer(*self, threshold)
}
    fn reduce_sum_square(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn not(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP16x16W>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP16x16W> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP16x16W>, bias: Option<FP16x16W>, lambd: Option<FP16x16W>
) -> Tensor<FP16x16W> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP16x16W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16W> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP16x16W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16W> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP16x16W>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP16x16W>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP16x16W>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP16x16W> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
        math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(
self: @Tensor<FP16x16W>, axis: usize, keepdims: bool
) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP16x16W>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP16x16W>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn compress(
self: @Tensor<FP16x16W>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP16x16W> {
math::compress::compress(self, condition, axis)
}
fn layer_normalization(
self: @Tensor<FP16x16W>,
scale: @Tensor<FP16x16W>,
B: Option<@Tensor<FP16x16W>>,
axis: Option<i32>,
epsilon: Option<FP16x16W>,
stash_type: Option<usize>,
) -> (Tensor<FP16x16W>, Tensor<FP16x16W>, Tensor<FP16x16W>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP16x16W>,
roi: Option<Tensor<FP16x16W>>,
scales: Option<Span<FP16x16W>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP16x16W>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP16x16W>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP16x16W> {
panic(array!['not supported!'])
}
fn split(
self: @Tensor<FP16x16W>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16W>> {
manipulation::split::split(self, axis, num_outputs, spl) |
}
fn random_uniform_like(
tensor: @Tensor<FP16x16W>,
high: Option<FP16x16W>,
low: Option<FP16x16W>,
seed: Option<usize>
) -> Tensor<FP16x16W> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP16x16W, end: FP16x16W, step: FP16x16W) -> Tensor<FP16x16W> {
math::range::range(start, end, step)
}
fn hann_window(size: FP16x16W, periodic: Option<usize>) -> Tensor<FP16x16W> {
math::hann_window::hann_window(size, FP16x16W { mag: PI, sign: false }, periodic)
}
fn hamming_window(size: FP16x16W, periodic: Option<usize>) -> Tensor<FP16x16W> {
math::hamming_window::hamming_window(size, FP16x16W { mag: PI, sign: false }, periodic)
}
fn blackman_window(size: FP16x16W, periodic: Option<usize>) -> Tensor<FP16x16W> {
math::blackman_window::blackman_window(size, FP16x16W { mag: PI, sign: false }, periodic)
}
fn split_to_sequence(
self: @Tensor<FP16x16W>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP16x16W>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP16x16W>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP16x16W> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP16x16W>) -> Option<Tensor<FP16x16W>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP16x16W>
) -> (Tensor::<u32>, Tensor::<FP16x16W>, Tensor<FP16x16W>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
            NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP16x16W>,
updates: Tensor<FP16x16W>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP16x16W> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP16x16W>,
default_list: Option<Span<FP16x16W>>,
default_tensor: Option<Tensor<FP16x16W>>,
keys: Option<Span<FP16x16W>>,
keys_tensor: Option<Tensor<FP16x16W>>,
values: Option<Span<FP16x16W>>,
values_tensor: Option<Tensor<FP16x16W>>
) -> Tensor<FP16x16W> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl FP16x16WTensorAdd of Add<Tensor<FP16x16W>> {
fn add(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl FP16x16WTensorSub of Sub<Tensor<FP16x16W>> {
fn sub(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl FP16x16WTensorMul of Mul<Tensor<FP16x16W>> {
fn mul(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl FP16x16WTensorDiv of Div<Tensor<FP16x16W>> {
fn div(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> Tensor<FP16x16W> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl FP16x16WTensorPartialEq of PartialEq<Tensor<FP16x16W>> {
fn eq(lhs: @Tensor<FP16x16W>, rhs: @Tensor<FP16x16W>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP16x16W>, rhs: @Tensor<FP16x16W>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
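// Identity conversion; presumably here so generic code that narrows indices via TryInto
// also compiles when the index type is already u32.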
impl U32TryIntoU32 of TryInto<u32, u32> {
fn try_into(self: u32) -> Option<u32> {
        Option::Some(self)
}
}
impl FP16x16WTensorPartialOrd of PartialOrd<Tensor<FP16x16W>> {
fn ge(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
fn lt(lhs: Tensor<FP16x16W>, rhs: Tensor<FP16x16W>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
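// Same relative tolerance as the FP16x16 implementation: with ONE = 65536, a PRECISION of
// 589 allows roughly 0.9% relative error in relative_eq below.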
const PRECISION: u64 = 589;
fn relative_eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
fn tensor_eq(mut lhs: Tensor<FP16x16W>, mut rhs: Tensor<FP16x16W>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
} |
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP32x32, FP32x32Impl, I8IntoFP32x32};
use orion::numbers::fixed_point::implementations::fp32x32::core::ONE;
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
impl FP32x32Tensor of TensorTrait<FP32x32> {
fn new(shape: Span<usize>, data: Span<FP32x32>) -> Tensor<FP32x32> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP32x32) -> Tensor<FP32x32> {
constant_of_shape(shape, value)
}
fn add(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::div(@lhs, @rhs)
}
fn at(self: @Tensor<FP32x32>, indices: Span<usize>) -> FP32x32 {
*at_tensor(self, indices)
}
fn min_in_tensor(self: @Tensor<FP32x32>) -> FP32x32 {
math::min_in_tensor::min_in_tensor::<FP32x32, u64>(*self.data)
}
fn min(tensors: Span<Tensor<FP32x32>>) -> Tensor<FP32x32> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP32x32>) -> FP32x32 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP32x32>>) -> Tensor<FP32x32> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP32x32>) -> Span<usize> { |
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP32x32>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP32x32>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP32x32>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP32x32> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP32x32>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP32x32> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP32x32>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP32x32>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP32x32>, axes: Span<usize>) -> Tensor<FP32x32> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::greater::greater(self, other)
}
    fn greater_equal(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP32x32>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP32x32> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP32x32>, axis: usize) -> Tensor<FP32x32> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::xor::xor(self, other)
}
    fn or(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP32x32>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP32x32> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP32x32>>, axis: usize,) -> Tensor<FP32x32> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP32x32>, y_scale: @Tensor<FP32x32>, y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP32x32>, x_zero_point: @Tensor<FP32x32>
) -> Tensor::<FP32x32> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP32x32>,
a_zero_point: @Tensor<FP32x32>,
b: @Tensor<i8>,
b_scale: @Tensor<FP32x32>,
b_zero_point: @Tensor<FP32x32>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP32x32>,
a_zero_point: @Tensor<FP32x32>,
b: @Tensor<i8>,
b_scale: @Tensor<FP32x32>,
        b_zero_point: @Tensor<FP32x32>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP32x32>,
a_zero_point: @Tensor<FP32x32>,
b: @Tensor<i8>,
b_scale: @Tensor<FP32x32>,
b_zero_point: @Tensor<FP32x32>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP32x32>>,
zero_points: Span<Tensor<FP32x32>>,
y_scale: @Tensor<FP32x32>,
y_zero_point: @Tensor<FP32x32>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP32x32>, a_zero_point: @Tensor<FP32x32>, alpha: FP32x32
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
            NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP32x32>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP32x32> {
core_tensor::slice::<FP32x32>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP32x32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP32x32> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP32x32>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP32x32>, axes: Option<Span<usize>>) -> Tensor<FP32x32> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP32x32>, axes: Span<usize>) -> Tensor<FP32x32> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP32x32>, min: Option<FP32x32>, max: Option<FP32x32>) -> Tensor<FP32x32> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
core_tensor::identity(self)
}
fn where(self: @Tensor<FP32x32>, x: @Tensor<FP32x32>, y: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::round::round(*self)
}
    fn trilu(self: @Tensor<FP32x32>, upper: bool, k: i64) -> Tensor<FP32x32> {
linalg::trilu::trilu(self, upper, k)
}
fn reduce_l1(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn scatter(
self: @Tensor<FP32x32>,
updates: Tensor<FP32x32>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP32x32> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn array_feature_extractor(self: @Tensor<FP32x32>, indices: Tensor<usize>) -> Tensor<FP32x32> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP32x32>, threshold: Option<FP32x32>) -> Tensor<FP32x32> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn not(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP32x32>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP32x32> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP32x32>, bias: Option<FP32x32>, lambd: Option<FP32x32>
) -> Tensor<FP32x32> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP32x32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP32x32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP32x32>,
axes: Option<Span<usize>>,
        keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP32x32> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP32x32>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP32x32>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP32x32>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP32x32> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims)
}
fn erf(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP32x32>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP32x32>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP32x32>,
scale: @Tensor<FP32x32>,
B: Option<@Tensor<FP32x32>>,
axis: Option<i32>,
epsilon: Option<FP32x32>,
stash_type: Option<usize>,
) -> (Tensor<FP32x32>, Tensor<FP32x32>, Tensor<FP32x32>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP32x32>,
roi: Option<Tensor<FP32x32>>,
scales: Option<Span<FP32x32>>,
sizes: Option<Span<usize>>,
        antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP32x32>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP32x32>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP32x32> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP32x32>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP32x32> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP32x32>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP32x32>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP32x32>, high: Option<FP32x32>, low: Option<FP32x32>, seed: Option<usize>
) -> Tensor<FP32x32> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP32x32, end: FP32x32, step: FP32x32) -> Tensor<FP32x32> {
math::range::range(start, end, step)
}
fn hann_window(size: FP32x32, periodic: Option<usize>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn hamming_window(size: FP32x32, periodic: Option<usize>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn blackman_window(size: FP32x32, periodic: Option<usize>) -> Tensor<FP32x32> {
panic(array!['not supported!'])
}
fn split_to_sequence(
        self: @Tensor<FP32x32>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP32x32>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP32x32>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP32x32> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP32x32>) -> Option<Tensor<FP32x32>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP32x32>
) -> (Tensor::<u32>, Tensor::<FP32x32>, Tensor<FP32x32>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP32x32>,
updates: Tensor<FP32x32>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP32x32> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP32x32>,
default_list: Option<Span<FP32x32>>,
default_tensor: Option<Tensor<FP32x32>>,
keys: Option<Span<FP32x32>>,
keys_tensor: Option<Tensor<FP32x32>>,
values: Option<Span<FP32x32>>,
values_tensor: Option<Tensor<FP32x32>>
) -> Tensor<FP32x32> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl FP32x32TensorAdd of Add<Tensor<FP32x32>> {
fn add(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl FP32x32TensorSub of Sub<Tensor<FP32x32>> {
fn sub(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl FP32x32TensorMul of Mul<Tensor<FP32x32>> {
fn mul(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl FP32x32TensorDiv of Div<Tensor<FP32x32>> {
fn div(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> Tensor<FP32x32> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl FP32x32TensorPartialEq of PartialEq<Tensor<FP32x32>> {
fn eq(lhs: @Tensor<FP32x32>, rhs: @Tensor<FP32x32>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP32x32>, rhs: @Tensor<FP32x32>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl FP32x32TryIntoI8 of TryInto<FP32x32, i8> {
fn try_into(self: FP32x32) -> Option<i8> {
let number_felt: felt252 = (self.mag / ONE).into();
let number_i8: i8 = number_felt.try_into().unwrap();
if self.sign {
return Option::Some(number_i8 * -1_i8);
}
Option::Some(number_i8)
}
}
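// Note: the conversion above truncates the fractional part toward zero (mag / ONE), so for
// example 3.5 becomes 3_i8 and -3.5 becomes -3_i8; magnitudes above 127 fall outside i8 and
// make the felt252 -> i8 conversion panic on unwrap.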
impl TensorI8IntoTensorFP32x32 of Into<Tensor<i8>, Tensor<FP32x32>> {
fn into(self: Tensor<i8>) -> Tensor<FP32x32> {
tensor_i8_to_tensor_fp32x32(@self)
}
}
impl FP32x32TensorPartialOrd of PartialOrd<Tensor<FP32x32>> {
fn ge(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
return SpanPartialOrd::ge(lhs.data, rhs.data);
}
fn gt(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
return SpanPartialOrd::gt(lhs.data, rhs.data);
}
fn le(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
return SpanPartialOrd::le(lhs.data, rhs.data);
}
fn lt(lhs: Tensor<FP32x32>, rhs: Tensor<FP32x32>) -> bool {
return SpanPartialOrd::lt(lhs.data, rhs.data);
}
}
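// Relative tolerance for FP32x32: with 32 fractional bits (ONE = 2^32), a PRECISION of
// 75497 corresponds to roughly 1.8e-5 relative error in relative_eq below.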
const PRECISION: u64 = 75497;
fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
fn tensor_eq(mut lhs: Tensor<FP32x32>, mut rhs: Tensor<FP32x32>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
fn tensor_i8_to_tensor_fp32x32(x: @Tensor<i8>) -> Tensor<FP32x32> {
let mut result_data = ArrayTrait::<FP32x32>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
} |
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation};
use orion::numbers::{NumberTrait, FP64x64, FP64x64Impl, I8IntoFP64x64};
use orion::numbers::fixed_point::implementations::fp64x64::core::ONE;
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
impl FP64x64Tensor of TensorTrait<FP64x64> {
fn new(shape: Span<usize>, data: Span<FP64x64>) -> Tensor<FP64x64> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP64x64) -> Tensor<FP64x64> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP64x64>, indices: Span<usize>) -> FP64x64 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP64x64>) -> FP64x64 {
math::min_in_tensor::min_in_tensor::<FP64x64, u128>(*self.data)
}
fn min(tensors: Span<Tensor<FP64x64>>) -> Tensor<FP64x64> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP64x64>) -> FP64x64 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP64x64>>) -> Tensor<FP64x64> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP64x64>) -> Span<usize> { |
stride(*self.shape)
}
fn ravel_index(self: @Tensor<FP64x64>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP64x64>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP64x64>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP64x64> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP64x64>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP64x64> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP64x64>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP64x64>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP64x64>, axes: Span<usize>) -> Tensor<FP64x64> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::greater::greater(self, other)
}
    fn greater_equal(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP64x64>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP64x64> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP64x64>, axis: usize) -> Tensor<FP64x64> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::xor::xor(self, other)
}
    fn or(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<usize> {
math::or::or(self, other)
}
fn acos(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP64x64>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP64x64> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP64x64>>, axis: usize,) -> Tensor<FP64x64> {
math::concat::concat(tensors, axis)
}
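    // The trailing NumberTrait::new_unscaled(128, true) / new_unscaled(127, false)
    // arguments passed to the quantization kernels below encode the i8 range [-128, 127].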
fn quantize_linear(
self: @Tensor<FP64x64>, y_scale: @Tensor<FP64x64>, y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP64x64>, x_zero_point: @Tensor<FP64x64>
) -> Tensor::<FP64x64> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP64x64>,
a_zero_point: @Tensor<FP64x64>,
b: @Tensor<i8>,
b_scale: @Tensor<FP64x64>,
b_zero_point: @Tensor<FP64x64>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP64x64>,
a_zero_point: @Tensor<FP64x64>,
b: @Tensor<i8>,
b_scale: @Tensor<FP64x64>,
        b_zero_point: @Tensor<FP64x64>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP64x64>,
a_zero_point: @Tensor<FP64x64>,
b: @Tensor<i8>,
b_scale: @Tensor<FP64x64>,
b_zero_point: @Tensor<FP64x64>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP64x64>>,
zero_points: Span<Tensor<FP64x64>>,
y_scale: @Tensor<FP64x64>,
y_zero_point: @Tensor<FP64x64>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP64x64>, a_zero_point: @Tensor<FP64x64>, alpha: FP64x64
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
            NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP64x64>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP64x64> {
core_tensor::slice::<FP64x64>(self, starts, ends, axes, steps)
}
fn gather(
self: @Tensor<FP64x64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP64x64> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP64x64>) -> Tensor<usize> {
core_tensor::nonzero(self)
}
fn squeeze(self: @Tensor<FP64x64>, axes: Option<Span<usize>>) -> Tensor<FP64x64> {
core_tensor::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP64x64>, axes: Span<usize>) -> Tensor<FP64x64> {
core_tensor::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP64x64>, min: Option<FP64x64>, max: Option<FP64x64>) -> Tensor<FP64x64> {
core_tensor::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
core_tensor::identity(self)
}
fn where(self: @Tensor<FP64x64>, x: @Tensor<FP64x64>, y: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::round::round(*self)
}
    fn reduce_l1(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn array_feature_extractor(self: @Tensor<FP64x64>, indices: Tensor<usize>) -> Tensor<FP64x64> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn binarizer(self: @Tensor<FP64x64>, threshold: Option<FP64x64>) -> Tensor<FP64x64> {
math::binarizer::binarizer(*self, threshold)
}
fn reduce_sum_square(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP64x64>, upper: bool, k: i64) -> Tensor<FP64x64> {
linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP64x64>,
updates: Tensor<FP64x64>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP64x64> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
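    // Logical `not` has no FP64x64 kernel, so it panics with 'not supported!'.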
fn not(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn gather_elements(
self: @Tensor<FP64x64>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP64x64> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(
self: Tensor<FP64x64>, bias: Option<FP64x64>, lambd: Option<FP64x64>
) -> Tensor<FP64x64> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP64x64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP64x64> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_min(
self: @Tensor<FP64x64>,
axes: Option<Span<usize>>,
        keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP64x64> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
fn pow(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP64x64>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP64x64>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP64x64>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP64x64> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
math::reduce_log_sum_exp::reduce_log_sum_exp(self, axis, keepdims)
}
fn erf(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP64x64>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP64x64>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP64x64>,
scale: @Tensor<FP64x64>,
B: Option<@Tensor<FP64x64>>,
axis: Option<i32>,
epsilon: Option<FP64x64>,
stash_type: Option<usize>,
) -> (Tensor<FP64x64>, Tensor<FP64x64>, Tensor<FP64x64>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP64x64>,
roi: Option<Tensor<FP64x64>>,
scales: Option<Span<FP64x64>>,
sizes: Option<Span<usize>>,
        antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP64x64>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP64x64>,
keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,
mode: Option<math::resize::MODE>,
nearest_mode: Option<math::resize::NEAREST_MODE>,
) -> Tensor<FP64x64> {
math::resize::resize(
self,
roi,
scales,
sizes,
antialias,
axes,
coordinate_transformation_mode,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
keep_aspect_ratio_policy,
mode,
nearest_mode
)
}
fn compress(
self: @Tensor<FP64x64>, condition: Tensor<usize>, axis: Option<usize>
) -> Tensor<FP64x64> {
math::compress::compress(self, condition, axis)
}
fn split(
self: @Tensor<FP64x64>, axis: usize, num_outputs: Option<usize>, spl: Option<Tensor<usize>>
) -> Array<Tensor<FP64x64>> {
manipulation::split::split(self, axis, num_outputs, spl)
}
fn random_uniform_like(
tensor: @Tensor<FP64x64>, high: Option<FP64x64>, low: Option<FP64x64>, seed: Option<usize>
) -> Tensor<FP64x64> {
math::random_uniform_like::random_uniform_like(*tensor, high, low, seed)
}
fn range(start: FP64x64, end: FP64x64, step: FP64x64) -> Tensor<FP64x64> {
math::range::range(start, end, step)
}
fn hann_window(size: FP64x64, periodic: Option<usize>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn hamming_window(size: FP64x64, periodic: Option<usize>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn blackman_window(size: FP64x64, periodic: Option<usize>) -> Tensor<FP64x64> {
panic(array!['not supported!'])
}
fn split_to_sequence(
        self: @Tensor<FP64x64>, axis: usize, keepdims: usize, split: Option<Tensor<usize>>
) -> Array<Tensor<FP64x64>> {
manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split)
}
fn reverse_sequence(
self: @Tensor<FP64x64>,
sequence_lens: Tensor<usize>,
batch_axis: Option<usize>,
time_axis: Option<usize>
) -> Tensor<FP64x64> {
manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis)
}
fn optional(self: @Tensor<FP64x64>) -> Option<Tensor<FP64x64>> {
manipulation::optional::optional(self)
}
fn dynamic_quantize_linear(
self: @Tensor<FP64x64>
) -> (Tensor::<u32>, Tensor::<FP64x64>, Tensor<FP64x64>) {
quantization::dynamic_quantize_linear::dynamic_quantize_linear(
self,
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(255, false),
NumberTrait::new_unscaled(0, false),
NumberTrait::new_unscaled(1, false),
)
}
fn scatter_nd(
self: @Tensor<FP64x64>,
updates: Tensor<FP64x64>,
indices: Tensor<usize>,
reduction: Option<usize>
) -> Tensor<FP64x64> {
math::scatter_nd::scatter_nd(self, updates, indices, reduction)
}
fn label_encoder(
self: @Tensor<FP64x64>,
default_list: Option<Span<FP64x64>>,
default_tensor: Option<Tensor<FP64x64>>,
keys: Option<Span<FP64x64>>,
keys_tensor: Option<Tensor<FP64x64>>,
values: Option<Span<FP64x64>>,
values_tensor: Option<Tensor<FP64x64>>
) -> Tensor<FP64x64> {
ml::label_encoder::label_encoder(
self, default_list, default_tensor, keys, keys_tensor, values, values_tensor
)
}
}
impl FP64x64TensorAdd of Add<Tensor<FP64x64>> {
fn add(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::add(@lhs, @rhs)
}
}
impl FP64x64TensorSub of Sub<Tensor<FP64x64>> {
fn sub(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::sub(@lhs, @rhs)
}
}
impl FP64x64TensorMul of Mul<Tensor<FP64x64>> {
fn mul(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::mul(@lhs, @rhs)
}
}
impl FP64x64TensorDiv of Div<Tensor<FP64x64>> {
fn div(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> Tensor<FP64x64> {
math::arithmetic::div(@lhs, @rhs)
}
}
impl FP64x64TensorPartialEq of PartialEq<Tensor<FP64x64>> {
fn eq(lhs: @Tensor<FP64x64>, rhs: @Tensor<FP64x64>) -> bool {
tensor_eq(*lhs, *rhs)
}
fn ne(lhs: @Tensor<FP64x64>, rhs: @Tensor<FP64x64>) -> bool {
!tensor_eq(*lhs, *rhs)
}
}
impl FP64x64TryIntoI8 of TryInto<FP64x64, i8> {
fn try_into(self: FP64x64) -> Option<i8> {
let number_felt: felt252 = (self.mag / ONE).into();
let number_i8: i8 = number_felt.try_into().unwrap();
if self.sign {
return Option::Some(number_i8 * -1_i8);
}
Option::Some(number_i8)
}
}
impl TensorI8IntoTensorFP64x64 of Into<Tensor<i8>, Tensor<FP64x64>> {
fn into(self: Tensor<i8>) -> Tensor<FP64x64> {
tensor_i8_to_tensor_fp64x64(@self)
}
}
impl FP64x64TensorPartialOrd of PartialOrd<Tensor<FP64x64>> {
fn ge(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::ge(lhs.data, rhs.data)
}
fn gt(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::gt(lhs.data, rhs.data)
}
fn le(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::le(lhs.data, rhs.data)
}
fn lt(lhs: Tensor<FP64x64>, rhs: Tensor<FP64x64>) -> bool {
SpanPartialOrd::lt(lhs.data, rhs.data)
}
}
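// Tolerance for relative_eq below: 1660000000000000 / 2^64 is roughly 9e-5 in
// the FP64x64 fixed-point representation.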
const PRECISION: u128 = 1660000000000000;
fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool {
let diff = *lhs - *rhs;
let rel_diff = if *lhs.mag != 0 {
(diff / *lhs).mag
} else {
diff.mag
};
rel_diff <= PRECISION
}
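// Mirrors the FP32x32 tensor_eq: shapes must match exactly, then data elements
// are compared with relative_eq.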
fn tensor_eq(mut lhs: Tensor<FP64x64>, mut rhs: Tensor<FP64x64>,) -> bool {
let mut is_eq = true;
while lhs.shape.len() != 0 && is_eq {
is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap();
};
if !is_eq {
return false;
}
    while lhs.data.len() != 0 && is_eq {
is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap());
};
is_eq
}
fn tensor_i8_to_tensor_fp64x64(x: @Tensor<i8>) -> Tensor<FP64x64> {
let mut result_data = ArrayTrait::<FP64x64>::new();
let mut data = *x.data;
while data.len() != 0 {
result_data.append((*data.pop_front().unwrap()).into());
};
TensorTrait::new(*x.shape, result_data.span())
} |
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::helpers::SpanPartialOrd;
use orion::operators::tensor::core::{
new_tensor, constant_of_shape, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape,
at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core as core_ops, ml, manipulation};
use orion::numbers::{NumberTrait, FP8x23, I8IntoFP8x23};
use orion::operators::tensor::implementations::{
tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor
};
use orion::numbers::fixed_point::implementations::fp8x23::math::trig::PI;
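// TensorTrait implementation for FP8x23 fixed-point tensors; it mirrors the
// FP64x64 implementation above, dispatching to FP8x23-specific kernels.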
impl FP8x23Tensor of TensorTrait<FP8x23> {
fn new(shape: Span<usize>, data: Span<FP8x23>) -> Tensor<FP8x23> {
new_tensor(shape, data)
}
fn constant_of_shape(shape: Span<usize>, value: FP8x23) -> Tensor<FP8x23> {
constant_of_shape(shape, value)
}
fn at(self: @Tensor<FP8x23>, indices: Span<usize>) -> FP8x23 {
*at_tensor(self, indices)
}
fn add(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::add(@lhs, @rhs)
}
fn sub(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::sub(@lhs, @rhs)
}
fn mul(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::mul(@lhs, @rhs)
}
fn div(lhs: Tensor<FP8x23>, rhs: Tensor<FP8x23>) -> Tensor<FP8x23> {
math::arithmetic::div(@lhs, @rhs)
}
fn min_in_tensor(self: @Tensor<FP8x23>) -> FP8x23 {
math::min_in_tensor::min_in_tensor::<FP8x23, u32>(*self.data)
}
fn min(tensors: Span<Tensor<FP8x23>>) -> Tensor<FP8x23> {
math::min::min(tensors)
}
fn max_in_tensor(self: @Tensor<FP8x23>) -> FP8x23 {
math::max_in_tensor::max_in_tensor(*self.data)
}
fn max(tensors: Span<Tensor<FP8x23>>) -> Tensor<FP8x23> {
math::max::max(tensors)
}
fn stride(self: @Tensor<FP8x23>) -> Span<usize> {
stride(*self.shape)
}
    fn ravel_index(self: @Tensor<FP8x23>, indices: Span<usize>) -> usize {
ravel_index(*self.shape, indices)
}
fn unravel_index(self: @Tensor<FP8x23>, index: usize) -> Span<usize> {
unravel_index(index, *self.shape)
}
fn reshape(self: @Tensor<FP8x23>, target_shape: Span<i32>, allowzero: bool) -> Tensor<FP8x23> {
reshape(self, target_shape, allowzero)
}
fn reduce_sum(
self: @Tensor<FP8x23>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23> {
math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes)
}
fn reduce_prod(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_prod::reduce_prod(self, axis, keepdims)
}
fn argmax(
self: @Tensor<FP8x23>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32> {
math::argmax::argmax(self, axis, keepdims, select_last_index)
}
fn argmin(
self: @Tensor<FP8x23>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize> {
math::argmin::argmin(self, axis, keepdims, select_last_index)
}
fn transpose(self: @Tensor<FP8x23>, axes: Span<usize>) -> Tensor<FP8x23> {
linalg::transpose::transpose(self, axes)
}
fn matmul(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
linalg::matmul::matmul(self, other)
}
fn exp(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::exp::exp(*self)
}
fn log(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::log::log(*self)
}
fn equal(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::equal::equal(self, other)
}
fn greater(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::greater::greater(self, other)
}
fn greater_equal(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> { |
math::greater_equal::greater_equal(self, other)
}
fn less(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<i32> {
math::less::less(self, other)
}
fn less_equal(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<i32> {
math::less_equal::less_equal(self, other)
}
fn abs(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::abs::abs(*self)
}
fn neg(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::neg::neg(*self)
}
fn ceil(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::ceil::ceil(*self)
}
fn sin(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sin::sin(*self)
}
fn cos(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::cos::cos(*self)
}
fn asin(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::asin::asin(*self)
}
fn cumsum(
self: @Tensor<FP8x23>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<FP8x23> {
math::cumsum::cumsum(self, axis, exclusive, reverse)
}
fn flatten(self: @Tensor<FP8x23>, axis: usize) -> Tensor<FP8x23> {
math::flatten::flatten(self, axis)
}
fn sinh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sinh::sinh(*self)
}
fn tanh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::tanh::tanh(*self)
}
fn cosh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::cosh::cosh(*self)
}
fn acosh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::acosh::acosh(*self)
}
fn asinh(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::asinh::asinh(*self)
}
fn atan(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::atan::atan(*self)
}
fn xor(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::xor::xor(self, other)
}
fn or(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<usize> {
math::or::or(self, other)
}
    fn acos(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::acos::acos(*self)
}
fn onehot(
self: @Tensor<FP8x23>, depth: usize, axis: Option<usize>, values: Span<usize>
) -> Tensor<FP8x23> {
math::onehot::onehot(self, depth, axis, values)
}
fn sqrt(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sqrt::sqrt(*self)
}
fn concat(tensors: Span<Tensor<FP8x23>>, axis: usize,) -> Tensor<FP8x23> {
math::concat::concat(tensors, axis)
}
fn quantize_linear(
self: @Tensor<FP8x23>, y_scale: @Tensor<FP8x23>, y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
quantization::quantize_linear::quantize_linear(
self,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<FP8x23>, x_zero_point: @Tensor<FP8x23>
) -> Tensor::<FP8x23> {
quantization::dequantize_linear::dequantize_linear(self, x_scale, x_zero_point)
}
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23>,
a_zero_point: @Tensor<FP8x23>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23>,
b_zero_point: @Tensor<FP8x23>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
quantization::qlinear_add::qlinear_add(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23>,
a_zero_point: @Tensor<FP8x23>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23>,
b_zero_point: @Tensor<FP8x23>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
        quantization::qlinear_mul::qlinear_mul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<FP8x23>,
a_zero_point: @Tensor<FP8x23>,
b: @Tensor<i8>,
b_scale: @Tensor<FP8x23>,
b_zero_point: @Tensor<FP8x23>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>
) -> Tensor::<i8> {
quantization::qlinear_matmul::qlinear_matmul(
self,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
y_scale,
y_zero_point,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<FP8x23>>,
zero_points: Span<Tensor<FP8x23>>,
y_scale: @Tensor<FP8x23>,
y_zero_point: @Tensor<FP8x23>,
axis: usize
) -> Tensor::<i8> {
quantization::qlinear_concat::qlinear_concat(
tensors,
scales,
zero_points,
y_scale,
y_zero_point,
axis,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<FP8x23>, a_zero_point: @Tensor<FP8x23>, alpha: FP8x23
) -> Tensor::<i8> {
quantization::qlinear_leakyrelu::qlinear_leakyrelu(
self,
a_scale,
a_zero_point,
alpha,
NumberTrait::new_unscaled(128, true),
NumberTrait::new_unscaled(127, false)
)
}
fn slice(
self: @Tensor<FP8x23>,
starts: Span<usize>,
ends: Span<usize>, |
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<FP8x23> {
core_ops::slice::<FP8x23>(self, starts, ends, axes, steps)
}
fn gather(self: @Tensor<FP8x23>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<FP8x23> {
math::gather::gather(self, indices, axis)
}
fn nonzero(self: @Tensor<FP8x23>) -> Tensor<usize> {
core_ops::nonzero(self)
}
fn squeeze(self: @Tensor<FP8x23>, axes: Option<Span<usize>>) -> Tensor<FP8x23> {
core_ops::squeeze(self, axes)
}
fn unsqueeze(self: @Tensor<FP8x23>, axes: Span<usize>) -> Tensor<FP8x23> {
core_ops::unsqueeze(self, axes)
}
fn sign(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::sign::sign(*self)
}
fn clip(self: @Tensor<FP8x23>, min: Option<FP8x23>, max: Option<FP8x23>) -> Tensor<FP8x23> {
core_ops::clip(self, min, max)
}
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}
fn identity(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
core_ops::identity(self)
}
fn where(self: @Tensor<FP8x23>, x: @Tensor<FP8x23>, y: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::where::where(self, x, y)
}
fn bitwise_and(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::bitwise_and::bitwise_and(self, other)
}
fn bitwise_xor(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::bitwise_xor::bitwise_xor(self, other)
}
fn bitwise_or(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::bitwise_or::bitwise_or(self, other)
}
fn round(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::round::round(*self)
}
fn reduce_l1(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_l1::reduce_l1(self, axis, keepdims)
}
fn trilu(self: @Tensor<FP8x23>, upper: bool, k: i64) -> Tensor<FP8x23> {
        linalg::trilu::trilu(self, upper, k)
}
fn scatter(
self: @Tensor<FP8x23>,
updates: Tensor<FP8x23>,
indices: Tensor<usize>,
axis: Option<usize>,
reduction: Option<usize>
) -> Tensor<FP8x23> {
math::scatter::scatter(self, updates, indices, axis, reduction)
}
fn reduce_sum_square(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_sum_square::reduce_sum_square(self, axis, keepdims)
}
fn reduce_l2(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_l2::reduce_l2(self, axis, keepdims)
}
fn gather_elements(
self: @Tensor<FP8x23>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<FP8x23> {
math::gather_elements::gather_elements(self, indices, axis)
}
fn shrink(self: Tensor<FP8x23>, bias: Option<FP8x23>, lambd: Option<FP8x23>) -> Tensor<FP8x23> {
math::shrink::shrink(self, bias, lambd)
}
fn reduce_mean(
self: @Tensor<FP8x23>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}
fn binarizer(self: @Tensor<FP8x23>, threshold: Option<FP8x23>) -> Tensor<FP8x23> {
math::binarizer::binarizer(*self, threshold)
}
fn array_feature_extractor(self: @Tensor<FP8x23>, indices: Tensor<usize>) -> Tensor<FP8x23> {
ml::array_feature_extractor::array_feature_extractor(*self, indices)
}
fn not(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
panic(array!['not supported!'])
}
fn reduce_min(
self: @Tensor<FP8x23>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23> {
math::reduce_min::reduce_min(self, axes, keepdims, noop_with_empty_axes)
}
    fn pow(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::pow::pow(self, other)
}
fn is_inf(
self: @Tensor<FP8x23>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
math::is_inf::is_inf(self, detect_negative, detect_positive)
}
fn is_nan(self: @Tensor<FP8x23>) -> Tensor<bool> {
math::is_nan::is_nan(self)
}
fn gather_nd(
self: @Tensor<FP8x23>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<FP8x23> {
math::gather_nd::gather_nd(self, indices, batch_dims)
}
fn reduce_log_sum(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
}
fn reduce_log_sum_exp(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
panic(array!['not supported!'])
}
fn erf(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
math::erf::erf(*self)
}
fn unique(
self: @Tensor<FP8x23>, axis: Option<usize>, sorted: Option<bool>
) -> (Tensor<FP8x23>, Tensor<i32>, Tensor<i32>, Tensor<i32>) {
manipulation::unique::unique(self, axis, sorted)
}
fn layer_normalization(
self: @Tensor<FP8x23>,
scale: @Tensor<FP8x23>,
B: Option<@Tensor<FP8x23>>,
axis: Option<i32>,
epsilon: Option<FP8x23>,
stash_type: Option<usize>,
) -> (Tensor<FP8x23>, Tensor<FP8x23>, Tensor<FP8x23>) {
math::layer_normalization::layer_normalization(self, scale, B, axis, epsilon, stash_type)
}
fn resize(
self: @Tensor<FP8x23>,
roi: Option<Tensor<FP8x23>>,
scales: Option<Span<FP8x23>>,
sizes: Option<Span<usize>>,
antialias: Option<usize>,
axes: Option<Span<usize>>,
coordinate_transformation_mode: Option<math::resize::TRANSFORMATION_MODE>,
cubic_coeff_a: Option<FP8x23>,
exclude_outside: Option<bool>,
extrapolation_value: Option<FP8x23>,
        keep_aspect_ratio_policy: Option<math::resize::KEEP_ASPECT_RATIO_POLICY>,