text
stringlengths 1
2.05k
|
---|
use orion::numbers::NumberTrait;
use orion::operators::tensor::helpers::replace_index;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Cf: TensorTrait::cumsum docstring
///
/// Dispatches to the forward or reverse cumulative-sum implementation,
/// seeding both with the additive identity of `T`.
/// `reverse` defaults to false when absent.
fn cumsum<
    T,
    MAG,
    impl TTensorTrait: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAddEq: AddEq<T>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<T> {
    // An absent flag means forward accumulation.
    let is_reversed = match reverse {
        Option::Some(flag) => flag,
        Option::None => false
    };

    if is_reversed {
        cumsum_reverse::<T>(self, axis, exclusive, NumberTrait::zero())
    } else {
        cumsum_forward::<T>(self, axis, exclusive, NumberTrait::zero())
    }
}
/// Forward cumulative sum along `axis`.
///
/// output[i] = sum of data[0..=i] along `axis`, or the sum of the strictly
/// earlier elements when `exclusive` is true. `zero` seeds exclusive sums.
/// (Also repairs a corrupted identifier that had been split by a stray `|`.)
fn cumsum_forward<
    T,
    impl TTensorTrait: TensorTrait<T>,
    impl TAdd: Add<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, exclusive: Option<bool>, zero: T,
) -> Tensor<T> {
    let exclusive = match exclusive {
        Option::Some(val) => val,
        Option::None => false,
    };

    assert(axis < (*self.shape).len(), 'axis out of dimensions');

    let data = *self.data;
    let mut output_data = array![];

    let mut index: usize = 0;
    while index != data.len() {
        let current_indices = unravel_index(index, *self.shape);
        let axis_value = *current_indices[axis];

        if axis_value == 0 {
            // First element along the axis: no predecessor to accumulate.
            if exclusive {
                output_data.append(zero);
            } else {
                output_data.append(*(data)[index]);
            }
        } else {
            let previous_axis_element_indices = replace_index(
                current_indices, axis, axis_value - 1
            );
            let previous_axis_element_index = ravel_index(
                *self.shape, previous_axis_element_indices
            );

            if exclusive {
                // Exclusive sum at i = exclusive sum at i-1 + data[i-1].
                output_data
                    .append(
                        *output_data[previous_axis_element_index]
                            + *(data)[previous_axis_element_index]
                    );
            } else {
                output_data.append(*output_data[previous_axis_element_index] + *(data)[index]);
            };
        }

        index += 1;
    };

    TensorTrait::<T>::new(*self.shape, output_data.span())
}
/// Reverse cumulative sum along `axis` (ONNX `reverse=1`).
///
/// output[i] = sum of data[i..end] along `axis`, or the sum of the strictly
/// later elements when `exclusive` is true. `zero` seeds exclusive sums.
/// (Also repairs a corrupted line that had been split by a stray `|`.)
fn cumsum_reverse<
    T,
    impl TTensorTrait: TensorTrait<T>,
    impl TAddEq: AddEq<T>,
    impl TSub: Sub<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, exclusive: Option<bool>, zero: T,
) -> Tensor<T> {
    let exclusive = match exclusive {
        Option::Some(val) => val,
        Option::None => false,
    };

    assert(axis < (*self.shape).len(), 'axis out of dimensions');

    let data = *self.data;
    let mut output_data = array![];

    let mut index: usize = 0;
    while index != data.len() {
        let current_indices = unravel_index(index, *self.shape);
        let mut axis_value = *current_indices[axis];

        if axis_value == 0 {
            // Head of the axis: materialize the tail sum directly by walking
            // every later element along the axis.
            let mut sum = *(data)[index];
            if exclusive {
                sum = zero;
            }

            let end_index = *(*self.shape)[axis] - 1;
            loop {
                axis_value += 1;
                if axis_value > end_index {
                    break ();
                }

                let next_axis_element_indices = replace_index(current_indices, axis, axis_value);
                let next_axis_element_index = ravel_index(*self.shape, next_axis_element_indices);
                sum += *data[next_axis_element_index];
            };

            output_data.append(sum);
        } else {
            // Later positions derive from the already-computed predecessor.
            let previous_axis_element_indices = replace_index(
                current_indices, axis, axis_value - 1
            );
            let previous_axis_element_index = ravel_index(
                *self.shape, previous_axis_element_indices
            );

            if exclusive {
                // Exclusive tail at i = tail at i-1 minus data[i].
                output_data.append(*output_data[previous_axis_element_index] - *(data)[index]);
            } else {
                // Inclusive tail at i = inclusive tail at i-1 minus data[i-1].
                output_data
                    .append(
                        *output_data[previous_axis_element_index]
                            - *(data)[previous_axis_element_index]
                    );
            }
        }

        index += 1;
    };

    TensorTrait::<T>::new(*self.shape, output_data.span())
}
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::equal docstring
///
/// Broadcasting element-wise equality; yields 1 where equal, 0 otherwise.
fn equal<
    T,
    impl UsizeFTensor: TensorTrait<usize>,
    impl TPartialEq: PartialEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut flags: Array<usize> = array![];

    let mut i: usize = 0;
    while i != total {
        let bcast = unravel_index(i, out_shape);
        let lhs = broadcast_index_mapping(*y.shape, bcast);
        let rhs = broadcast_index_mapping(*z.shape, bcast);

        let flag = if *(*y.data)[lhs] == *(*z.data)[rhs] {
            1
        } else {
            0
        };
        flags.append(flag);

        i += 1;
    };

    TensorTrait::new(out_shape, flags.span())
}
|
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::erf docstring
///
/// Element-wise Gauss error function, consuming the data span front-to-back.
fn erf<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TFixed: FixedTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut z: Tensor<T>
) -> Tensor<T> {
    let mut mapped: Array<T> = array![];
    loop {
        match z.data.pop_front() {
            Option::Some(elem) => { mapped.append((*elem).erf()); },
            Option::None => { break; }
        };
    };

    TensorTrait::<T>::new(z.shape, mapped.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::exp docstring
///
/// Element-wise exponential e^x over the tensor data.
fn exp<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut exponentials = array![];
    loop {
        match self.data.pop_front() {
            Option::Some(elem) => { exponentials.append((*elem).exp()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, exponentials.span())
}
/// Cf: TensorTrait::exp docstring
///
/// Same as `exp`, but every element is first widened from `T` to `W` so the
/// exponential is computed at the higher precision.
fn exp_upcast<
    T,
    TMAG,
    W,
    WMAG,
    impl TFixedTrait: FixedTrait<T, TMAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl WFixedTrait: FixedTrait<W, WMAG>,
    impl WTensor: TensorTrait<W>,
    impl WCopy: Copy<W>,
    impl WDrop: Drop<W>,
    impl TIntoW: Into<T, W>,
>(
    mut self: Tensor<T>
) -> Tensor<W> {
    let mut widened_exponentials = array![];
    loop {
        match self.data.pop_front() {
            Option::Some(elem) => {
                let wide: W = TIntoW::into(*elem);
                widened_exponentials.append(wide.exp());
            },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, widened_exponentials.span())
}
|
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::flatten docstring
///
/// Collapses the tensor into 2-D: the first output dimension is the product
/// of all dimensions before `axis`, the second absorbs everything else.
fn flatten<T, impl TTensorTrait: TensorTrait<T>>(self: @Tensor<T>, axis: usize) -> Tensor<T> {
    let shape = *self.shape;
    assert(axis < shape.len(), 'axis out of dimensions');

    // Product of all dimensions strictly before `axis`.
    let mut leading: usize = 1;
    let mut i: usize = 0;
    while i != axis {
        leading *= *shape.at(i);
        i += 1;
    };

    // The remaining elements fill the second dimension.
    let trailing = (*self.data).len() / leading;

    self
        .reshape(
            array![leading.try_into().unwrap(), trailing.try_into().unwrap()].span(),
            false
        )
}
|
use core::option::OptionTrait;
use core::traits::TryInto;
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: TensorTrait::gather docstring
///
/// Gathers slices of `self` along `axis` at the positions named by `indices`.
/// Negative `axis` / index values are interpreted relative to the end.
/// (Also removes a stray `|` separator that had corrupted the body.)
fn gather<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
    self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<T> {
    // Resolve the (possibly negative) axis; defaults to 0.
    let axis: usize = match axis {
        Option::Some(val) => {
            if val < 0 {
                (((*self.shape).len()).try_into().unwrap() + val).try_into().unwrap()
            } else {
                val.try_into().unwrap()
            }
        },
        Option::None => 0
    };
    assert(axis < (*self.shape).len(), 'axis out of dimensions');

    let axis_shape = *(*self.shape).at(axis);

    // Normalize negative indices into [0, axis_shape).
    let mut adjusted_indices = array![];
    let mut indices_data = indices.data.clone();
    loop {
        match indices_data.pop_front() {
            Option::Some(index) => {
                let adjusted_index: usize = if *index < 0 {
                    let val: u32 = (axis_shape.try_into().unwrap() + *index).try_into().unwrap();
                    val
                } else {
                    let val: u32 = (*index).try_into().unwrap();
                    val
                };
                assert(adjusted_index >= 0 && adjusted_index < axis_shape, 'Index out of bounds');
                adjusted_indices.append(adjusted_index);
            },
            Option::None => { break; }
        };
    };

    // Output shape: self.shape with the `axis` dimension replaced by indices.shape.
    let mut output_data = array![];
    let mut output_size = array![];
    let mut self_shape = *self.shape;
    let mut i: usize = 0;
    loop {
        match self_shape.pop_front() {
            Option::Some(val) => {
                if i == axis {
                    let mut indices_shape = indices.shape;
                    loop {
                        match indices_shape.pop_front() {
                            Option::Some(item) => { output_size.append(*item); },
                            Option::None => { break; }
                        };
                    };
                } else {
                    output_size.append(*val);
                }
                i += 1;
            },
            Option::None => { break; }
        };
    };

    // outer_loop_break: product of dims before `axis`;
    // divisor: product of dims after `axis` (stride of one step along the axis).
    let mut outer_loop_break = 1;
    let mut divisor = (*self.data).len();
    let mut self_shape = *self.shape;
    let mut i: usize = 0;
    loop {
        match self_shape.pop_front() {
            Option::Some(val) => {
                if i == axis {
                    divisor /= *val;
                    break ();
                };
                outer_loop_break *= *val;
                divisor /= *val;
                i += 1;
            },
            Option::None => { break; }
        };
    };

    // break_loop: product of dims from `axis` to the end (size of one outer slice).
    let mut break_loop: usize = 1;
    let mut self_shape = *self.shape;
    loop {
        match self_shape.pop_back() {
            Option::Some(val) => {
                if self_shape.len() + 1 == axis {
                    break;
                }
                break_loop *= *val;
            },
            Option::None => { break; }
        };
    };

    // For every outer slice, emit the elements whose axis coordinate matches
    // each gathered index, preserving the order of `adjusted_indices`.
    let mut outer_loop: usize = 0;
    let axis_index = *self.shape[axis];
    while outer_loop != outer_loop_break {
        let mut adjusted_indices_iter = adjusted_indices.clone();
        loop {
            match adjusted_indices_iter.pop_front() {
                Option::Some(indice) => {
                    let mut inner_loop = 0;
                    while inner_loop != break_loop {
                        // Axis coordinate of this flat position within the slice.
                        let new_val = inner_loop / divisor % axis_index;
                        if indice == new_val {
                            output_data.append(*self.data[break_loop * outer_loop + inner_loop]);
                        }

                        inner_loop += 1;
                    }
                },
                Option::None => { break; },
            };
        };

        outer_loop += 1;
    };

    let mut output_tensor = TensorTrait::<T>::new(output_size.span(), output_data.span());
    output_tensor
}
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: TensorTrait::gather_elements docstring
///
/// For every entry of `indices`, picks the element of `self` whose coordinate
/// along `axis` is replaced by that index; the output has `indices.shape`.
/// (Also repairs a statement that had been split by a stray `|` separator.)
fn gather_elements<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
    self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>
) -> Tensor<T> {
    // Resolve the (possibly negative) axis; defaults to 0.
    let axis: usize = match axis {
        Option::Some(val) => {
            if val < 0 {
                (((*self.shape).len()).try_into().unwrap() + val).try_into().unwrap()
            } else {
                val.try_into().unwrap()
            }
        },
        Option::None => 0
    };
    assert(axis < (*self.shape).len(), 'axis out of dimensions');

    let axis_shape = *(*self.shape).at(axis);

    // Normalize negative indices into [0, axis_shape).
    let mut adjusted_indices = array![];
    let mut indices_data = indices.data.clone();
    loop {
        match indices_data.pop_front() {
            Option::Some(index) => {
                let adjusted_index: usize = if *index < 0 {
                    let val: u32 = (axis_shape.try_into().unwrap() + *index).try_into().unwrap();
                    val
                } else {
                    let val: u32 = (*index).try_into().unwrap();
                    val
                };
                assert(adjusted_index >= 0 && adjusted_index < axis_shape, 'Index out of bounds');
                adjusted_indices.append(adjusted_index);
            },
            Option::None => { break; }
        };
    };

    // multiplier: product of dims from `axis` onward;
    // looper: product of dims strictly after `axis`.
    let mut output_data = array![];
    let mut data_shape_clone = (*self.shape).clone();
    let mut multiplier = 1;
    let mut looper = 1;
    let mut ind = 0;
    loop {
        match data_shape_clone.pop_front() {
            Option::Some(val) => {
                if ind >= axis {
                    multiplier *= *val;
                }
                if ind > axis {
                    looper *= *val;
                }
                ind += 1;
            },
            Option::None => { break; }
        };
    };

    let inner_loop = multiplier / axis_shape;

    // Translate every adjusted index into a flat offset into `self.data`,
    // with simplified formulas for the first and last axis.
    let mut adjusted_indices_iter = adjusted_indices.clone();
    let mut i: usize = 0;
    loop {
        match adjusted_indices_iter.pop_front() {
            Option::Some(indice) => {
                let value = if axis == 0 {
                    indice * inner_loop + (i % inner_loop)
                } else if axis == (*self.shape).len() - 1 {
                    indice + axis_shape * (i / axis_shape)
                } else {
                    indice * looper
                        + (i % looper)
                        + (multiplier / axis_shape) * (i / (multiplier / axis_shape))
                };
                output_data.append(*self.data[value]);
                i += 1;
            },
            Option::None => { break; }
        };
    };

    TensorTrait::<T>::new(indices.shape, output_data.span())
}
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::U32TensorPartialEq;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// Cf: TensorTrait::gather_nd docstring
///
/// Gathers slices of `self` addressed by tuples taken from the last dimension
/// of `indices`, with the first `batch_dims` dimensions shared between both
/// tensors. (Also repairs two statements that had been split by stray `|`
/// separators.)
fn gather_nd<T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,>(
    self: @Tensor<T>, indices: Tensor<usize>, batch_dims: Option<usize>
) -> Tensor<T> {
    let batch_dims = match batch_dims {
        Option::Some(val) => val,
        Option::None => 0
    };

    let data_rank = (*self.shape).len();
    let indices_rank = (indices.shape).len();
    assert((data_rank >= 1) & (indices_rank >= 1), 'rank must > 1');

    let mut data_shape = *self.shape;
    let mut indices_shape = indices.shape;
    let mut data_shape_clone = data_shape.clone();
    let mut indices_shape_clone = indices_shape.clone();

    // The last indices dimension is the length of each index tuple.
    let indices_shape_last = indices_shape_clone.pop_back().unwrap();
    assert(
        (*indices_shape_last >= 1) & (*indices_shape_last <= data_rank - batch_dims),
        'check indices'
    );

    let mut batch_dims_shape = array![];
    let mut output_shape = array![];
    let mut index_data = array![];
    let mut output_data = array![];
    let mut batch_dims_size = batch_dims;
    let mut total_data_len = 1;
    let mut multiple_data_len = array![];

    // Leading batch dimensions (shared between data and indices), then the
    // remaining indices dimensions (minus the tuple dimension popped above).
    let mut ind = 0;
    while ind != batch_dims {
        match indices_shape_clone.pop_front() {
            Option::Some(val) => {
                batch_dims_size *= *val;
                batch_dims_shape.append(*val);
                ind += 1;
            },
            Option::None => { break; }
        };
    };
    loop {
        match indices_shape_clone.pop_front() {
            Option::Some(val) => { batch_dims_shape.append(*val); },
            Option::None => { break; }
        };
    };

    // When index tuples are shorter than the remaining data rank, the
    // uncovered trailing data dimensions are appended to the output shape.
    if (*indices_shape_last == data_rank - batch_dims) {
        output_shape = batch_dims_shape;
    } else {
        let mut ind = 0;
        output_shape = batch_dims_shape;
        loop {
            match data_shape_clone.pop_front() {
                Option::Some(val) => {
                    if (ind >= (batch_dims + *indices_shape_last)) {
                        output_shape.append(*val);
                    }
                    ind += 1;
                },
                Option::None => { break; }
            };
        };
    }

    // multiple_data_len: running products of data dims past the batch dims;
    // incrementer: number of contiguous elements each index tuple selects.
    let mut ind = 0;
    let mut multiple = 1;
    let mut incrementer = 1;
    let mut data_shape_clone = data_shape.clone();
    loop {
        match data_shape_clone.pop_front() {
            Option::Some(val) => {
                if (ind >= batch_dims) {
                    multiple *= *val;
                    multiple_data_len.append(multiple);
                }
                if (ind >= batch_dims + *indices_shape_last) {
                    incrementer *= *val;
                }
                ind += 1;
            },
            Option::None => { break; }
        };
    };

    // breaker: count of index scalars past the batch dims, used to detect
    // batch boundaries while streaming `indices.data`.
    let mut ind = 0;
    let mut indices_shape_clone = indices_shape.clone();
    let mut breaker = 1;
    loop {
        match indices_shape_clone.pop_front() {
            Option::Some(val) => {
                if (ind >= batch_dims) {
                    breaker *= *val;
                }
                ind += 1;
            },
            Option::None => { break; }
        };
    };

    total_data_len = *multiple_data_len.at(multiple_data_len.len() - 1);

    // Fold each index tuple into a flat offset, then expand it into the
    // `incrementer` contiguous elements it addresses.
    let mut data_indices = indices.data;
    let mut ind = 0;
    let mut result = 0;
    loop {
        match data_indices.pop_front() {
            Option::Some(val) => {
                let index = ind % *indices_shape_last;
                let incr = total_data_len * (ind / breaker);
                result += (*val * total_data_len / *multiple_data_len.at(index));
                ind += 1;

                if (index == *indices_shape_last - 1) {
                    let mut data_ind: usize = result;
                    while data_ind != result + incrementer {
                        index_data.append(data_ind + incr);
                        data_ind += 1;
                    };
                    result = 0;
                };
            },
            Option::None => { break; }
        };
    };

    loop {
        match index_data.pop_front() {
            Option::Some(val) => { output_data.append(*self.data[val]); },
            Option::None => { break; }
        };
    };

    let mut output_tensor = TensorTrait::<T>::new(output_shape.span(), output_data.span());
    output_tensor
}
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::greater docstring
///
/// Broadcasting element-wise `>`; yields 1 where y > z, 0 otherwise.
fn greater<
    T,
    impl UsizeFTensor: TensorTrait<usize>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut flags: Array<usize> = array![];

    let mut i: usize = 0;
    while i != total {
        let bcast = unravel_index(i, out_shape);
        let lhs = broadcast_index_mapping(*y.shape, bcast);
        let rhs = broadcast_index_mapping(*z.shape, bcast);

        let flag = if *(*y.data)[lhs] > *(*z.data)[rhs] {
            1
        } else {
            0
        };
        flags.append(flag);

        i += 1;
    };

    TensorTrait::new(out_shape, flags.span())
}
|
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::greater_equal docstring
///
/// Broadcasting element-wise `>=`; yields 1 where y >= z, 0 otherwise.
fn greater_equal<
    T,
    impl UsizeFTensor: TensorTrait<usize>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut flags: Array<usize> = array![];

    let mut i: usize = 0;
    while i != total {
        let bcast = unravel_index(i, out_shape);
        let lhs = broadcast_index_mapping(*y.shape, bcast);
        let rhs = broadcast_index_mapping(*z.shape, bcast);

        let flag = if *(*y.data)[lhs] >= *(*z.data)[rhs] {
            1
        } else {
            0
        };
        flags.append(flag);

        i += 1;
    };

    TensorTrait::new(out_shape, flags.span())
}
|
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::hamming_window docstring
///
/// Builds a Hamming window of length `size`:
/// w[n] = alpha - beta * cos(2*pi*n / N), with alpha = 25/46 and
/// beta = 1 - alpha, both derived here from small integer constants so the
/// same generic fixed-point arithmetic applies throughout.
fn hamming_window<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    size: T, PI: T, periodic: Option<usize>
) -> Tensor<T> {
    let zero: T = NumberTrait::zero();
    let one: T = NumberTrait::one();
    let two: T = one + one;
    let three: T = two + one;

    // 25/46 is the classic Hamming alpha: 25 = 3^3 - 2, 46 = 25*2 - 4.
    let n25: T = three.pow(three) - two;
    let n46: T = n25 * two - two * two;
    let alpha: T = n25 / n46;
    let beta: T = one - alpha;

    let ni = TensorTrait::range(zero, size, one);
    assert((ni.shape).len() == 1, 'Unexpected shape 1.');

    // Periodic windows divide by N; symmetric ones by N - 1.
    let mut N_1 = size;
    if periodic != Option::Some(1) {
        N_1 = N_1 - one;
    };

    let len = *(ni.shape).at(0);
    let mut phases: Array<T> = array![];
    let mut i: usize = 0;
    while i != len {
        let v = *(ni.data).at(i);
        phases.append(v * PI * two / N_1);
        i += 1;
    };

    let window_cos = TensorTrait::<T>::new(ni.shape, phases.span()).cos();

    let mut values: Array<T> = array![];
    let mut j: usize = 0;
    while j != len {
        let c = *(window_cos.data).at(j);
        values.append(alpha - c * beta);
        j += 1;
    };

    TensorTrait::<T>::new(ni.shape, values.span())
}
|
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::hann_window docstring
///
/// Builds a Hann window of length `size`: w[n] = sin^2(pi * n / N).
fn hann_window<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    size: T, PI: T, periodic: Option<usize>
) -> Tensor<T> {
    let zero: T = NumberTrait::zero();
    let one: T = NumberTrait::one();

    let ni = TensorTrait::range(zero, size, one);
    assert((ni.shape).len() == 1, 'Unexpected shape 1.');

    // Periodic windows divide by N; symmetric ones by N - 1.
    let mut N_1 = size;
    if periodic != Option::Some(1) {
        N_1 = N_1 - one;
    };

    let len = *(ni.shape).at(0);
    let mut phases: Array<T> = array![];
    let mut i: usize = 0;
    while i != len {
        let v = *(ni.data).at(i);
        phases.append(v * PI / N_1);
        i += 1;
    };

    let window_sin = TensorTrait::<T>::new(ni.shape, phases.span()).sin();

    let mut values: Array<T> = array![];
    let mut j: usize = 0;
    while j != len {
        let s = *(window_sin.data).at(j);
        values.append(s * s);
        j += 1;
    };

    TensorTrait::<T>::new(ni.shape, values.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
/// Cf: TensorTrait::is_inf docstring
///
/// Marks elements that are infinite. `detect_negative` / `detect_positive`
/// (any non-zero value counts as enabled, both default to enabled) restrict
/// the test to one sign; when both are disabled the result keeps the input
/// shape but carries an empty data span, mirroring the established behavior.
fn is_inf<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    x: @Tensor<T>, detect_negative: Option<u8>, detect_positive: Option<u8>
) -> Tensor<bool> {
    let detect_neg = match detect_negative {
        Option::Some(v) => v != 0,
        Option::None => true,
    };
    let detect_pos = match detect_positive {
        Option::Some(v) => v != 0,
        Option::None => true,
    };

    if !detect_neg && !detect_pos {
        return TensorTrait::new(*x.shape, ArrayTrait::<bool>::new().span());
    }
    if !detect_neg && detect_pos {
        return is_pos_inf(x);
    }
    if detect_neg && !detect_pos {
        return is_neg_inf(x);
    }

    // Both signs enabled: test for any infinity.
    let mut flags: Array<bool> = array![];
    let mut items: Span<T> = *x.data;
    loop {
        match items.pop_front() {
            Option::Some(elem) => { flags.append((*elem).is_inf()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(*x.shape, flags.span())
}
/// Returns a boolean tensor marking the elements of `x` that are +inf.
fn is_pos_inf<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    x: @Tensor<T>
) -> Tensor<bool> {
    let mut flags: Array<bool> = array![];
    let mut items: Span<T> = *x.data;
    loop {
        match items.pop_front() {
            Option::Some(elem) => { flags.append((*elem).is_pos_inf()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(*x.shape, flags.span())
}
/// Returns a boolean tensor marking the elements of `x` that are -inf.
/// (Repairs a stray `|` separator that had been spliced into the signature.)
fn is_neg_inf<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    x: @Tensor<T>
) -> Tensor<bool> {
    let mut data_result: Array<bool> = array![];
    let mut y: Span<T> = *x.data;
    loop {
        match y.pop_front() {
            Option::Some(item) => { data_result.append((*item).is_neg_inf()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(*x.shape, data_result.span())
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
/// Cf: TensorTrait::is_nan docstring
///
/// Element-wise NaN test.
fn is_nan<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    x: @Tensor<T>
) -> Tensor<bool> {
    let mut flags: Array<bool> = array![];
    let mut items: Span<T> = *x.data;
    loop {
        match items.pop_front() {
            Option::Some(elem) => { flags.append((*elem).is_nan()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(*x.shape, flags.span())
}
|
use core::option::OptionTrait;
use core::traits::TryInto;
use orion::numbers::{NumberTrait, I32IntoU32};
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
use orion::operators::tensor::{
TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl};
/// Cf: TensorTrait::layer_normalization docstring
///
/// Normalizes `self` over the trailing dimensions starting at `axis`, then
/// applies `scale` and the optional bias `B`. Returns (Y, mean, inverse
/// std-dev); the latter two are shaped with singleton trailing dims so they
/// broadcast against the input. `stash_type` is accepted for ONNX signature
/// compatibility and is currently unused.
/// (Also repairs two statements that had been split by stray `|` separators.)
fn layer_normalization<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +Div<Tensor<T>>,
    +Sub<Tensor<T>>,
    +Add<Tensor<T>>,
    +Mul<Tensor<T>>,
    +Into<usize, MAG>,
>(
    self: @Tensor<T>,
    scale: @Tensor<T>,
    B: Option<@Tensor<T>>,
    axis: Option<i32>,
    epsilon: Option<T>,
    stash_type: Option<usize>,
) -> (Tensor<T>, Tensor<T>, Tensor<T>) {
    let X_rank = (*self).shape.len();
    let mut axis = match axis {
        Option::Some(axis) => axis,
        Option::None => -1,
    };
    let epsilon = match epsilon {
        Option::Some(epsilon) => epsilon,
        Option::None => NumberTrait::zero(),
    };
    // NOTE(review): for negative `axis` this computes `X_rank - axis.into()`,
    // which only yields the expected `X_rank + axis` if `I32IntoU32` converts
    // by magnitude — TODO confirm against the numbers implementation.
    let axis = if axis < 0 {
        X_rank - axis.into()
    } else {
        axis.into()
    };

    // Shape of the returned mean / inv-std tensors: the leading dims,
    // followed by singleton dims for everything that was normalized over.
    let unsqueezed_rank = X_rank - axis;
    let mut reduction_shape = array![];
    let mut i = 0;
    while i != axis {
        reduction_shape.append(*(*self).shape.at(i));
        i += 1;
    };
    let mut i = 0;
    while i != unsqueezed_rank {
        reduction_shape.append(1);
        i += 1;
    };

    // Collapse to a (rows, cols) matrix: rows = dims before axis, cols = rest.
    let mut row_number = 1;
    let mut col_number = 1;
    let mut i = 0;
    while i != X_rank {
        if i < axis {
            row_number *= *(*self).shape.at(i);
        } else {
            col_number *= *(*self).shape.at(i);
        }
        i += 1;
    };

    let mut shape_matrix = array![];
    shape_matrix.append(row_number.try_into().unwrap());
    shape_matrix.append(col_number.try_into().unwrap());

    let mut shape_one = array![];
    shape_one.append(1);
    shape_one.append(1);

    let mut col_number_tensor = array![];
    col_number_tensor.append(NumberTrait::new_unscaled(col_number.into(), false));

    let mut epsilon_tensor = array![];
    epsilon_tensor.append(epsilon);

    let mut one_tensor = array![];
    one_tensor.append(NumberTrait::one());

    // Row-wise mean, variance, and inverse standard deviation.
    let x_mat = self.reshape(shape_matrix.span(), false);
    let x_mean = x_mat
        .reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::Some(false))
        / TensorTrait::new(shape_one.span(), col_number_tensor.span());
    let x_diff = x_mat - x_mean;
    let x_squared_diff = x_diff * x_diff;

    let variance = x_squared_diff
        .reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::Some(false))
        / TensorTrait::new(shape_one.span(), col_number_tensor.span());
    let variance_eps = variance + TensorTrait::new(shape_one.span(), epsilon_tensor.span());
    let std_dev = variance_eps.sqrt();
    let inv_std_dev = TensorTrait::new(shape_one.span(), one_tensor.span()) / std_dev;
    let y_mat = x_diff * inv_std_dev;

    // Left-pad `scale` with singleton dims so it broadcasts against `self`.
    let scale = if (*scale).shape.len() < (*self).shape.len() {
        let mut shape = array![];
        let mut i = 0;
        while i != (*self).shape.len() - (*scale).shape.len() {
            shape.append(1);
            i += 1;
        };
        let mut i = 0;
        while i != (*scale).shape.len() {
            shape.append(*(*scale).shape.at(i));
            i += 1;
        };
        TensorTrait::new(shape.span(), (*scale).data)
    } else {
        *scale
    };

    // Restore the original shape, then apply scale (and bias, if provided).
    let mut i = 0;
    let mut target_shape: Array<i32> = array![];
    while i < (*self).shape.len() {
        target_shape.append((*(*self).shape.at(i)).try_into().unwrap());
        i += 1;
    };

    let Y = y_mat.reshape(target_shape.span(), false) * scale;
    let Y = match B {
        Option::Some(B) => {
            // Left-pad `B` with singleton dims, mirroring the scale handling.
            let B = if (*B).shape.len() < (*self).shape.len() {
                let mut shape = array![];
                let mut i = 0;
                while i != (*self).shape.len() - (*B).shape.len() {
                    shape.append(1);
                    i += 1;
                };
                let mut i = 0;
                while i != (*B).shape.len() {
                    shape.append(*(*B).shape.at(i));
                    i += 1;
                };
                TensorTrait::new(shape.span(), (*B).data)
            } else {
                *B
            };
            Y + B
        },
        Option::None => Y,
    };

    let X_mean = TensorTrait::new(reduction_shape.span(), x_mean.data);
    let X_inv_std_dev = TensorTrait::new(reduction_shape.span(), inv_std_dev.data);

    (Y, X_mean, X_inv_std_dev)
}
use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::less docstring
///
/// Broadcasting element-wise `<`; yields 1 where y < z, 0 otherwise.
fn less<T, impl TPartialOrd: PartialOrd<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<i32> {
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut flags: Array<i32> = array![];

    let mut i: usize = 0;
    while i != total {
        let bcast = unravel_index(i, out_shape);
        let lhs = broadcast_index_mapping(*y.shape, bcast);
        let rhs = broadcast_index_mapping(*z.shape, bcast);

        let flag = if *(*y.data)[lhs] < *(*z.data)[rhs] {
            1
        } else {
            0
        };
        flags.append(flag);

        i += 1;
    };

    TensorTrait::new(out_shape, flags.span())
}
|
use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::less_equal docstring
///
/// Broadcasting element-wise `<=`; yields 1 where y <= z, 0 otherwise.
fn less_equal<
    T,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<i32> {
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut flags: Array<i32> = array![];

    let mut i: usize = 0;
    while i != total {
        let bcast = unravel_index(i, out_shape);
        let lhs = broadcast_index_mapping(*y.shape, bcast);
        let rhs = broadcast_index_mapping(*z.shape, bcast);

        let flag = if *(*y.data)[lhs] <= *(*z.data)[rhs] {
            1
        } else {
            0
        };
        flags.append(flag);

        i += 1;
    };

    TensorTrait::new(out_shape, flags.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::log docstring
///
/// Element-wise natural logarithm.
fn log<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut logs = array![];
    loop {
        match self.data.pop_front() {
            Option::Some(elem) => { logs.append((*elem).ln()); },
            Option::None => { break; }
        };
    };

    TensorTrait::<T>::new(self.shape, logs.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::max docstring
///
/// Element-wise maximum over a non-empty list of mutually broadcastable
/// tensors, folding them into a running result one tensor at a time.
fn max<
    T,
    MAG,
    impl TTensorTrait: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    tensors: Span<Tensor<T>>
) -> Tensor<T> {
    assert(tensors.len() >= 1, 'Input tensors must be >= 1');

    let first_tensor = *tensors.at(0);
    if tensors.len() == 1 {
        return first_tensor;
    }

    // Running result, broadcast against each subsequent tensor in turn.
    let mut acc_shape: Span<usize> = first_tensor.shape;
    let mut acc_data: Span<T> = first_tensor.data;

    let mut t: usize = 1;
    while t != tensors.len() {
        let current = *tensors.at(t);
        let merged_shape = broadcast_shape(acc_shape, current.shape);
        let total = len_from_shape(merged_shape);

        let mut merged: Array<T> = array![];
        let mut n: usize = 0;
        while n != total {
            let bcast = unravel_index(n, merged_shape);
            let lhs = broadcast_index_mapping(acc_shape, bcast);
            let rhs = broadcast_index_mapping(current.shape, bcast);
            merged.append(NumberTrait::max(*(acc_data)[lhs], *(current.data)[rhs]));
            n += 1;
        };

        acc_shape = merged_shape;
        acc_data = merged.span();
        t += 1;
    };

    TensorTrait::<T>::new(acc_shape, acc_data)
}
|
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::max_in_tensor docstring
///
/// Largest element of the span; seeded with the smallest representable value
/// so any element can beat it.
fn max_in_tensor<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut vec: Span::<T>
) -> T {
    let mut best: T = NumberTrait::min_value();
    loop {
        match vec.pop_front() {
            Option::Some(item) => {
                let candidate = best.max(*item);
                if (best < candidate) {
                    best = candidate;
                }
            },
            Option::None => { break; }
        };
    };

    best
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::min docstring
///
/// Element-wise minimum over a non-empty list of mutually broadcastable
/// tensors, folding them into a running result one tensor at a time.
fn min<
    T,
    MAG,
    impl TTensorTrait: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    tensors: Span<Tensor<T>>
) -> Tensor<T> {
    assert(tensors.len() >= 1, 'Input tensors must be >= 1');

    let first_tensor = *tensors.at(0);
    if tensors.len() == 1 {
        return first_tensor;
    }

    // Running result, broadcast against each subsequent tensor in turn.
    let mut acc_shape: Span<usize> = first_tensor.shape;
    let mut acc_data: Span<T> = first_tensor.data;

    let mut t: usize = 1;
    while t != tensors.len() {
        let current = *tensors.at(t);
        let merged_shape = broadcast_shape(acc_shape, current.shape);
        let total = len_from_shape(merged_shape);

        let mut merged: Array<T> = array![];
        let mut n: usize = 0;
        while n != total {
            let bcast = unravel_index(n, merged_shape);
            let lhs = broadcast_index_mapping(acc_shape, bcast);
            let rhs = broadcast_index_mapping(current.shape, bcast);
            merged.append(NumberTrait::min(*(acc_data)[lhs], *(current.data)[rhs]));
            n += 1;
        };

        acc_shape = merged_shape;
        acc_data = merged.span();
        t += 1;
    };

    TensorTrait::<T>::new(acc_shape, acc_data)
}
|
use orion::numbers::NumberTrait;
/// Cf: TensorTrait::min_in_tensor docstring
fn min_in_tensor<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut vec: Span::<T>
) -> T {
    // Fold every element into the running minimum, starting from the
    // largest representable value so any element can replace it.
    let mut result: T = NumberTrait::max_value();
    loop {
        match vec.pop_front() {
            Option::Some(item) => { result = result.min(*item); },
            Option::None => { break; }
        };
    };
    result
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::neg docstring
fn neg<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut z: Tensor<T>
) -> Tensor<T> {
    // Element-wise arithmetic negation; shape is preserved.
    let mut negated: Array<T> = array![];
    loop {
        match z.data.pop_front() {
            Option::Some(elem) => { negated.append((*elem).neg()); },
            Option::None => { break; }
        };
    };
    TensorTrait::<T>::new(z.shape, negated.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::implementations::{tensor_bool::BoolTensor};
// Cf TensorTrait::not docstring
fn not(mut z: Tensor<bool>) -> Tensor<bool> {
    // Element-wise logical negation; shape is preserved.
    let mut flipped: Array<bool> = array![];
    loop {
        match z.data.pop_front() {
            Option::Some(bit) => { flipped.append(!*bit); },
            Option::None => { break; }
        };
    };
    TensorTrait::new(z.shape, flipped.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::{TensorTrait, Tensor};
/// One-hot encode each element of `self` over `depth` classes, writing
/// `values[1]` at the matching class position and `values[0]` elsewhere.
/// The one-hot dimension is appended last, then transposed into `axis`
/// position when an axis is supplied.
fn onehot_encode<
    T,
    MAG,
    impl FFixed: FixedTrait<T, MAG>,
    impl FTensorTrait: TensorTrait<T>,
    impl FNumber: NumberTrait<T, MAG>,
    impl U32TryIntoMAG: TryInto<u32, MAG>,
    impl FPartialEq: PartialEq<T>,
    impl FAdd: Add<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    self: @Tensor<T>, depth: usize, axis: Option<usize>, values: Tensor<T>
) -> Tensor<T> {
    let mut data = *self.data;
    let mut shape = *self.shape;
    let rank = shape.len();
    // 999 is an in-band sentinel meaning "no axis supplied": the one-hot
    // dimension then stays as the trailing axis.
    let axis = match axis {
        Option::Some(val) => val,
        Option::None => 999
    };
    assert(((axis == 999) | (axis.into() <= rank)), 'axis out of dimensions');
    let mut output_data = array![];
    let mut output_size: Array<usize> = array![];
    // Output shape = input shape with an extra trailing `depth` dimension.
    loop {
        match shape.pop_front() {
            Option::Some(size) => { output_size.append(*size); },
            Option::None => { break; }
        };
    };
    output_size.append(depth.into());
    // For every input element emit `depth` values: the "on" value at the
    // element's class index, the "off" value everywhere else.
    loop {
        match data.pop_front() {
            Option::Some(outer_index) => {
                let mut fixed_number = *outer_index;
                // Negative class indices wrap around: index + depth.
                if fixed_number.is_neg() {
                    fixed_number =
                        FixedTrait::<T, MAG>::new_unscaled(depth.try_into().unwrap(), false)
                        + fixed_number
                }
                let mut inner_index = 0;
                while inner_index != depth {
                    let ind = FixedTrait::<
                        T, MAG
                    >::new_unscaled(inner_index.try_into().unwrap(), false);
                    if fixed_number == ind {
                        output_data.append(*values.data.at(1));
                    } else {
                        output_data.append(*values.data.at(0));
                    };
                    inner_index += 1;
                };
            },
            Option::None => { break; }
        };
    };
    let mut output_tensor = TensorTrait::new(output_size.span(), output_data.span());
    let mut tranpose_axes = array![];
    // If a real axis was requested and it is not already the last axis,
    // build a permutation that moves the appended one-hot dimension into
    // that position and transpose.
    if (axis != 999) & (axis.into() != rank) {
        let mut index: usize = 0;
        loop {
            let max_dim = output_size.len() - 1;
            if index.into() == max_dim {
                break ();
            };
            // Insert the last (one-hot) axis just before `axis`.
            if axis == index {
                tranpose_axes.append(max_dim.into())
            }
            tranpose_axes.append(index.into());
            index += 1;
        };
        output_tensor = output_tensor.transpose(tranpose_axes.span());
    }
    output_tensor
}
/// One-hot encode with plain-integer off/on values; wraps `onehot_encode`.
fn onehot<
    T,
    MAG,
    impl FFixed: FixedTrait<T, MAG>,
    impl FTensorTrait: TensorTrait<T>,
    impl FNumber: NumberTrait<T, MAG>,
    impl U32TryIntoMAG: TryInto<u32, MAG>,
    impl FPartialEq: PartialEq<T>,
    impl FAdd: Add<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    self: @Tensor<T>, depth: usize, axis: Option<usize>, mut values: Span<usize>,
) -> Tensor<T> {
    assert(values.len() == 2, 'Wrong values dimensions');
    // Build the 2-element [off_value, on_value] tensor onehot_encode expects.
    let off_value = *values.pop_front().unwrap();
    let on_value = *values.pop_front().unwrap();
    let mut encoded = array![];
    encoded.append(FixedTrait::<T, MAG>::new_unscaled(off_value.try_into().unwrap(), false));
    encoded.append(FixedTrait::<T, MAG>::new_unscaled(on_value.try_into().unwrap(), false));
    let values_tensor = TensorTrait::new(array![2].span(), encoded.span());
    onehot_encode(self, depth, axis, values_tensor)
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::optional_get_element docstring
fn optional_get_element<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut z: Tensor<T>, index: usize
) -> Tensor<T> {
    // Bounds-checked extraction: an out-of-range index yields an empty
    // data array instead of panicking.
    let mut extracted: Array<T> = array![];
    match z.data.get(index) {
        Option::Some(boxed) => { extracted.append(*boxed.unbox()); },
        Option::None => {}
    };
    TensorTrait::<T>::new(z.shape, extracted.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::or docstring
fn or<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl UsizeFTensor: TensorTrait<usize>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
    // Element-wise logical OR with broadcasting; result encoded as 1/0.
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut out: Array<usize> = array![];
    let mut i: usize = 0;
    while i != total {
        let idx = unravel_index(i, out_shape);
        let lhs = *(*y.data)[broadcast_index_mapping(*y.shape, idx)];
        let rhs = *(*z.data)[broadcast_index_mapping(*z.shape, idx)];
        if NumberTrait::or(lhs, rhs) {
            out.append(1);
        } else {
            out.append(0);
        }
        i += 1;
    };
    TensorTrait::new(out_shape, out.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{broadcast_shape, broadcast_index_mapping, len_from_shape};
/// Cf: TensorTrait::pow docstring
fn pow<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TTensorTrait: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<T> {
    // Element-wise y ** z with broadcasting of both operands.
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);
    let mut out: Array<T> = array![];
    let mut i: usize = 0;
    while i != total {
        let idx = unravel_index(i, out_shape);
        let base = *(*y.data)[broadcast_index_mapping(*y.shape, idx)];
        let exponent = *(*z.data)[broadcast_index_mapping(*z.shape, idx)];
        out.append(NumberTrait::pow(base, exponent));
        i += 1;
    };
    TensorTrait::new(out_shape, out.span())
}
|
use core::integer;
use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
/// Produce a tensor shaped like `tensor` filled with pseudo-random values
/// drawn from [low, high]; defaults: low = 0, high = 1, seed = max usize.
fn random_uniform_like<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TRem: Rem<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    tensor: Tensor<T>, high: Option<T>, low: Option<T>, seed: Option<usize>
) -> Tensor<T> {
    // Resolve every optional argument to its default before sampling.
    let seed_value: usize = match seed {
        Option::Some(s) => s,
        Option::None => NumberTrait::max_value(),
    };
    let upper = match high {
        Option::Some(h) => h,
        Option::None => NumberTrait::one(),
    };
    let lower = match low {
        Option::Some(l) => l,
        Option::None => NumberTrait::zero(),
    };
    assert!(upper > lower, "high must be larger than low");
    tensor_get_state(tensor, seed_value, upper, lower)
}
/// Fill a tensor shaped like `tensor` with pseudo-random values in
/// [low, high], advancing `seed` with a 32-bit linear congruential
/// generator between draws so each element hashes a distinct seed.
fn tensor_get_state<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TRem: Rem<T>,
    impl TTensorAdd: Add<Tensor<T>>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    tensor: Tensor<T>, mut seed: usize, high: T, low: T
) -> Tensor<T> {
    let mut data = array![];
    let count = (tensor.data).len();
    let mut i = 0;
    while i != count {
        // Fix: the original initialized `v` to NumberTrait::one() and then
        // immediately overwrote it — the dead initializer is removed.
        data.append(hash_random_range(seed, low, high));
        // Numerical-Recipes LCG constants advance the seed per element.
        let a: u64 = 1664525;
        let c: u64 = 1013904223;
        // NOTE(review): 4294967295 is 2^32 - 1, while the classic LCG
        // modulus is 2^32; kept as-is to preserve behavior — verify intent.
        let m: u64 = 4294967295;
        let s: u64 = (a * seed.try_into().unwrap() + c) % m;
        seed = s.try_into().unwrap();
        i += 1;
    };
    TensorTrait::new(tensor.shape, data.span())
}
/// Map `seed` deterministically to a pseudo-random value in [min, max]
/// by hashing the seed with Pedersen and reducing into the range.
fn hash_random_range<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDiv: Div<T>,
    impl TRem: Rem<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    seed: usize, min: T, max: T
) -> T {
    let mut key = PedersenHasherImpl::new();
    let hash: felt252 = key.hash(seed.into(), 1);
    // Reduce the wide hash modulo 2^32 - 1 so it fits a small magnitude
    // before converting to T via felt252.
    let a: u128 = 4294967295;
    let b: u128 = match integer::u128s_from_felt252(hash) {
        integer::U128sFromFelt252Result::Narrow(x) => x,
        integer::U128sFromFelt252Result::Wide((x, _)) => x,
    } % a;
    let c: felt252 = b.into();
    let rnd: T = NumberTrait::from_felt(c);
    // NOTE(review): `rnd % range` has a slight modulo bias toward smaller
    // values — acceptable for non-cryptographic sampling, but worth noting.
    let range = max - min + NumberTrait::one();
    min + rnd % range
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Build a 1-D tensor of values [start, start+step, ...) stopping before
/// `end` is reached or passed in the direction of `step`.
fn range<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAdd: Add<T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut start: T, end: T, step: T
) -> Tensor<T> {
    let mut values: Array<T> = array![];
    let zero: T = NumberTrait::zero();
    loop {
        // Stop once `start` has crossed `end` in the direction of `step`
        // (covers both ascending and descending ranges).
        if (step >= zero && start >= end) || (step <= zero && start <= end) {
            break;
        }
        values.append(start);
        start += step;
    };
    TensorTrait::<T>::new(array![values.len()].span(), values.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Cf: TensorTrait::reduce_l1 docstring
fn reduce_l1<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // L1 norm along `axis`: sum of absolute values.
    let axes = array![axis.try_into().unwrap()].span();
    self.abs().reduce_sum(Option::Some(axes), Option::Some(keepdims), Option::Some(false))
}
|
use core::debug::PrintTrait;
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Element-wise square of `self` (helper for reduce_l2).
fn square<
    T,
    MAG,
    impl TTensorTrait: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>
) -> Tensor<T> {
    let data = *self.data;
    let mut squared = array![];
    let mut i: usize = 0;
    while i != data.len() {
        let value = *(data)[i];
        squared.append(value * value);
        i += 1;
    };
    TensorTrait::new(*self.shape, squared.span())
}
/// Cf: TensorTrait::reduce_l2 docstring
fn reduce_l2<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // L2 norm along `axis`: sqrt of the sum of squares.
    let summed = square(self)
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        );
    summed.sqrt()
}
/// L2 reduction for complex-valued tensors: squares the modulus (abs) of
/// each element before summing and taking the square root.
fn reduce_l2_complex<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TPrint: PrintTrait<T>
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    let squared = square(@self.abs());
    let summed = squared
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        );
    summed.sqrt()
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
/// Cf: TensorTrait::reduce_log_sum docstring
fn reduce_log_sum<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // log(sum(x)) along the requested axis.
    self
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        )
        .log()
}
|
use core::option::OptionTrait;
use core::array::ArrayTrait;
use core::array::SpanTrait;
use core::debug::PrintTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast};
/// Cf: TensorTrait::reduce_log_sum_exp docstring
// fn reduce_log_sum_exp_wide<
// T,
// TMAG,
// W,
// WMAG,
// impl TIntoW: Into<T, W>,
// impl WTryIntoT: TryInto<W, T>,
// impl WCopy: Copy<W>,
// impl WDrop: Drop<W>,
// impl TCopy: Copy<T>,
// impl TDrop: Drop<T>,
// impl TDiv: Div<T>,
// impl TTensor: TensorTrait<T>,
// impl WTensor: TensorTrait<W>,
// impl TFixed: FixedTrait<T, TMAG>,
// impl WFixed: FixedTrait<W, WMAG>
// >(
// self: @Tensor<T>, axis: usize, keepdims: bool
// ) -> Tensor<W> {
// let tensor_exp: Tensor<W> = exp_upcast(*self);
// let tensor_exp_log_sum = tensor_exp.reduce_log_sum(axis, keepdims);
// return tensor_exp_log_sum;
// }
/// Cf: TensorTrait::reduce_log_sum_exp docstring
fn reduce_log_sum_exp<
    T,
    MAG,
    impl Tensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // log(sum(exp(x))) along the requested axis.
    self.exp().reduce_log_sum(axis: axis, keepdims: keepdims)
}
|
use alexandria_sorting::bubble_sort;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{
reduce_output_shape, len_from_shape, combine_indices, get_all_axes
};
/// Cf: TensorTrait::reduce_mean docstring
fn reduce_mean<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TDiv: Div<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>,
    axes: Option<Span<usize>>,
    keepdims: Option<bool>,
    noop_with_empty_axes: Option<bool>
) -> Tensor<T> {
    // Default: reducing with no axes given reduces over all axes unless
    // noop_with_empty_axes asks for a pass-through.
    let noop_with_empty_axes = match noop_with_empty_axes {
        Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
        Option::None => false,
    };
    let axes = match axes {
        Option::Some(axes) => {
            if (axes.len() == 0) {
                get_all_axes(*self.shape)
            } else {
                assert(axes.len() == axes.unique().len(), 'duplicated axis.');
                let mut axes_arr = array![];
                let mut copy_axes = axes;
                loop {
                    match copy_axes.pop_front() {
                        Option::Some(axis) => { axes_arr.append(*axis); },
                        Option::None => { break; }
                    };
                };
                // Sorting lets the reduction loop below shift each original
                // axis by the number of axes already collapsed (`axis_c`).
                let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span();
                sorted_axes
            }
        },
        Option::None => {
            if noop_with_empty_axes {
                return *self;
            }
            get_all_axes(*self.shape)
        },
    };
    let keepdims = match keepdims {
        Option::Some(keepdims) => keepdims,
        Option::None => true,
    };
    // Reduce one axis at a time; `axis_c` counts axes already removed so
    // original axis numbers map into the shrinking intermediate shape.
    let mut axis_c = 0;
    let mut copy_axes = axes;
    let mut shape = *self.shape;
    let mut data = *self.data;
    loop {
        match copy_axes.pop_front() {
            Option::Some(axis) => {
                if (shape.len() == 1) {
                    // Down to 1-D: collapse everything to a single mean.
                    let current_mean = accumulate_mean::<T>(data, shape, shape, 0);
                    shape = array![].span();
                    data = array![current_mean].span();
                    break ();
                }
                let mut temp_data = array![];
                let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                let data_len = len_from_shape(temp_shape);
                let mut index: usize = 0;
                while index != data_len {
                    let indices = unravel_index(index, temp_shape);
                    let current_mean = accumulate_mean::<T>(data, shape, indices, *axis - axis_c);
                    temp_data.append(current_mean);
                    index += 1;
                };
                shape = temp_shape;
                data = temp_data.span();
                axis_c += 1;
            },
            Option::None => { break; }
        };
    };
    let mut axes_copy = axes;
    if keepdims {
        // Re-insert each reduced axis as a size-1 dimension of the
        // original shape.
        shape = *self.shape;
        loop {
            match axes_copy.pop_front() {
                Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); },
                Option::None => { break; }
            };
        };
        TensorTrait::<T>::new(shape, data)
    } else {
        TensorTrait::<T>::new(shape, data)
    }
}
/// Mean of the input elements that project onto `output_indices` along
/// `axis` (the whole span in the 1-D case).
fn accumulate_mean<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TDiv: Div<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    let axis_len = *(input_shape)[axis];
    let mut acc: T = NumberTrait::zero();
    // `axis_index` mirrors the u32 counter in T so the final division by
    // the element count can be performed in T's own arithmetic.
    let mut axis_index: T = NumberTrait::zero();
    let mut axis_indexu32 = 0;
    if (input_shape).len() > 1 {
        while axis_indexu32 != axis_len {
            let input_indices = combine_indices(output_indices, axis_indexu32, axis);
            let input_index = ravel_index(input_shape, input_indices);
            let ele = *(input_data)[input_index];
            acc += ele;
            axis_index += NumberTrait::one();
            axis_indexu32 += 1;
        };
    } else {
        // 1-D input: average the entire data span.
        loop {
            match input_data.pop_front() {
                Option::Some(item) => {
                    acc += *item;
                    axis_index += NumberTrait::one();
                    axis_indexu32 += 1;
                },
                Option::None => { break; }
            };
        };
    }
    acc / axis_index
}
use alexandria_sorting::bubble_sort;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{
reduce_output_shape, len_from_shape, combine_indices, get_all_axes
};
/// Cf: TensorTrait::reduce_min docstring
fn reduce_min<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>,
    axes: Option<Span<usize>>,
    keepdims: Option<bool>,
    noop_with_empty_axes: Option<bool>
) -> Tensor<T> {
    // Default: reducing with no axes given reduces over all axes unless
    // noop_with_empty_axes asks for a pass-through.
    let noop_with_empty_axes = match noop_with_empty_axes {
        Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
        Option::None => false,
    };
    let axes = match axes {
        Option::Some(axes) => {
            if (axes.len() == 0) {
                get_all_axes(*self.shape)
            } else {
                assert(axes.len() == axes.unique().len(), 'duplicated axis.');
                let mut axes_arr: Array<usize> = array![];
                let mut copy_axes = axes;
                loop {
                    match copy_axes.pop_front() {
                        Option::Some(axis) => { axes_arr.append(*axis); },
                        Option::None => { break; }
                    };
                };
                // Sorting lets the reduction loop below shift each original
                // axis by the number of axes already collapsed (`axis_c`).
                let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span();
                sorted_axes
            }
        },
        Option::None => {
            if noop_with_empty_axes {
                return *self;
            }
            get_all_axes(*self.shape)
        },
    };
    let keepdims = match keepdims {
        Option::Some(keepdims) => keepdims,
        Option::None => true,
    };
    // Reduce one axis at a time; `axis_c` counts axes already removed so
    // original axis numbers map into the shrinking intermediate shape.
    let mut axis_c = 0;
    let mut copy_axes = axes;
    let mut shape = *self.shape;
    let mut data = *self.data;
    loop {
        match copy_axes.pop_front() {
            Option::Some(axis) => {
                if (shape.len() == 1) {
                    // Down to 1-D: collapse everything to a single minimum.
                    let current_min = accumulate_min::<T>(data, shape, shape, 0);
                    shape = array![].span();
                    data = array![current_min].span();
                    break ();
                }
                let mut temp_data = array![];
                let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                let data_len = len_from_shape(temp_shape);
                let mut index: usize = 0;
                while index != data_len {
                    let indices = unravel_index(index, temp_shape);
                    let current_min = accumulate_min::<T>(data, shape, indices, *axis - axis_c);
                    temp_data.append(current_min);
                    index += 1;
                };
                shape = temp_shape;
                data = temp_data.span();
                axis_c += 1;
            },
            Option::None => { break; }
        };
    };
    let mut axes_copy = axes;
    if keepdims {
        // Re-insert each reduced axis as a size-1 dimension of the
        // original shape.
        shape = *self.shape;
        loop {
            match axes_copy.pop_front() {
                Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); },
                Option::None => { break; }
            };
        };
        TensorTrait::<T>::new(shape, data)
    } else {
        TensorTrait::<T>::new(shape, data)
    }
}
/// Minimum of the input elements that project onto `output_indices`
/// along `axis` (the whole span in the 1-D case).
fn accumulate_min<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    // Start from the largest representable value so any element replaces it.
    let reduced_len = *(input_shape)[axis];
    let mut smallest: T = NumberTrait::max_value();
    if input_shape.len() > 1 {
        let mut pos = 0;
        while pos != reduced_len {
            let full_indices = combine_indices(output_indices, pos, axis);
            let flat = ravel_index(input_shape, full_indices);
            let candidate = *(input_data)[flat];
            if candidate < smallest {
                smallest = candidate;
            }
            pos += 1;
        };
    } else {
        // 1-D input: scan the entire data span.
        loop {
            match input_data.pop_front() {
                Option::Some(item) => {
                    if *item < smallest {
                        smallest = *item;
                    }
                },
                Option::None => { break; }
            };
        };
    }
    smallest
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
/// Cf: TensorTrait::reduce_prod docstring
fn reduce_prod<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAddEq: AddEq<T>,
    impl TMulEq: MulEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    let mut output_data = array![];
    if (*self.shape).len() == 1 {
        // 1-D input: the only valid axis is 0 and the result has shape [1].
        assert(axis == 0, 'axis out of dimensions');
        let current_prod = accumulate_production::<T>(*self.data, *self.shape, *self.shape, axis);
        output_data.append(current_prod);
        return TensorTrait::new(array![1].span(), output_data.span());
    } else {
        // Fix: a valid axis must be strictly less than the rank; the
        // previous `<=` accepted an out-of-range axis (cf. cumsum and the
        // other reducers in this file, which all use `<`).
        assert(axis < (*self.shape).len(), 'axis out of dimensions');
        let output_shape = reduce_output_shape(*self.shape, axis, false);
        let output_data_len = len_from_shape(output_shape);
        let mut index: usize = 0;
        while index != output_data_len {
            let output_indices = unravel_index(index, output_shape);
            // Renamed from `current_sum`: this reducer multiplies.
            let current_prod = accumulate_production::<
                T
            >(*self.data, *self.shape, output_indices, axis);
            output_data.append(current_prod);
            index += 1;
        };
        if keepdims {
            // Keep the reduced axis as a dimension of size 1.
            let output_shape = reduce_output_shape(*self.shape, axis, true);
            TensorTrait::<T>::new(output_shape, output_data.span())
        } else {
            TensorTrait::<T>::new(output_shape, output_data.span())
        }
    }
}
/// Product of the input elements that project onto `output_indices`
/// along `axis` (the whole span in the 1-D case); identity element is one.
fn accumulate_production<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TAddEq: AddEq<T>,
    impl TMulEq: MulEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    let axis_len = *(input_shape)[axis];
    let mut acc: T = NumberTrait::one();
    if input_shape.len() > 1 {
        let mut pos: usize = 0;
        while pos != axis_len {
            let full_indices = combine_indices(output_indices, pos, axis);
            let flat = ravel_index(input_shape, full_indices);
            acc *= *(input_data)[flat];
            pos += 1;
        };
    } else {
        // 1-D input: multiply every element.
        loop {
            match input_data.pop_front() {
                Option::Some(item) => { acc *= *item; },
                Option::None => { break; }
            };
        };
    }
    acc
}
use core::option::OptionTrait;
use core::traits::TryInto;
use alexandria_sorting::bubble_sort;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{
reduce_output_shape, len_from_shape, combine_indices, get_all_axes
};
/// Cf: TensorTrait::reduce_sum docstring
fn reduce_sum<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>,
    axes: Option<Span<i32>>,
    keepdims: Option<bool>,
    noop_with_empty_axes: Option<bool>
) -> Tensor<T> {
    // Default: reducing with no axes given reduces over all axes unless
    // noop_with_empty_axes asks for a pass-through.
    let noop_with_empty_axes = match noop_with_empty_axes {
        Option::Some(noop_with_empty_axes) => noop_with_empty_axes,
        Option::None => false,
    };
    let axes = match axes {
        Option::Some(axes) => {
            if (axes.len() == 0) {
                get_all_axes(*self.shape)
            } else {
                assert(axes.len() == axes.unique().len(), 'duplicated axis.');
                let mut axes_arr: Array<usize> = array![];
                let mut copy_axes = axes.clone();
                loop {
                    match copy_axes.pop_front() {
                        Option::Some(axis) => {
                            // Negative axes count from the end: axis + rank.
                            let adjusted_axis = if *axis < 0 {
                                ((*self.shape).len().try_into().unwrap() + *axis)
                                    .try_into()
                                    .unwrap()
                            } else {
                                (*axis).try_into().unwrap()
                            };
                            axes_arr.append(adjusted_axis);
                        },
                        Option::None => { break; }
                    };
                };
                // Sorting lets the reduction loop below shift each original
                // axis by the number of axes already collapsed (`axis_c`).
                let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span();
                sorted_axes
            }
        },
        Option::None => {
            if noop_with_empty_axes {
                return *self;
            }
            get_all_axes(*self.shape)
        },
    };
    let keepdims = match keepdims {
        Option::Some(keepdims) => keepdims,
        Option::None => true,
    };
    // Reduce one axis at a time; `axis_c` counts axes already removed so
    // original axis numbers map into the shrinking intermediate shape.
    let mut axis_c = 0;
    let mut copy_axes = axes.clone();
    let mut shape = *self.shape;
    let mut data = *self.data;
    loop {
        match copy_axes.pop_front() {
            Option::Some(axis) => {
                if (shape.len() == 1) {
                    // Down to 1-D: collapse everything to a single sum.
                    let current_sum = accumulate_sum::<T>(data, shape, shape, 0);
                    shape = array![].span();
                    data = array![current_sum].span();
                    break ();
                }
                let mut temp_data = array![];
                let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false);
                let data_len = len_from_shape(temp_shape);
                let mut index: usize = 0;
                while index != data_len {
                    let indices = unravel_index(index, temp_shape);
                    let current_sum = accumulate_sum::<T>(data, shape, indices, *axis - axis_c);
                    temp_data.append(current_sum);
                    index += 1;
                };
                shape = temp_shape;
                data = temp_data.span();
                axis_c += 1;
            },
            Option::None => { break; }
        };
    };
    let mut axes_copy = axes.clone();
    if keepdims {
        // Re-insert each reduced axis as a size-1 dimension of the
        // original shape.
        shape = *self.shape;
        loop {
            match axes_copy.pop_front() {
                Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); },
                Option::None => { break; }
            };
        };
        TensorTrait::<T>::new(shape, data)
    } else {
        TensorTrait::<T>::new(shape, data)
    }
}
/// Sum of the input elements that project onto `output_indices` along
/// `axis` (the whole span in the 1-D case).
fn accumulate_sum<
    T, MAG, impl TNumber: NumberTrait<T, MAG>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
    mut input_data: Span<T>, input_shape: Span<usize>, output_indices: Span<usize>, axis: usize
) -> T {
    let axis_len = *(input_shape)[axis];
    let mut acc: T = NumberTrait::zero();
    if input_shape.len() > 1 {
        let mut pos = 0;
        while pos != axis_len {
            let full_indices = combine_indices(output_indices, pos, axis);
            let flat = ravel_index(input_shape, full_indices);
            acc = NumberTrait::add(acc, *(input_data)[flat]);
            pos += 1;
        };
    } else {
        // 1-D input: add up the entire data span.
        loop {
            match input_data.pop_front() {
                Option::Some(item) => { acc = NumberTrait::add(acc, *item); },
                Option::None => { break; }
            };
        };
    }
    acc
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::numbers::fixed_point::core::FixedTrait;
/// Element-wise square of `self` (helper for reduce_sum_square).
fn square<
    T,
    MAG,
    impl FTensorTrait: TensorTrait<T>,
    impl FNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    self: @Tensor<T>
) -> Tensor<T> {
    let mut remaining = *self.data;
    let mut squares = array![];
    loop {
        match remaining.pop_front() {
            Option::Some(elem) => {
                let x = *elem;
                squares.append(x * x);
            },
            Option::None => { break; }
        };
    };
    TensorTrait::new(*self.shape, squares.span())
}
/// Cf: TensorTrait::reduce_sum_square docstring
fn reduce_sum_square<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TMul: Mul<T>,
    impl TAddEq: AddEq<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
    // sum(x^2) along the requested axis.
    square(self)
        .reduce_sum(
            Option::Some(array![axis.try_into().unwrap()].span()),
            Option::Some(keepdims),
            Option::Some(false)
        )
}
|
use alexandria_sorting::bubble_sort;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{
TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor
};
use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait};
// Interpolation algorithm used by `resize`.
enum MODE {
    NEAREST,
    LINEAR,
    CUBIC,
}
// Rounding rule applied when MODE::NEAREST maps a real coordinate to an
// integer source index.
enum NEAREST_MODE {
    ROUND_PREFER_FLOOR,
    ROUND_PREFER_CEIL,
    FLOOR,
    CEIL
}
// Policy for reconciling requested output sizes with the input aspect
// ratio (presumably mirrors the ONNX Resize attribute of the same name —
// verify against the operator spec).
enum KEEP_ASPECT_RATIO_POLICY {
    STRETCH,
    NOT_LARGER,
    NOT_SMALLER
}
// How output coordinates are mapped back to input coordinates.
enum TRANSFORMATION_MODE {
    HALF_PIXEL,
    ALIGN_CORNERS,
    ASYMMETRIC,
    TF_CROP_AND_RESIZE,
    PYTORCH_HALF_PIXEL,
    HALF_PIXEL_SYMMETRIC
}
/// Resize `self` per the supplied scales/sizes and interpolation options;
/// thin wrapper that forwards every attribute to the n-dimensional
/// interpolator.
fn resize<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    self: @Tensor<T>,
    roi: Option<Tensor<T>>,
    scales: Option<Span<T>>,
    sizes: Option<Span<usize>>,
    antialias: Option<usize>,
    axes: Option<Span<usize>>,
    coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
    cubic_coeff_a: Option<T>,
    exclude_outside: Option<bool>,
    extrapolation_value: Option<T>,
    keep_aspect_ratio_policy: Option<KEEP_ASPECT_RATIO_POLICY>,
    mode: Option<MODE>,
    nearest_mode: Option<NEAREST_MODE>,
) -> Tensor<T> {
    interpolate_nd(
        self,
        antialias,
        mode,
        nearest_mode,
        scales,
        sizes,
        roi,
        keep_aspect_ratio_policy,
        exclude_outside,
        coordinate_transformation_mode,
        extrapolation_value,
        axes,
        cubic_coeff_a
    )
}
fn interpolate_nd<
T,
MAG,
+TensorTrait<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Copy<T>,
+Drop<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+AddEq<T>,
+Add<T>,
+Div<T>,
+Mul<T>,
+Sub<T>,
>(
data: @Tensor<T>,
antialias: Option<usize>,
mode: |
Option<MODE>,
nearest_mode: Option<NEAREST_MODE>,
scale_factors: Option<Span<T>>,
output_size: Option<Span<usize>>,
roi: Option<Tensor<T>>,
keep_aspect_ratio_policy: Option<KEEP_ASPECT_RATIO_POLICY>,
exclude_outside: Option<bool>,
coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
extrapolation_value: Option<T>,
axes: Option<Span<usize>>,
cubic_coeff_a: Option<T>,
) -> Tensor<T> {
let mode = match mode {
Option::Some(mode) => mode,
Option::None => { MODE::NEAREST },
};
let keep_aspect_ratio_policy = match keep_aspect_ratio_policy {
Option::Some(keep_aspect_ratio_policy) => keep_aspect_ratio_policy,
Option::None => { KEEP_ASPECT_RATIO_POLICY::STRETCH },
};
let exclude_outside = match exclude_outside {
Option::Some(exclude_outside) => exclude_outside,
Option::None => { false },
};
let extrapolation_value = match extrapolation_value {
Option::Some(extrapolation_value) => extrapolation_value,
Option::None => { NumberTrait::zero() },
};
if output_size.is_none() && scale_factors.is_none() {
core::panic_with_felt252('size and scale are None');
}
let r = (*data).shape.len();
let (axes, scale_factors, output_size, roi) = match axes {
Option::Some(axes) => {
let mut scale_factors = match scale_factors {
Option::Some(scale_factors) => {
let mut new_scale_factors = ArrayTrait::<T>::new();
let mut d = 0;
while d != r {
let mut i = 0;
let item = loop {
if i == axes.len() {
break NumberTrait::one();
}
if *axes.at(i) == d {
break *scale_factors.at(i);
}
i += 1;
}; |
new_scale_factors.append(item);
d += 1;
};
Option::Some(new_scale_factors.span())
},
Option::None => { Option::None },
};
let mut output_size = match output_size {
Option::Some(output_size) => {
let mut new_output_size = array![];
let mut d = 0;
while d != r {
let mut i = 0;
let item = loop {
if i == axes.len() {
break *(*data).shape.at(d);
}
if *axes.at(i) == d {
break *output_size.at(i);
}
i += 1;
};
new_output_size.append(item);
d += 1;
};
Option::Some(new_output_size.span())
},
Option::None => { Option::None },
};
let mut roi = match roi {
Option::Some(roi) => {
let mut new_roi_data = array![];
let naxes = axes.len();
let mut d = 0;
while d != r {
let mut i = 0;
let item = loop {
if i == axes.len() {
break NumberTrait::zero();
}
if *axes.at(i) == d {
break *roi.data.at(i);
}
i += 1;
};
new_roi_data.append(item);
d += 1;
};
let mut d = 0;
while d != r {
let mut i = 0;
let item = |
loop {
if i == axes.len() {
break NumberTrait::one();
}
if *axes.at(i) == d {
break *roi.data.at(i + naxes);
}
i += 1;
};
new_roi_data.append(item);
d += 1;
};
let mut shape = ArrayTrait::new();
shape.append(r * 2);
Option::Some(TensorTrait::new(shape.span(), new_roi_data.span()))
},
Option::None => { Option::None },
};
(axes, scale_factors, output_size, roi)
},
Option::None => {
let mut axes = array![];
let mut i = 0;
while i != r {
axes.append(i);
i += 1;
};
(axes.span(), scale_factors, output_size, roi)
}
};
let (mut output_size, mut scale_factors) = match output_size {
Option::Some(output_size) => {
let mut scale_factors: Array<T> = array![];
let mut i = 0;
while i != r {
let output_size_i: T = NumberTrait::new_unscaled(
(*output_size.at(i)).into(), false
);
let data_shape_i: T = NumberTrait::new_unscaled(
(*(*data).shape.at(i)).into(), false
);
scale_factors.append(output_size_i / data_shape_i);
i += 1;
};
let (mut output_size, mut scale_factors) = match keep_aspect_ratio_policy {
KEEP_ASPECT_RATIO_POLICY::STRETCH => { (output_size, scale_factors.span()) },
KEEP_ASPECT_RATIO_POLICY::NOT_LARGER => {
let mut scale = *scale_factors.at(*axes.at(0));
let mut i = 1;
while i != axes.len() { |
if scale > *scale_factors.at(*axes.at(i)) {
scale = *scale_factors.at(*axes.at(i));
}
i += 1;
};
let mut scale_factors: Array<T> = array![];
let mut d = 0;
while d != r {
let mut i = 0;
let item = loop {
if i == axes.len() {
break NumberTrait::one();
}
if *axes.at(i) == d {
break scale;
}
i += 1;
};
scale_factors.append(item);
d += 1;
};
let mut output_size = array![];
let mut d = 0;
while d != r {
let mut i = 0;
let item = loop {
if i == axes.len() {
break *(*data).shape.at(d);
}
if *axes.at(i) == d {
break NumberTrait::round(
scale
* NumberTrait::new_unscaled(
(*(*data).shape.at(d)).into(), false
)
)
.try_into()
.unwrap();
}
i += 1;
};
output_size.append(item);
d += 1;
};
(output_size.span(), scale_factors.span())
},
KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER => {
let |
mut scale = *scale_factors.at(*axes.at(0));
let mut i = 1;
while i != axes.len() {
if scale < *scale_factors.at(*axes.at(i)) {
scale = *scale_factors.at(*axes.at(i));
}
i += 1;
};
let mut scale_factors: Array<T> = array![];
let mut d = 0;
while d != r {
let mut i = 0;
let item = loop {
if i == axes.len() {
break NumberTrait::one();
}
if *axes.at(i) == d {
break scale;
}
i += 1;
};
scale_factors.append(item);
d += 1;
};
let mut output_size = array![];
let mut d = 0;
while d != r {
let mut i = 0;
let item = loop {
if i == axes.len() {
break *(*data).shape.at(d);
}
if *axes.at(i) == d {
break NumberTrait::round(
scale
* NumberTrait::new_unscaled(
(*(*data).shape.at(d)).into(), false
)
)
.try_into()
.unwrap();
}
i += 1;
};
output_size.append(item);
d += 1;
};
(output_size.span(), |
scale_factors.span())
},
};
(output_size, scale_factors)
},
Option::None => {
let mut output_size: Array<usize> = array![];
let scale_factors = match scale_factors {
Option::Some(scale_factors) => scale_factors,
Option::None => { core::panic_with_felt252('size and scale None') },
};
let mut i = 0;
while i != scale_factors.len() {
let item = *scale_factors.at(i)
* NumberTrait::new_unscaled((*(*(data).shape).at(i)).into(), false);
output_size.append(item.try_into().unwrap());
i += 1;
};
(output_size.span(), scale_factors)
},
};
let mut ret: Array<Span<usize>> = array![];
let mut i = 0;
while i != output_size.len() {
let mut temp = ArrayTrait::<usize>::new();
let mut j = 0;
while j != *output_size.at(i) {
temp.append(j);
j += 1;
};
ret.append(temp.span());
i += 1;
};
let mut ret = cartesian(ret.span());
let mut ret_data = array![];
loop {
match ret.pop_front() {
Option::Some(X) => {
let mut x: Array<T> = array![];
let mut i = 0;
while i != X.len() {
x.append(NumberTrait::new_unscaled((*X.at(i)).into(), false));
i += 1;
};
let mut x = x.span();
let item = interpolate_nd_with_x(
data,
(*data).shape.len(),
scale_factors,
output_size,
x,
antialias,
mode,
nearest_mode,
roi,
extrapolation_value,
coordinate_transformation_mode,
exclude_outside,
cubic_coeff_a |
);
ret_data.append(*item.data.at(0));
},
Option::None => { break; }
}
};
let mut shape = array![];
shape.append(ret_data.len());
TensorTrait::new(output_size, ret_data.span())
}
/// Builds the cartesian product of the input index arrays.
///
/// Given `arrays = [a0, a1, ..., ak]`, returns every combination
/// `[x0, x1, ..., xk]` with `xi` drawn from `ai`; the repeat pattern makes
/// the last array vary fastest (row-major enumeration).
///
/// NOTE(review): `arrays.len() - 1` underflows (panics) when `arrays` is
/// empty — callers appear to always pass at least one array; confirm.
fn cartesian(mut arrays: Span<Span<usize>>,) -> Array<Array<usize>> {
    // n = total number of combinations = product of all array lengths.
    let mut n = 1;
    let mut i = arrays.len() - 1;
    loop {
        n = n * (*(arrays.at(i))).len();
        if i == 0 {
            break;
        }
        i -= 1;
    };
    // Cache each array's length; repeat_2 below reads them by index.
    let mut i = 0;
    let mut size_arrays = array![];
    while i != arrays.len() {
        size_arrays.append((*(arrays.at(i))).len());
        i += 1;
    };
    let size_arrays = size_arrays.span();
    let mut output_arrays = array![];
    // For axis i: repeat each element m times (m = product of the lengths of
    // the later axes), then tile the result to cover the earlier axes.
    let mut m = n;
    let mut i = 0;
    while i != arrays.len() {
        m = m / (*(arrays.at(i))).len();
        let mut out = repeat(*(arrays.at(i)), m);
        out = repeat_2(out, size_arrays, i);
        output_arrays.append(out);
        i += 1;
    };
    let output_arrays = output_arrays.span();
    // Transpose the k per-axis columns into n rows of k coordinates.
    let mut i = 0;
    let mut ret = array![];
    while i != n {
        let mut j = 0;
        let mut x = array![];
        while j != arrays.len() {
            x.append(*(output_arrays.at(j)).at(i));
            j += 1;
        };
        ret.append(x);
        i += 1;
    };
    ret
}
/// Tiles `array` in place to cover the axes preceding `index`.
///
/// For each earlier axis (walked from `index - 1` down to 0), the current
/// contents are appended `size_array[index - 1 - i] - 1` additional times,
/// growing the column toward the full cartesian-product length. Helper for
/// `cartesian`.
fn repeat_2(mut array: Array<usize>, size_array: Span<usize>, index: usize) -> Array<usize> {
    let mut size = array.len();
    let mut i = 0;
    while i != index {
        // Append one full copy of the current contents per extra repetition.
        let mut j = 1;
        while j != *size_array.at(index - 1 - i) {
            let mut k = 0;
            while k != size {
                array.append(*array.at(k));
                k += 1;
            };
            j += 1;
        };
        // The tiled block becomes the repetition unit for the next axis out.
        size = size * *size_array.at(index - 1 - i);
        i += 1;
    };
    array
}
/// Returns a new array in which every element of `array` appears `m`
/// consecutive times, preserving the original element order.
fn repeat(array: Span<usize>, m: usize,) -> Array<usize> {
    let mut result = array![];
    let mut pos = 0;
    while pos != array.len() {
        let value = *array.at(pos);
        let mut copies = 0;
        while copies != m {
            result.append(value);
            copies += 1;
        };
        pos += 1;
    };
    result
}
/// Recursively interpolates an n-dimensional tensor at fractional
/// coordinate `x`, one axis at a time.
///
/// Base case (`n == 1`) delegates to `interpolate_1d_with_x`. Otherwise the
/// leading axis is peeled off: every slice along axis 0 is interpolated over
/// the remaining `n - 1` axes, the per-slice results are concatenated into a
/// 1-D tensor, and a final 1-D interpolation along axis 0 combines them.
///
/// `roi`, when given, holds 2*n entries (n starts followed by n ends) and is
/// reduced to match each recursive call.
fn interpolate_nd_with_x<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    data: @Tensor<T>,
    n: usize,
    mut scale_factor: Span<T>,
    mut output_size: Span<usize>,
    mut x: Span<T>,
    antialias: Option<usize>,
    mode: MODE,
    nearest_mode: Option<NEAREST_MODE>,
    roi: Option<Tensor<T>>,
    extrapolation_value: T,
    coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
    exclude_outside: bool,
    cubic_coeff_a: Option<T>,
) -> Tensor<T> {
    // Base case: plain 1-D interpolation.
    if n == 1 {
        return interpolate_1d_with_x(
            data,
            *scale_factor.at(0),
            *output_size.at(0),
            *x.at(0),
            antialias,
            mode,
            nearest_mode,
            roi,
            extrapolation_value,
            coordinate_transformation_mode,
            exclude_outside,
            cubic_coeff_a
        );
    }
    let mut res1d = array![];
    // Split off the leading axis' scale/size/coordinate; the shortened spans
    // are forwarded to the recursive calls.
    let scale_factor_zero = match scale_factor.pop_front() {
        Option::Some(item) => { *item },
        Option::None => core::panic_with_felt252('scale factor empty')
    };
    let output_size_zero = match output_size.pop_front() {
        Option::Some(item) => { *item },
        Option::None => core::panic_with_felt252('output_size empty')
    };
    let x_zero = match x.pop_front() {
        Option::Some(item) => { *item },
        Option::None => core::panic_with_felt252('x empty')
    };
    // Drop the leading axis' (start, end) pair from the ROI: keep every
    // entry except index 0 (its start) and index n (its end).
    let reduced_roi = match roi {
        Option::Some(roi) => {
            let mut reduced_roi = ArrayTrait::new();
            let mut reduced_roi_shape = ArrayTrait::new();
            reduced_roi_shape.append(roi.data.len() - 2);
            let mut i = 1;
            while i != 2 * n {
                if i != n {
                    reduced_roi.append(*roi.data.at(i));
                }
                i += 1;
            };
            Option::Some(TensorTrait::new(reduced_roi_shape.span(), reduced_roi.span()))
        },
        Option::None => { Option::None }
    };
    // Interpolate each slice along axis 0 over the remaining axes and
    // collect all resulting elements into one flat 1-D tensor.
    let mut i = 0;
    while i != *(*data).shape.at(0) {
        let data = get_row_n(data, i);
        let mut r = interpolate_nd_with_x(
            @data,
            n - 1,
            scale_factor,
            output_size,
            x,
            antialias,
            mode,
            nearest_mode,
            reduced_roi,
            extrapolation_value,
            coordinate_transformation_mode,
            exclude_outside,
            cubic_coeff_a
        );
        loop {
            match r.data.pop_front() {
                Option::Some(item) => { res1d.append(*item); },
                Option::None => { break; }
            }
        };
        i += 1;
    };
    let mut shape = array![];
    shape.append(res1d.len());
    let res1d = TensorTrait::new(shape.span(), res1d.span());
    // ROI restricted to the leading axis: its (start, end) pair only.
    let reduced_roi = match roi {
        Option::Some(roi) => {
            let mut reduced_roi = array![];
            let mut reduced_roi_shape = array![];
            reduced_roi_shape.append(2);
            reduced_roi.append(*roi.data.at(0));
            reduced_roi.append(*roi.data.at(n));
            Option::Some(TensorTrait::new(reduced_roi_shape.span(), reduced_roi.span()))
        },
        Option::None => { Option::None }
    };
    // Final 1-D interpolation across the per-slice results.
    let a = interpolate_1d_with_x(
        @res1d,
        scale_factor_zero,
        output_size_zero,
        x_zero,
        antialias,
        mode,
        nearest_mode,
        reduced_roi,
        extrapolation_value,
        coordinate_transformation_mode,
        exclude_outside,
        cubic_coeff_a
    );
    a
}
/// Extracts slice `index` along the first axis of `data`.
///
/// The result has shape `data.shape[1..]` and holds the contiguous run of
/// elements starting at `index * row_len` in the flat data buffer, where
/// `row_len` is the product of the remaining dimensions.
fn get_row_n<T, +TensorTrait<T>, +Copy<T>, +Drop<T>,>(
    data: @Tensor<T>, index: usize,
) -> Tensor<T> {
    let mut row_shape = array![];
    let mut row_len = 1;
    // Skip axis 0: the remaining dims give both the output shape and the
    // element count of one slice.
    let mut axis = 1;
    while axis != (*data).shape.len() {
        let dim = *(*data).shape.at(axis);
        row_shape.append(dim);
        row_len = row_len * dim;
        axis += 1;
    };
    let base = index * row_len;
    let mut row_data = array![];
    let mut offset = 0;
    while offset != row_len {
        row_data.append(*(*data).data.at(base + offset));
        offset += 1;
    };
    TensorTrait::new(row_shape.span(), row_data.span())
}
/// Interpolates the 1-D tensor `data` at output coordinate `x`.
///
/// Maps `x` back to the input axis per `coordinate_transformation_mode`
/// (default HALF_PIXEL), builds interpolation weights for `mode`
/// (nearest / linear / cubic, optionally antialiased), gathers the
/// neighboring samples, and returns their weighted sum (as a matmul of the
/// weight and sample tensors).
///
/// TF_CROP_AND_RESIZE may short-circuit and return `extrapolation_value`
/// when the mapped coordinate lands outside the input. With
/// `exclude_outside`, weights of out-of-range neighbors are zeroed and the
/// remaining weights renormalized.
fn interpolate_1d_with_x<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    data: @Tensor<T>,
    scale_factor: T,
    output_width_int: usize,
    x: T,
    antialias: Option<usize>,
    mode: MODE,
    nearest_mode: Option<NEAREST_MODE>,
    roi: Option<Tensor<T>>,
    extrapolation_value: T,
    coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
    exclude_outside: bool,
    cubic_coeff_a: Option<T>,
) -> Tensor<T> {
    let coordinate_transformation_mode = match coordinate_transformation_mode {
        Option::Some(coordinate_transformation_mode) => coordinate_transformation_mode,
        Option::None => { TRANSFORMATION_MODE::HALF_PIXEL },
    };
    let input_width = (*data).data.len();
    let output_width = (scale_factor * NumberTrait::new_unscaled((input_width).into(), false));
    // Map the output-space coordinate x to the input axis (x_ori), per the
    // ONNX Resize coordinate_transformation_mode definitions.
    let x_ori: T = match coordinate_transformation_mode {
        TRANSFORMATION_MODE::HALF_PIXEL => {
            (x + NumberTrait::half()) / scale_factor - NumberTrait::half()
        },
        TRANSFORMATION_MODE::ALIGN_CORNERS => {
            // Degenerate 1-wide output maps to coordinate 0.
            let mut x_ori = NumberTrait::zero();
            if output_width != NumberTrait::one() {
                x_ori = x
                    * (NumberTrait::new_unscaled(input_width.into(), false) - NumberTrait::one())
                    / (output_width - NumberTrait::one());
            }
            x_ori
        },
        TRANSFORMATION_MODE::ASYMMETRIC => { x / scale_factor },
        TRANSFORMATION_MODE::TF_CROP_AND_RESIZE => {
            let x_ori = match roi {
                Option::Some(roi) => {
                    // roi.data[0] / roi.data[1] are the normalized crop
                    // start / end along this axis.
                    let mut x_ori = if output_width == NumberTrait::one() {
                        (*roi.data.at(1) - *roi.data.at(0))
                            * (NumberTrait::new_unscaled(input_width.into(), false)
                                - NumberTrait::one())
                            / (NumberTrait::one() + NumberTrait::one())
                    } else {
                        x
                            * (*roi.data.at(1) - *roi.data.at(0))
                            * (NumberTrait::new_unscaled(input_width.into(), false)
                                - NumberTrait::one())
                            / (output_width - NumberTrait::one())
                    };
                    x_ori = x_ori
                        + *roi.data.at(0)
                            * (NumberTrait::new_unscaled(input_width.into(), false)
                                - NumberTrait::one());
                    // Outside the crop window: return the extrapolation
                    // value directly as a 1-element tensor.
                    if x_ori < NumberTrait::zero()
                        || x_ori > (NumberTrait::new_unscaled(input_width.into(), false)
                            - NumberTrait::one()) {
                        let mut ret = ArrayTrait::new();
                        let mut shape = ArrayTrait::new();
                        shape.append(1);
                        ret.append(extrapolation_value);
                        return TensorTrait::new(shape.span(), ret.span());
                    };
                    x_ori
                },
                Option::None => { core::panic_with_felt252('roi cannot be None.') },
            };
            x_ori
        },
        TRANSFORMATION_MODE::PYTORCH_HALF_PIXEL => {
            if output_width == NumberTrait::one() {
                NumberTrait::neg(NumberTrait::<T>::half())
            } else {
                (x + NumberTrait::half()) / scale_factor - NumberTrait::half()
            }
        },
        TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC => {
            // Offset recenters the sampling window when the integer output
            // width differs from scale * input_width.
            let adjustement: T = NumberTrait::new_unscaled(output_width_int.into(), false)
                / output_width;
            let center: T = NumberTrait::new_unscaled(input_width.into(), false)
                / (NumberTrait::one() + NumberTrait::one());
            let offset = center * (NumberTrait::one() - adjustement);
            offset + (x + NumberTrait::half()) / scale_factor - NumberTrait::half()
        },
    };
    let x_ori_int = x_ori.floor();
    // ratio = 1 when x_ori is exactly integral, else its fractional part.
    let ratio = if x_ori_int.try_into().unwrap() == x_ori {
        NumberTrait::one()
    } else {
        x_ori - x_ori_int.try_into().unwrap()
    };
    // Interpolation weights for the chosen mode.
    let mut coeffs = match mode {
        MODE::NEAREST => {
            let coeffs = match antialias {
                Option::Some => core::panic_with_felt252('antialias not for mode NEAREST'),
                Option::None => { nearest_coeffs(ratio, nearest_mode) },
            };
            coeffs
        },
        MODE::LINEAR => {
            let coeffs = match antialias {
                Option::Some(antialias) => {
                    let coeffs = if antialias == 0 {
                        linear_coeffs(ratio)
                    } else {
                        linear_coeffs_antialias(ratio, scale_factor)
                    };
                    coeffs
                },
                Option::None => { linear_coeffs(ratio) },
            };
            coeffs
        },
        MODE::CUBIC => {
            let coeffs = match antialias {
                Option::Some => { cubic_coeffs_antialias(ratio, scale_factor, cubic_coeff_a) },
                Option::None => { cubic_coeffs(ratio, cubic_coeff_a) },
            };
            coeffs
        },
    };
    let n = coeffs.data.len();
    // idxes flags, per neighbor, whether it came from edge padding.
    let (idxes, points) = get_neighbor(x_ori, n, data);
    if exclude_outside {
        // Zero the weights of out-of-data neighbors, then renormalize the
        // surviving weights by their sum.
        let mut coeffs_exclude_outside: Array<T> = array![];
        let mut sum = NumberTrait::zero();
        let mut i = 0;
        while i != idxes.data.len() {
            if *idxes.data.at(i) {
                coeffs_exclude_outside.append(NumberTrait::zero());
                sum += NumberTrait::zero(); // no-op, kept for symmetry
            } else {
                coeffs_exclude_outside.append(*coeffs.data.at(i));
                sum += *coeffs.data.at(i);
            }
            i += 1;
        };
        let mut coeff_div: Array<T> = array![];
        let mut i = 0;
        while i != n {
            coeff_div.append(*coeffs_exclude_outside.at(i) / sum);
            i += 1;
        };
        coeffs = TensorTrait::new(coeffs.shape, coeff_div.span());
    }
    // Weighted sum of the neighbor samples.
    TensorTrait::matmul(@coeffs, @points)
}
/// Gathers the `n` input samples neighboring coordinate `x`, using
/// edge-replication padding of `ceil(n / 2)` samples on each side.
///
/// Returns `(outside, points)`: `points` holds the n neighbor values taken
/// from the padded buffer, and `outside` flags, per neighbor, whether that
/// neighbor index fell in the padding rather than the original data (used
/// by `exclude_outside` in `interpolate_1d_with_x`).
fn get_neighbor<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut x: T, n: usize, data: @Tensor<T>,
) -> (Tensor<bool>, Tensor<T>) {
    // pad_width = ceil(n / 2), computed in T then converted back to usize.
    let pad_width: usize = NumberTrait::ceil(
        NumberTrait::new_unscaled(n.into(), false)
            / (NumberTrait::<T>::one() + NumberTrait::<T>::one())
    )
        .try_into()
        .unwrap();
    // Padded buffer: first sample repeated, the data, last sample repeated.
    let mut padded = array![];
    let mut i = 0;
    while i != pad_width {
        padded.append(*(*data).data.at(0));
        i += 1;
    };
    let mut i = 0;
    while i != (*data).data.len() {
        padded.append(*(*data).data.at(i));
        i += 1;
    };
    let mut i = 0;
    while i != pad_width {
        padded.append(*(*data).data.at((*data).data.len() - 1));
        i += 1;
    };
    // Shift x into padded-buffer coordinates.
    x = x + NumberTrait::new_unscaled(pad_width.into(), false);
    let mut idxes = get_neighbor_idxes(x, n, padded.len());
    let mut idxes_centered = array![];
    let mut ret = array![];
    let mut i = 0;
    while i != idxes.data.len() {
        ret.append(*padded.at(*idxes.data.at(i)));
        // true when the index lies in the left or right padding region,
        // i.e. outside the original data range.
        if *idxes.data.at(i) >= pad_width {
            if (*idxes.data.at(i) - pad_width) >= (*data).data.len() {
                idxes_centered.append(true);
            } else {
                idxes_centered.append(false);
            }
        } else {
            idxes_centered.append(true);
        }
        i += 1;
    };
    let mut shape = array![];
    shape.append(idxes.data.len());
    (
        TensorTrait::new(shape.span(), idxes_centered.span()),
        TensorTrait::new(shape.span(), ret.span())
    )
}
/// Computes the `n` indices (into a padded buffer of length `limit`)
/// closest to coordinate `x`, returned sorted ascending.
///
/// Only even `n` is supported (panics with 'MUST BE EVEN' otherwise).
/// Indices are taken in pairs expanding outward from the floor/ceil bracket
/// of `x`, clamped to `[0, limit)`.
///
/// NOTE(review): `i_low - i < 0` can never hold — `i_low` and `i` are
/// usize, so the subtraction either yields a non-negative value or panics
/// on underflow; the left-edge clamp branch looks unreachable as written.
/// Confirm against the upstream reference implementation.
fn get_neighbor_idxes<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut x: T, n: usize, limit: usize,
) -> Tensor<usize> {
    // Unused; kept for parity with get_neighbor's padding computation.
    let _pad_width: usize = NumberTrait::<
        T
    >::ceil(
        NumberTrait::new_unscaled(n.into(), false)
            / (NumberTrait::<T>::one() + NumberTrait::<T>::one())
    )
        .try_into()
        .unwrap();
    let mut idxes = array![];
    if n % 2 == 0 {
        // Bracket x with floor/ceil; negative coordinates clamp to (0, 1).
        let (mut i_low, mut i_high) = if x < NumberTrait::zero() {
            (0, 1)
        } else {
            (NumberTrait::floor(x).try_into().unwrap(), NumberTrait::ceil(x).try_into().unwrap())
        };
        // Keep the bracket inside the buffer and as a distinct pair.
        if i_high >= limit {
            i_low = limit - 2;
            i_high = limit - 1;
        }
        if i_low == i_high {
            if i_low == 0 {
                i_high = i_high + 1;
            } else {
                i_low = i_low - 1;
            }
        }
        // Take n/2 (low, high) pairs, expanding outward; at the right edge
        // extra indices are taken from the low side instead.
        let mut i = 0;
        while i != n / 2 {
            if i_low - i < 0 {
                idxes.append(i_high + i);
                i_high += 1;
            } else {
                idxes.append(i_low - i);
            }
            if i_high + i >= limit {
                i_low -= 1;
                idxes.append(i_low - i);
            } else {
                idxes.append(i_high + i);
            }
            i += 1;
        }
    } else {
        core::panic_with_felt252('MUST BE EVEN');
    }
    idxes = bubble_sort::bubble_sort_elements(idxes, true);
    let mut shape = array![];
    shape.append(n);
    TensorTrait::new(shape.span(), idxes.span())
}
/// Linear interpolation weights for a fractional offset `ratio`:
/// the 2-element tensor `[1 - ratio, ratio]`.
fn linear_coeffs<
    T,
    MAG,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +TensorTrait<T>,
    +Copy<T>,
    +Drop<T>,
    +Sub<T>
>(
    mut ratio: T
) -> Tensor<T> {
    let weights = array![NumberTrait::one() - ratio, ratio];
    let shape = array![2];
    TensorTrait::new(shape.span(), weights.span())
}
/// Antialiased linear (triangle/tent) interpolation weights.
///
/// For downscaling (scale < 1) the filter footprint widens: taps run from
/// `start = floor(-1 / scale) + 1` over `footprint = 2 - 2 * start`
/// positions, each weighted `1 - |(i - ratio) * scale|` clipped to [0, 1],
/// then normalized to sum to one. Scale is clamped to 1 for upscaling
/// (reducing to plain 2-tap linear weights).
fn linear_coeffs_antialias<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, scale: T
) -> Tensor<T> {
    // Upscaling needs no antialiasing: clamp scale to 1.
    let scale = NumberTrait::min(scale, NumberTrait::one());
    let start = (NumberTrait::floor(NumberTrait::neg(NumberTrait::one()) / scale)
        + NumberTrait::one());
    let footprint = (NumberTrait::one() + NumberTrait::one())
        - (NumberTrait::one() + NumberTrait::one()) * start;
    let mut coeffs: Array<T> = array![];
    let mut sum = NumberTrait::zero();
    // Tent filter evaluated at each tap, clipped to [0, 1]; track the sum
    // of the kept weights for normalization.
    let mut i = start;
    while i != start + footprint {
        let value = NumberTrait::one() - NumberTrait::abs((i - ratio) * scale);
        if value < NumberTrait::zero() {
            coeffs.append(NumberTrait::zero());
        } else if value > NumberTrait::one() {
            coeffs.append(NumberTrait::one());
            sum += NumberTrait::one();
        } else {
            coeffs.append(value);
            sum += value;
        }
        i += NumberTrait::one();
    };
    // Normalize so the weights sum to one.
    let n = coeffs.len();
    let mut coeff_div: Array<T> = array![];
    let mut i = 0;
    while i != n {
        coeff_div.append(*coeffs.at(i) / sum);
        i += 1;
    };
    let mut shape = array![];
    shape.append(n);
    TensorTrait::new(shape.span(), coeff_div.span())
}
/// Cubic convolution interpolation weights for fractional offset `ratio`.
///
/// Returns the four weights for the neighbors at relative positions
/// -1, 0, 1, 2 around the sample point. `A` is the cubic coefficient;
/// it defaults to -3/4 (the ONNX Resize `cubic_coeff_a` default).
fn cubic_coeffs<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, A: Option<T>
) -> Tensor<T> {
    let one = NumberTrait::one();
    let two = one + NumberTrait::one();
    let three = two + NumberTrait::one();
    let four = three + NumberTrait::one();
    let five = four + NumberTrait::one();
    let eight = four + four;
    let a = match A {
        Option::Some(val) => val,
        Option::None => NumberTrait::neg(three / four),
    };
    // Distances of the taps from the sample point: the outer taps sit at
    // 1 + ratio and 1 + (1 - ratio); the inner taps use ratio / 1 - ratio.
    let outer_lo = ratio + one;
    let mirrored = one - ratio;
    let outer_hi = mirrored + one;
    let mut weights = array![];
    weights
        .append(
            ((a * outer_lo - five * a) * outer_lo + eight * a) * outer_lo - four * a
        );
    weights.append(((a + two) * ratio - (a + three)) * ratio * ratio + one);
    weights.append(((a + two) * mirrored - (a + three)) * mirrored * mirrored + one);
    weights
        .append(
            ((a * outer_hi - five * a) * outer_hi + eight * a) * outer_hi - four * a
        );
    let mut shape = array![];
    shape.append(4);
    TensorTrait::new(shape.span(), weights.span())
}
/// Antialiased cubic interpolation weights.
///
/// Taps run over `[i_start, i_end)` with `i_start = floor(-2 / scale) + 1`
/// and `i_end = 2 - i_start`; each weight is the cubic kernel
/// `compute_coeff(scale * (i - ratio), A)`, normalized to sum to one.
/// Scale is clamped to 1 for upscaling. `A` defaults to -3/4.
fn cubic_coeffs_antialias<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +TryInto<T, usize>,
    +Into<usize, MAG>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, scale: T, A: Option<T>
) -> Tensor<T> {
    let one = NumberTrait::one();
    let two = one + NumberTrait::one();
    let three = two + NumberTrait::one();
    let four = three + NumberTrait::one();
    // Upscaling needs no antialiasing: clamp scale to 1.
    let scale = NumberTrait::min(scale, NumberTrait::one());
    // The tap range widens as scale shrinks.
    let i_start = NumberTrait::floor(NumberTrait::neg(two) / scale) + NumberTrait::one();
    let i_end = two - i_start;
    assert(i_end > i_start, 'i_end must be greater');
    let A = match A {
        Option::Some(A) => A,
        Option::None => { NumberTrait::neg(three / four) },
    };
    let mut coeffs = array![];
    let mut sum = NumberTrait::zero();
    // Cubic kernel evaluated at each scaled tap distance.
    let mut i = i_start;
    while i != i_end {
        let value = compute_coeff(scale * (i - ratio), A);
        coeffs.append(value);
        sum += value;
        i += NumberTrait::one();
    };
    // Normalize so the weights sum to one.
    let n = coeffs.len();
    let mut coeff_div: Array<T> = array![];
    let mut i = 0;
    while i != n {
        coeff_div.append(*coeffs.at(i) / sum);
        i += 1;
    };
    let mut shape = array![];
    shape.append(n);
    TensorTrait::new(shape.span(), coeff_div.span())
}
/// Piecewise cubic convolution kernel evaluated at distance `x` with
/// coefficient `A`:
///   |x| <= 1 : (A+2)|x|^3 - (A+3)|x|^2 + 1
///   1 < |x| < 2 : A|x|^3 - 5A|x|^2 + 8A|x| - 4A
///   otherwise : 0
fn compute_coeff<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut x: T, A: T
) -> T {
    let one: T = NumberTrait::one();
    let two = one + NumberTrait::one();
    let three = two + NumberTrait::one();
    let four = three + NumberTrait::one();
    let five = four + NumberTrait::one();
    let eight = four + four;
    // The kernel is symmetric: work with the absolute distance.
    x = x.abs();
    let x_sq = x * x;
    let x_cu = x * x_sq;
    if x <= one {
        (A + two) * x_cu - (A + three) * x_sq + one
    } else if x < two {
        A * x_cu - five * A * x_sq + eight * A * x - four * A
    } else {
        NumberTrait::zero()
    }
}
/// Selection weights for nearest-neighbor interpolation.
///
/// Returns a 2-element tensor: `[1, 0]` when the lower (floor) neighbor is
/// selected, `[0, 1]` when the upper (ceil) neighbor is selected, according
/// to `nearest_mode` (default ROUND_PREFER_FLOOR) and the fractional offset
/// `ratio`. A ratio of exactly one always selects the upper neighbor.
fn nearest_coeffs<
    T,
    MAG,
    +TensorTrait<T>,
    +NumberTrait<T, MAG>,
    +PartialOrd<T>,
    +PartialEq<T>,
    +Copy<T>,
    +Drop<T>,
    +AddEq<T>,
    +Add<T>,
    +Div<T>,
    +Mul<T>,
    +Sub<T>,
>(
    mut ratio: T, nearest_mode: Option<NEAREST_MODE>
) -> Tensor<T> {
    let mode = match nearest_mode {
        Option::Some(mode) => { mode },
        Option::None => { NEAREST_MODE::ROUND_PREFER_FLOOR },
    };
    let mut shape = array![];
    shape.append(2);
    // ratio == 1 means x sits exactly on the next sample: take the upper tap.
    let take_lower = if ratio == NumberTrait::one() {
        false
    } else {
        match mode {
            NEAREST_MODE::ROUND_PREFER_FLOOR => ratio <= NumberTrait::half(),
            NEAREST_MODE::ROUND_PREFER_CEIL => ratio < NumberTrait::half(),
            NEAREST_MODE::FLOOR => true,
            NEAREST_MODE::CEIL => false,
        }
    };
    let mut weights = array![];
    if take_lower {
        weights.append(NumberTrait::one());
        weights.append(NumberTrait::zero());
    } else {
        weights.append(NumberTrait::zero());
        weights.append(NumberTrait::one());
    }
    TensorTrait::new(shape.span(), weights.span())
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::round docstring — applies `NumberTrait::round` to every
/// element, keeping the tensor's shape.
fn round<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl FTensor: TensorTrait<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    let mut rounded: Array<T> = array![];
    let mut i = 0;
    while i != self.data.len() {
        rounded.append((*self.data.at(i)).round());
        i += 1;
    };
    TensorTrait::new(self.shape, rounded.span())
}
|
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
use core::dict::Felt252DictTrait;
use core::nullable::{nullable_from_box, match_nullable, FromNullableResult};
/// Cf: TensorTrait::scatter docstring
///
/// Scatters `updates` into a copy of `self` at positions given by `indices`
/// along `axis` (default 0). `reduction` selects how updates combine with
/// the data: 'none' (overwrite, default), 'add', 'mul', 'max' or 'min'.
///
/// Each (index, update) element is mapped to a flat position in the output;
/// a dictionary keyed by that position records either the single winning
/// update ('none') or the list of all updates targeting it (reductions).
/// Rank > 2 with axis == 1 is handled by transposing indices/updates so the
/// scatter axis becomes the last axis, then transposing the result back.
fn scatter<
    T,
    impl TTensorTrait: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TAddEq: AddEq<T>,
    impl TMulEq: MulEq<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
>(
    self: @Tensor<T>,
    updates: Tensor<T>,
    indices: Tensor<usize>,
    axis: Option<usize>,
    reduction: Option<usize>
) -> Tensor<T> {
    let mut axis = match axis {
        Option::Some(val) => val,
        Option::None => 0
    };
    let reduction = match reduction {
        Option::Some(val) => val,
        Option::None => 'none'
    };
    let data_rank = (*self.shape).len();
    let indices_rank = (indices.shape).len();
    let updates_rank = (updates.shape).len();
    assert((data_rank == updates_rank) & (updates_rank == indices_rank), 'must be same rank');
    let data_shape = *self.shape;
    let ind_max = indices.data.max().unwrap();
    assert(ind_max < *data_shape.at(axis), 'index is out of bound');
    let data_shape = *self.shape;
    let mut indices_shape = indices.shape;
    let updates_shape = updates.shape;
    assert(
        (*indices_shape[0] == *updates_shape[0]) & (*indices_shape[1] == *updates_shape[1]),
        'shape must be same'
    );
    let mut output_data = array![];
    let mut data_indices = indices.data;
    let mut data_updates = updates.data;
    // 'none': flat output position -> 1-based index into updates.
    let mut indices_updates: Felt252Dict<usize> = Default::default();
    // reductions: flat output position -> list of update positions.
    let mut indices_updates_reduction: Felt252Dict<Nullable<Span<usize>>> = Default::default();
    let mut data_shape_copy = data_shape;
    let mut indices_shape_copy = indices_shape;
    *data_shape_copy.pop_front().unwrap();
    *indices_shape_copy.pop_front().unwrap();
    // Number of elements per slice along the scatter axis, for data and for
    // indices respectively.
    let mut indices_loop: usize = 1;
    let mut data_loop: usize = 1;
    if (axis == 0) {
        loop {
            match indices_shape_copy.pop_front() {
                Option::Some(val) => { indices_loop *= *val; },
                Option::None => { break; }
            };
        };
        loop {
            match data_shape_copy.pop_front() {
                Option::Some(val) => { data_loop *= *val; },
                Option::None => { break; }
            };
        };
    }
    // axis == 1 on rank > 2: transpose so the scatter axis is last, scatter
    // there, and transpose the result back at the end.
    let mut transpose = false;
    if ((data_rank > 2) & (axis == 1)) {
        let index = indices.transpose(axes: array![0, 2, 1].span());
        let update = updates.transpose(axes: array![0, 2, 1].span());
        data_indices = index.data;
        data_updates = update.data;
        indices_shape = index.shape;
        axis = 2;
        transpose = true;
    }
    if (axis == (data_rank - 1)) {
        data_loop = *data_shape_copy.pop_back().unwrap();
        indices_loop = *indices_shape_copy.pop_back().unwrap();
    }
    // First pass: map every index element to its flat output position and
    // record which update(s) target it.
    let mut total_count: usize = 0;
    let mut shift = 0;
    loop {
        let mut result: usize = 0;
        match data_indices.pop_front() {
            Option::Some(val) => {
                // Stored 1-based so that dict default 0 means "no update".
                let value = total_count + 1;
                if (axis == 0) {
                    let column = total_count % indices_loop;
                    result = (*val * data_loop) + (column);
                    // Correct for indices being narrower than data in the
                    // last dimension.
                    if ((result % *data_shape.at(data_rank - 1)) != total_count % *indices_shape
                        .at(data_rank - 1)) {
                        result +=
                            (*data_shape.at(data_rank - 1) - *indices_shape.at(data_rank - 1));
                    }
                }
                if (axis == (data_rank - 1)) {
                    let mut row = total_count / indices_loop;
                    // Correct for indices having fewer rows than data.
                    if ((data_rank > 2) & (row % *data_shape.at(1) >= *indices_shape.at(1))) {
                        shift = (*data_shape.at(1) - *indices_shape.at(1));
                    }
                    result = *val + (data_loop * (row + shift));
                }
                if (reduction == 'none') {
                    // Later updates to the same position overwrite earlier.
                    indices_updates.insert(result.into(), value.into());
                } else {
                    // Append this update position to the list for `result`.
                    let mut arr = array![];
                    let val = indices_updates_reduction.get(result.into());
                    let mut a = ArrayTrait::new();
                    let mut span = match match_nullable(val) {
                        FromNullableResult::Null(()) => a.span(),
                        FromNullableResult::NotNull(val) => val.unbox(),
                    };
                    loop {
                        match span.pop_front() {
                            Option::Some(val) => { arr.append(*val); },
                            Option::None => { break; }
                        };
                    };
                    arr.append(total_count);
                    indices_updates_reduction
                        .insert(result.into(), nullable_from_box(BoxTrait::new(arr.span())));
                }
                total_count += 1;
            },
            Option::None => { break; }
        };
    };
    // Second pass: walk the data and emit either the original value or the
    // (reduced) update(s) recorded for that position.
    let mut data = *self.data;
    let mut i: usize = 0;
    loop {
        match data.pop_front() {
            Option::Some(val) => {
                if (reduction == 'none') {
                    let value = indices_updates.get(i.into());
                    if (value == 0) {
                        output_data.append(*val);
                    } else {
                        // value is 1-based; see first pass.
                        let data_value = data_updates[value - 1];
                        output_data.append(*data_value);
                    }
                } else {
                    let value = indices_updates_reduction.get(i.into());
                    let mut a = array![];
                    let mut span = match match_nullable(value) {
                        FromNullableResult::Null(()) => a.span(),
                        FromNullableResult::NotNull(value) => value.unbox(),
                    };
                    if (span.len() == 0) {
                        output_data.append(*val);
                    } else {
                        // Fold every recorded update into the original value
                        // with the requested reduction.
                        let mut result = *val;
                        if (reduction == 'add') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => { result += *data_updates[*val]; },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                        if (reduction == 'mul') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => { result *= *data_updates[*val]; },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                        if (reduction == 'max') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => {
                                        let holder = *data_updates[*val];
                                        if (holder > result) {
                                            result = holder;
                                        }
                                    },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                        if (reduction == 'min') {
                            loop {
                                match span.pop_front() {
                                    Option::Some(val) => {
                                        let holder = *data_updates[*val];
                                        if (holder < result) {
                                            result = holder;
                                        }
                                    },
                                    Option::None => { break; }
                                };
                            };
                            output_data.append(result);
                        }
                    }
                }
                i += 1;
            },
            Option::None => { break; }
        };
    };
    let mut output_tensor = TensorTrait::<T>::new(*self.shape, output_data.span());
    // Undo the axis == 1 transpose applied above.
    if transpose {
        output_tensor = output_tensor.transpose(axes: array![0, 2, 1].span())
    }
    output_tensor
}
use core::nullable::{nullable_from_box, match_nullable, FromNullableResult};
use alexandria_data_structures::array_ext::SpanTraitExt;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
/// Scatters whole slices of `updates` into a copy of `self` at the row
/// positions named by `indices` (ONNX ScatterND-style, restricted to
/// indexing along the first axis here).
///
/// `reduction` selects how an update slice combines with the existing data:
/// 'none' (overwrite, default), 'add', 'mul', 'max' or 'min'.
///
/// NOTE(review): `ind` is initialized but never incremented in the two
/// shape-validation loops below, so the `ind >= *indices_last_axis` test and
/// the `*updates_shape[ind]` comparison always use `ind == 0` — the shape
/// checks look weaker than intended; confirm against upstream.
fn scatter_nd<
    T,
    impl TTensorTrait: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
    impl TAdd: Add<T>,
    impl TMul: Mul<T>,
    impl TPartialOrd: PartialOrd<T>,
    impl TPartialEq: PartialEq<T>,
>(
    self: @Tensor<T>, updates: Tensor<T>, indices: Tensor<usize>, reduction: Option<usize>
) -> Tensor<T> {
    let reduction = match reduction {
        Option::Some(val) => val,
        Option::None => 'none'
    };
    let data_rank = (*self.shape).len();
    let mut data_shape = *self.shape;
    let mut indices_shape = indices.shape;
    let updates_shape = updates.shape;
    // Last dim of indices = how many leading data dims each index addresses.
    let indices_last_axis = indices_shape.pop_back().unwrap();
    assert(*indices_last_axis <= data_rank, 'must be <= data rank');
    let ind_max = indices.data.max().unwrap();
    if (data_rank > 1) {
        assert(ind_max < data_rank, 'index is out of bound');
    }
    // Expected updates shape: indices batch dims + trailing data dims.
    let mut batch_dims_shape = array![];
    let mut ind: usize = 0;
    loop {
        match indices_shape.pop_front() {
            Option::Some(val) => { batch_dims_shape.append(*val); },
            Option::None => { break; }
        };
    };
    let mut data_shape_clone = data_shape.clone();
    loop {
        match data_shape_clone.pop_front() {
            Option::Some(val) => {
                if (ind >= *indices_last_axis) {
                    batch_dims_shape.append(*val);
                }
            },
            Option::None => { break; }
        };
    };
    let mut ind: usize = 0;
    loop {
        match batch_dims_shape.pop_front() {
            Option::Some(val) => { assert(val == *updates_shape[ind], 'must be same'); },
            Option::None => { break; }
        };
    };
    let mut data_indices = indices.data;
    let mut data_updates = updates.data;
    let mut data_shape_clone = data_shape.clone();
    // indexer = elements per slice along axis 0 (product of trailing dims).
    let mut indexer = 1;
    let data_shape_first = data_shape_clone.pop_front();
    if data_rank >= 1 {
        loop {
            match data_shape_clone.pop_front() {
                Option::Some(val) => { indexer *= *val; },
                Option::None => { break; }
            };
        }
    }
    // Map each target row to a 1-based position in updates; the dict
    // default 0 then means "row not updated".
    let mut updates_index_dict: Felt252Dict<u32> = Default::default();
    let mut dict_ind: usize = 1;
    loop {
        match data_indices.pop_front() {
            Option::Some(val) => {
                updates_index_dict.insert((*val).into(), dict_ind);
                dict_ind += 1;
            },
            Option::None => { break; }
        };
    };
    // Emit the output row by row: untouched rows are copied, updated rows
    // are combined with the matching updates slice per `reduction`.
    let mut output_data: Array<T> = array![];
    let mut data = *self.data;
    let mut index: usize = 0;
    let mut inner_index: usize = 0;
    let num = *data_shape_first.unwrap();
    while index != num {
        let comp_index = updates_index_dict.get(index.into());
        if comp_index == 0 {
            // No update for this row: copy it through.
            loop {
                if (inner_index == indexer) {
                    inner_index = 0;
                    break;
                }
                let val = *data.at((index * indexer) + inner_index);
                output_data.append(val);
                inner_index += 1;
            };
        } else {
            // comp_index is 1-based; see the dict construction above.
            loop {
                if (inner_index == indexer) {
                    inner_index = 0;
                    break;
                }
                if (reduction == 'none') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    output_data.append(*val);
                }
                if (reduction == 'add') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    output_data.append(*val + data_val);
                }
                if (reduction == 'mul') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    output_data.append((*val) * data_val);
                }
                if (reduction == 'max') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    if (*val > data_val) {
                        output_data.append(*val);
                    } else {
                        output_data.append(data_val);
                    }
                }
                if (reduction == 'min') {
                    let val = data_updates.at(((comp_index - 1) * indexer) + inner_index);
                    let data_val = *data.at((index * indexer) + inner_index);
                    if (*val > data_val) {
                        output_data.append(data_val);
                    } else {
                        output_data.append(*val);
                    }
                }
                inner_index += 1;
            }
        }
        index += 1;
    };
    let mut output_tensor = TensorTrait::<T>::new(*self.shape, output_data.span());
    output_tensor
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::shrink docstring
fn shrink<
    T,
    MAG,
    impl TTensor: TensorTrait<T>,
    impl TNumber: NumberTrait<T, MAG>,
    impl TPartialOrd: PartialOrd<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    mut self: Tensor<T>, bias: Option<T>, lambd: Option<T>
) -> Tensor<T> {
    // Shrink activation: zero out values in [-lambd, lambd] and pull the
    // remaining values toward zero by `bias`. Defaults: bias = 0, lambd = 0.5.
    let bias: T = match bias {
        Option::Some(b) => b,
        Option::None => NumberTrait::zero(),
    };
    let lambd: T = match lambd {
        Option::Some(l) => l,
        Option::None => NumberTrait::half(),
    };

    let mut shrunk: Array<T> = array![];
    loop {
        match self.data.pop_front() {
            Option::Some(item) => {
                let x = *item;
                let y = if x < lambd.neg() {
                    NumberTrait::add(x, bias)
                } else if x > lambd {
                    NumberTrait::sub(x, bias)
                } else {
                    NumberTrait::zero()
                };
                shrunk.append(y);
            },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, shrunk.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
fn sign<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl FTensor: TensorTrait<T>,
    impl FCopy: Copy<T>,
    impl FDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    // Element-wise sign; the output tensor keeps the input's shape.
    let mut signs: Array<T> = array![];
    let mut remaining = self.data;
    loop {
        match remaining.pop_front() {
            Option::Some(elem) => { signs.append((*elem).sign()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, signs.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::sin docstring
fn sin<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    // Element-wise sine; the output tensor keeps the input's shape.
    let mut sines: Array<T> = array![];
    let mut remaining = self.data;
    loop {
        match remaining.pop_front() {
            Option::Some(elem) => { sines.append((*elem).sin()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, sines.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::sinh docstring
fn sinh<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    // Element-wise hyperbolic sine; the output tensor keeps the input's shape.
    let mut out: Array<T> = array![];
    let mut remaining = self.data;
    loop {
        match remaining.pop_front() {
            Option::Some(elem) => { out.append((*elem).sinh()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, out.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
fn sqrt<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    // Element-wise square root; the output tensor keeps the input's shape.
    let mut roots: Array<T> = array![];
    let mut remaining = self.data;
    loop {
        match remaining.pop_front() {
            Option::Some(elem) => { roots.append((*elem).sqrt()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, roots.span())
}
|
use orion::numbers::NumberTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: TensorTrait::tanh docstring
fn tanh<
    T,
    MAG,
    impl TNumberTrait: NumberTrait<T, MAG>,
    impl TTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>,
>(
    mut self: Tensor<T>
) -> Tensor<T> {
    // Element-wise hyperbolic tangent; the output tensor keeps the input's shape.
    let mut out: Array<T> = array![];
    let mut remaining = self.data;
    loop {
        match remaining.pop_front() {
            Option::Some(elem) => { out.append((*elem).tanh()); },
            Option::None => { break; }
        };
    };

    TensorTrait::new(self.shape, out.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::where docstring
fn where<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl TFTensor: TensorTrait<T>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    self: @Tensor<T>, x: @Tensor<T>, y: @Tensor<T>
) -> Tensor<T> {
    // Broadcast all three inputs to one common shape, then pick x or y
    // element-wise depending on the condition tensor `self`.
    let xy_shape = broadcast_shape(*x.shape, *y.shape);
    let target_shape = broadcast_shape(*self.shape, xy_shape);
    let total = len_from_shape(target_shape);

    let mut selected: Array<T> = array![];
    let mut flat: usize = 0;
    while flat != total {
        let coords = unravel_index(flat, target_shape);
        // Map the broadcasted coordinate back into each operand's own layout.
        let cond_idx = broadcast_index_mapping(*self.shape, coords);
        let x_idx = broadcast_index_mapping(*x.shape, coords);
        let y_idx = broadcast_index_mapping(*y.shape, coords);
        let picked = NumberTrait::where(
            *(*self.data)[cond_idx], *(*x.data)[x_idx], *(*y.data)[y_idx]
        );
        selected.append(picked);
        flat += 1;
    };

    TensorTrait::new(target_shape, selected.span())
}
|
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index};
use orion::operators::tensor::helpers::{
broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility
};
/// Cf: TensorTrait::xor docstring
fn xor<
    T,
    MAG,
    impl TNumber: NumberTrait<T, MAG>,
    impl UsizeFTensor: TensorTrait<usize>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    y: @Tensor<T>, z: @Tensor<T>
) -> Tensor<usize> {
    // Element-wise logical xor with broadcasting; emits 1/0 as usize.
    let out_shape = broadcast_shape(*y.shape, *z.shape);
    let total = len_from_shape(out_shape);

    let mut bits: Array<usize> = array![];
    let mut pos: usize = 0;
    while pos != total {
        let coords = unravel_index(pos, out_shape);
        let lhs = *(*y.data)[broadcast_index_mapping(*y.shape, coords)];
        let rhs = *(*z.data)[broadcast_index_mapping(*z.shape, coords)];
        let bit: usize = if NumberTrait::xor(lhs, rhs) {
            1
        } else {
            0
        };
        bits.append(bit);
        pos += 1;
    };

    TensorTrait::new(out_shape, bits.span())
}
|
mod array_feature_extractor;
mod label_encoder;
|
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
fn array_feature_extractor<
    T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
    self: Tensor<T>, indices: Tensor<usize>
) -> Tensor<T> {
    // Select entries along the last axis of `self` at the given 1-D indices.
    assert(indices.shape.len() == 1, 'Indices must be a 1D tensor');

    if self.shape.len() != 1 {
        // N-D case: shape of all leading axes is preserved, the last axis
        // is replaced by the number of requested indices.
        let (out_shape, leading_count) = calculate_output_shape::<T>(self.shape, indices);
        let out_data = calculate_output_data::<T>(self, indices, leading_count);
        return TensorTrait::new(out_shape.span(), out_data.span());
    }

    // 1-D case: a plain gather over the single axis.
    process_1D_tensor(self, indices)
}
fn process_1D_tensor<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    self: Tensor<T>, indices: Tensor<usize>
) -> Tensor<T> {
    // Gather for the 1-D case: output[i] = self[indices[i]].
    let mut gathered: Array<T> = array![];
    let mut remaining: Span<usize> = indices.data;
    let bound = *self.shape.at(0);
    loop {
        match remaining.pop_front() {
            Option::Some(idx) => {
                // Reject out-of-range indices up front.
                assert(*idx < bound, 'Indices out of range');
                gathered.append(*self.data.at(*idx));
            },
            Option::None => { break; }
        };
    };

    TensorTrait::new(indices.shape, gathered.span())
}
fn calculate_output_shape<
    T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>
>(
    input_shape: Span<usize>, indices: Tensor<usize>
) -> (Array<usize>, usize) {
    // Build the output shape: every leading axis of `input_shape` is kept,
    // the last axis is replaced by the number of indices. Also returns the
    // product of the kept axes (number of leading positions to gather over).
    // NOTE(review): `len() - 2` assumes input_shape has at least 2 axes —
    // the 1-D case is handled by the caller before reaching here; confirm.
    let mut leading_product: usize = 1;
    let mut shape_out: Array<usize> = array![];
    let mut remaining = input_shape;
    let mut axis: usize = 0;
    let last_kept_axis = input_shape.len() - 2;
    loop {
        match remaining.pop_front() {
            Option::Some(dim) => {
                if axis > last_kept_axis {
                    // Reached the final axis: it is replaced below.
                    break;
                }
                shape_out.append(*dim);
                leading_product = leading_product * *dim;
                axis += 1;
            },
            Option::None => { break; }
        };
    };
    shape_out.append(indices.data.len());

    (shape_out, leading_product)
}
fn calculate_output_data<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    self: Tensor<T>, indices: Tensor<usize>, total_elements: usize
) -> Array<T> {
    // Gather along the last axis: for each of the `total_elements` leading
    // positions, append self[..., indices[j]] for every requested index j.
    let last_tensor_axis: usize = *self.shape.at(self.shape.len() - 1);
    let mut output_data: Array<T> = array![];
    let strides: Span<usize> = TensorTrait::stride(@self);
    let stride_l1 = *strides.at(strides.len() - 1);
    // Fix: only read the second-to-last stride when it exists. The original
    // code indexed `strides.len() - 2` unconditionally, which underflows and
    // panics for 1-D strides even though the `strides.len() > 1` guard below
    // was clearly meant to tolerate that case.
    let stride_l2 = if strides.len() > 1 {
        *strides.at(strides.len() - 2)
    } else {
        0
    };
    let mut element_counter: usize = 0;
    while element_counter != total_elements {
        // Flat offset of the current leading position (0 for 1-D input).
        let base_index = if strides.len() > 1 {
            element_counter * stride_l2
        } else {
            0
        };
        let mut indices_values = indices.data;
        loop {
            match indices_values.pop_front() {
                Option::Some(current_indices_value) => {
                    assert(*current_indices_value < last_tensor_axis, 'Indices out of range');
                    let flat_index = base_index + *current_indices_value * stride_l1;
                    output_data.append(*self.data.at(flat_index));
                },
                Option::None => { break; }
            };
        };
        element_counter += 1;
    };

    output_data
}
use core::array::ArrayTrait;
use core::option::OptionTrait;
use core::array::SpanTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::NumberTrait;
use core::dict::Felt252DictTrait;
use core::nullable::{nullable_from_box, match_nullable, FromNullableResult};
use core::debug::PrintTrait;
use core::traits::Into;
use core::traits::TryInto;
fn label_encoder<
    T, +Drop<T>, +Copy<T>, +AddEq<T>, +TensorTrait<T>, +PartialOrd<T>, +Into<T, felt252>,
>(
    self: @Tensor<T>,
    default_list: Option<Span<T>>,
    default_tensor: Option<Tensor<T>>,
    keys: Option<Span<T>>,
    keys_tensor: Option<Tensor<T>>,
    values: Option<Span<T>>,
    values_tensor: Option<Tensor<T>>,
) -> Tensor<T> {
    // Resolve each (span | tensor) input pair: the span variant wins, the
    // tensor's data is the fallback, and neither being provided is an error.
    let mut default = match default_list {
        Option::Some(span) => span,
        Option::None => {
            match default_tensor {
                Option::Some(tensor) => tensor.data,
                Option::None => { core::panic_with_felt252('None') },
            }
        }
    };
    // The default value is the first element of the resolved span.
    let default = match default.pop_front() {
        Option::Some(value) => *value,
        Option::None => { core::panic_with_felt252('None') }
    };
    let mut keys = match keys {
        Option::Some(span) => { span },
        Option::None => {
            match keys_tensor {
                Option::Some(tensor) => { tensor.data },
                Option::None => { core::panic_with_felt252('None') },
            }
        }
    };
    let mut values = match values {
        Option::Some(span) => { span },
        Option::None => {
            match values_tensor {
                Option::Some(tensor) => { tensor.data },
                Option::None => { core::panic_with_felt252('None') },
            }
        }
    };
    assert(keys.len() == values.len(), 'keys must be eq to values');

    // Build the key -> value lookup table.
    let mut mapping: Felt252Dict<Nullable<T>> = Default::default();
    loop {
        let key = match keys.pop_front() {
            Option::Some(key) => key,
            Option::None => { break; }
        };
        let value = match values.pop_front() {
            Option::Some(value) => value,
            Option::None => { break; }
        };
        mapping.insert((*key).into(), nullable_from_box(BoxTrait::new(*value)));
    };

    // Translate every input element; keys absent from the table map to `default`.
    let mut encoded = ArrayTrait::<T>::new();
    let mut data = *self.data;
    loop {
        match data.pop_front() {
            Option::Some(val) => {
                let looked_up = mapping.get((*val).into());
                let mapped = match match_nullable(looked_up) {
                    FromNullableResult::Null => default,
                    FromNullableResult::NotNull(boxed) => boxed.unbox(),
                };
                encoded.append(mapped);
            },
            Option::None => { break; }
        };
    };

    TensorTrait::<T>::new(*self.shape, encoded.span())
}
mod quantize_linear;
mod dynamic_quantize_linear;
mod dequantize_linear;
mod qlinear_matmul;
mod qlinear_concat;
mod qlinear_add;
mod qlinear_mul;
mod qlinear_leakyrelu;
|
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::check_compatibility;
use orion::utils::saturate;
fn dequantize_linear<
    Q,
    T,
    impl TTensor: TensorTrait<T>,
    impl QIntoT: Into<Q, T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl QTensorIntoTTensor: Into<Tensor<Q>, Tensor<T>>,
    impl TDrop: Drop<T>,
    impl TCopy: Copy<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    x: @Tensor<Q>, x_scale: @Tensor<T>, x_zero_point: @Tensor<T>
) -> Tensor::<T> {
    // Scalar scale and zero-point mean one pair applies to the whole tensor;
    // otherwise the shapes must be broadcast-compatible and dequantization
    // runs per axis.
    let per_tensor = (*x_scale.data).len() == 1 && (*x_zero_point.data).len() == 1;
    if per_tensor {
        dequantize_element_wise(*x, *x_scale.data[0], *x_zero_point.data[0])
    } else {
        check_compatibility(*x.shape, *x_scale.shape);
        check_compatibility(*x.shape, *x_zero_point.shape);
        check_compatibility(*x_scale.shape, *x_zero_point.shape);
        dequantize_per_axis(@(*x).into(), x_scale, x_zero_point)
    }
}
fn dequantize_per_axis<
    T,
    impl TTensor: TensorTrait<T>,
    impl TTensorSub: Sub<Tensor<T>>,
    impl TTensorMul: Mul<Tensor<T>>,
    impl TCopy: Copy<T>,
    impl TDrop: Drop<T>
>(
    x: @Tensor<T>, x_scale: @Tensor<T>, x_zero_point: @Tensor<T>
) -> Tensor::<T> {
    // y = (x - zero_point) * scale, element-wise via broadcasting tensor ops.
    let centered = *x - *x_zero_point;
    centered * *x_scale
}
fn dequantize_element_wise<
    Q,
    T,
    impl TTensor: TensorTrait<T>,
    impl QIntoT: Into<Q, T>,
    impl TSub: Sub<T>,
    impl TMul: Mul<T>,
    impl TDrop: Drop<T>,
    impl TCopy: Copy<T>,
    impl QCopy: Copy<Q>,
    impl QDrop: Drop<Q>
>(
    mut x: Tensor::<Q>, x_scale: T, x_zero_point: T
) -> Tensor::<T> {
    // Apply the same scalar (scale, zero_point) pair to every element.
    let mut dequantized: Array<T> = array![];
    loop {
        match x.data.pop_front() {
            Option::Some(item) => {
                dequantized.append(dequantize(*item, x_scale, x_zero_point));
            },
            Option::None => { break; }
        };
    };

    TensorTrait::new(x.shape, dequantized.span())
}
fn |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.