_index.into() * (*strides.at(nx)).into() + *b_index.at(nx));
r_index.append(n_index.into());
nx += 1;
};
if r_index_check(r_index.span(), shape_out) {
let mut indices: Array<i32> = array![];
let mut i1_index: Array<usize> = array![];
let mut i2_index: Array<usize> = array![];
let mut idiff_index: Array<usize> = array![];
let mut nx = 0;
while nx != nd {
indices.append(*io_index.at(nx) + (*kernel_shape.at(nx) % 2).into());
i1_index
.append(
I32Number::max(0, *indices.at(nx) + *o_index.at(nx)).into()
);
i2_index
.append(
I32Number::min(
(*(*X).shape.at(nx + 2)).into(),
*indices.at(nx)
+ *o_index.at(nx)
+ (*kernel_shape.at(nx)).into()
)
.into()
);
if nx != nd - 1 {
idiff_index.append(*i2_index.at(nx) - *i1_index.at(nx));
}
nx += 1;
};
let i1_index = i1_index.span();
let mut img: Array<T> = array![];
let img = if nx == 1 {
let img = SpanTrait::slice(
(*X).data,
n * sN + c * sC + *i1_index.at(nd - 1),
*i2_index.at(nd - 1) - *i1_index.at(nd - 1)
);
img
} else {
let i_stride = stride(idiff_index.span());
let mut ii = 0;
while ii != *i_stride.at(0) * *idiff_index.at(0) {
let mut flatten_index = ii;
let mut start = n * *x_stride.at(0) + c * *x_stride.at(1);
let mut nx = 0;
while nx != nd - 1 {
let (ii_index, rem) = DivRem::div_rem(
flatten_index, (*i_stride.at(nx)).try_into().unwrap()
);
flatten_index = rem;
start += (*i1_index.at(nx) + ii_index) * *x_stride.at(2 + nx);
nx += 1;
};
img
.append_span(
SpanTrait::slice(
(*X).data,
start + *i1_index.at(nd - 1),
*i2_index.at(nd - 1) - *i1_index.at(nd - 1)
)
);
ii += 1;
};
img.span()
};
let s = if w.len() != img.len() {
let mut j1_index: Array<usize> = array![];
let mut j2_index: Array<usize> = array![];
let mut jdiff_index: Array<usize> = array![];
let mut nx = 0;
while nx != nd {
j1_index
.append(
I32Number::max(0, -*indices.at(nx) - *o_index.at(nx)).into()
);
j2_index
.append(
I32Number::min(
(*(*X).shape.at(nx + 2)).into()
- *indices.at(nx)
- *o_index.at(nx),
(*kernel_shape.at(nx)).into()
)
.into()
);
if nx != nd - 1 {
jdiff_index.append(*j2_index.at(nx) - *j1_index.at(nx));
}
nx += 1;
};
let j1_index = j1_index.span();
let mut w_: Array<T> = array![];
let w_ = if nx == 1 {
let w_ = SpanTrait::slice(
w,
*j1_index.at(nd - 1),
*j2_index.at(nd - 1) - *j1_index.at(nd - 1)
);
w_
} else {
let j_stride = stride(jdiff_index.span());
let mut jj = 0;
while jj != *j_stride.at(0) * *jdiff_index.at(0) {
let mut flatten_index = jj;
let mut start = 0;
let mut nx = 0;
while nx != nd - 1 {
let (jj_index, rem) = DivRem::div_rem(
flatten_index, (*j_stride.at(nx)).try_into().unwrap()
);
flatten_index = rem;
start += (*j1_index.at(nx) + jj_index)
* *kernel_shape.at(nx);
nx += 1;
};
w_
.append_span(
SpanTrait::slice(
w,
start + *j1_index.at(nd - 1),
*j2_index.at(nd - 1) - *j1_index.at(nd - 1)
)
);
jj += 1;
};
w_.span()
};
dot(img, w_)
} else {
dot(img, w)
};
let mut res_index = n * *res_strides.at(0) + nw * *res_strides.at(1);
let mut nx = 0;
while nx != nd {
res_index += (*r_index.at(nx)).into() * *res_strides.at(2 + nx);
nx += 1;
};
res.set(res_index, res.at(res_index) + s);
};
i += 1
};
c += 1;
};
nw += 1;
};
n += 1;
};
let mut res_data: Array<T> = array![];
let mut i = 0;
while i != res.len() {
res_data.append(res.at(i));
i += 1;
};
TensorTrait::new(res_shape, res_data.span())
}
fn r_index_check(r_index: Span<i32>, shape_out: Span<usize>) -> bool {
let mut i = 0;
let flag = loop {
if i == r_index.len() {
break true;
}
if *r_index.at(i) >= (*shape_out.at(i)).into() {
break false;
}
i += 1;
};
flag
}
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>(
pA: Span<T>, start: usize
) -> T {
let mut i = start;
let mut prod = NumberTrait::one();
while i != pA.len() {
prod = prod * (*pA.at(i));
i += 1;
};
prod
}
fn min(mut a: Span<usize>) -> usize {
assert(a.len() > 0, 'span cannot be empty');
let mut min = *a.at(0);
loop {
match a.pop_front() {
Option::Some(v) => { if *v < min {
min = *v;
}; },
Option::None => { break min; }
};
}
}
fn max(mut a: Span<usize>) -> usize {
assert(a.len() > 0, 'span cannot be empty');
let mut max = *a.at(0);
loop {
match a.pop_front() {
Option::Some(v) => { if *v > max {
max = *v;
}; },
Option::None => { break max; }
};
}
}
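// Example (hypothetical): min(array![3, 1, 2].span()) returns 1 and
// max(array![3, 1, 2].span()) returns 3; both assert on an empty span.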
fn arange(start: usize, end: usize, step: usize) -> Span<usize> {
assert((end - start) % step == 0, 'incompatible step value');
let mut arr: Array<usize> = array![];
let mut i = start;
while i < end {
arr.append(i);
i += step;
};
arr.span()
}
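// Example (hypothetical): arange(0, 6, 2) yields [0, 2, 4]; the assert above
// requires (end - start) to be an exact multiple of step.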
fn cartesian(mut arrays: Span<Span<usize>>,) -> Span<Span<usize>> {
let mut n = 1;
let mut i = arrays.len() - 1;
loop {
n = n * (*(arrays.at(i))).len();
if i == 0 {
break;
}
i -= 1;
};
let mut i = 0;
let mut size_arrays: Array<usize> = array![];
while i != arrays.len() {
size_arrays.append((*(arrays.at(i))).len());
i += 1;
};
let size_arrays = size_arrays.span();
let mut output_arrays = array![];
let mut m = n;
let mut i = 0;
while i != arrays.len() {
m = m / (*(arrays.at(i))).len();
let mut out = repeat(*(arrays.at(i)), m);
out = repeat_2(out, size_arrays, i);
output_arrays.append(out);
i += 1;
};
let output_arrays = output_arrays.span();
let mut i = 0;
let mut ret = ArrayTrait::new();
while i != n {
let mut j = 0;
let mut x: Array<usize> = array![];
while j != arrays.len() {
x.append(*(output_arrays.at(j)).at(i));
j += 1;
};
ret.append(x.span());
i += 1;
};
ret.span()
}
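// Example (hypothetical): cartesian over the spans [0, 1] and [0, 1, 2] yields
// the six row-major tuples [0,0], [0,1], [0,2], [1,0], [1,1], [1,2], i.e. every
// combination of output coordinates.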
fn repeat_2(mut array: Array<usize>, size_array: Span<usize>, index: usize) -> Array<usize> {
let mut size = array.len();
let mut i = 0;
while i != index {
let mut j = 1;
while j != *size_array.at(index - 1 - i) {
let mut k = 0;
while k != size {
array.append(*array.at(k));
k += 1;
};
j += 1;
};
size = size * *size_array.at(index - 1 - i);
i += 1;
};
array
}
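// Example (hypothetical): repeat_2(array![0, 1, 2], array![2, 3].span(), 1)
// tiles the whole array twice, yielding [0, 1, 2, 0, 1, 2]; it complements
// repeat, which duplicates each element in place.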
fn repeat(array: Span<usize>, m: usize,) -> Array<usize> {
let mut out: Array<usize> = array![];
let mut j = 0;
while j != array.len() {
let mut k = 0;
while k != m {
out.append(*array.at(j));
k += 1;
};
j += 1;
};
out
}
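// Example (hypothetical): repeat(array![7, 8].span(), 3) yields
// [7, 7, 7, 8, 8, 8]: each element is repeated m times in place.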
fn dot<
T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +AddEq<T>, +Mul<T>,
>(
a: Span<T>, b: Span<T>
) -> T {
let mut i = 0;
let mut sum = NumberTrait::zero();
while i != a.len() {
sum = sum + *a.at(i) * *b.at(i);
i += 1;
};
sum
}
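// Example (hypothetical): dot(array![1, 2, 3].span(), array![4, 5, 6].span())
// computes 1*4 + 2*5 + 3*6 = 32; this variant does not assert equal lengths,
// so callers must pass spans of the same size.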
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{stride};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,};
use orion::operators::vec::{NullableVec, NullableVecImpl};
enum AUTO_PAD {
NOTSET,
SAME_UPPER,
SAME_LOWER,
VALID
}
fn conv_transpose<
T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>, +Mul<T>,
>(
X: @Tensor<T>,
W: @Tensor<T>,
B: Option<@Tensor<T>>,
auto_pad: Option<AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<T> {
let auto_pad = match auto_pad {
Option::Some(auto_pad) => auto_pad,
Option::None => { AUTO_PAD::NOTSET },
};
let dilations = match dilations {
Option::Some(dilations) => dilations,
Option::None => {
let mut dilations: Array<usize> = array![];
let mut i = 2;
while i != (*X).shape.len() {
dilations.append(1);
i += 1;
};
dilations.span()
},
};
let kernel_shape = match kernel_shape {
Option::Some(kernel_shape) => kernel_shape,
Option::None => {
let mut kernel_shape: Array<usize> = array![];
let mut i = 2;
while i != (*W).shape.len() {
kernel_shape.append(*(*W).shape.at(i));
i += 1;
};
kernel_shape.span()
},
};
let output_padding = match output_padding {
Option::Some(output_padding) => output_padding,
Option::None => {
let mut output_padding: Array<usize> = array![];
let mut i = 2;
while i != (*X).shape.len() {
output_padding.append(0);
output_padding.append(0);
i += 1;
};
output_padding.span()
},
};
let strides = match strides {
Option::Some(strides) => strides,
Option::None => {
let mut strides: Array<usize> = array![];
let mut i = 2;
while i != (*X).shape.len() {
strides.append(1);
i += 1;
};
strides.span()
},
};
let (pads, _, output_shape) = match pads {
Option::Some(pads) => {
let n_dims = (*X).shape.len() - 2;
let output_shape = match output_shape {
Option::Some(output_shape) => output_shape,
Option::None => {
let mut output_shape: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
output_shape
.append(
(*(*X).shape.at(i + 2) - 1) * *strides.at(i)
+ *output_padding.at(i)
+ ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1)
- (*pads.at(i) + *pads.at(i + n_dims))
);
i += 1;
};
output_shape.span()
},
};
(pads, n_dims, output_shape)
},
Option::None => {
let (pads, n_dims, output_shape) = match auto_pad {
AUTO_PAD::NOTSET => {
let mut pads: Array<usize> = array![];
let mut i = 0;
while i != strides.len() * 2 {
pads.append(0);
i += 1;
};
let pads = pads.span();
let n_dims = (*X).shape.len() - 2;
let output_shape = match output_shape {
Option::Some(output_shape) => output_shape,
Option::None => {
let mut output_shape: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
output_shape
.append(
(*(*X).shape.at(i + 2) - 1) * *strides.at(i)
+ *output_padding.at(i)
+ ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1)
- (*pads.at(i) + *pads.at(i + n_dims))
);
i += 1;
};
output_shape.span()
},
};
(pads, n_dims, output_shape)
},
AUTO_PAD::SAME_UPPER => {
let output_shape = match output_shape {
Option::Some(output_shape) => output_shape,
Option::None => {
let mut output_shape: Array<usize> = array![];
let mut i = 0;
while i != strides.len() {
output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i));
i += 1;
};
output_shape.span()
},
};
let mut total_padding: Array<usize> = array![];
let mut i = 0;
while i != output_shape.len() {
total_padding
.append(
(*(*X).shape.at(i + 2) - 1) * *strides.at(i)
+ *output_padding.at(i)
+ ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1)
- *output_shape.at(i)
);
i += 1;
};
let total_padding = total_padding.span();
let mut pads: Array<usize> = array![];
let mut i = 0;
while i != output_shape.len() {
pads.append(*total_padding.at(i) / 2);
i += 1;
};
let mut i = 0;
while i != output_shape.len() {
pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2));
i += 1;
};
(pads.span(), pads.len() / 2, output_shape)
},
AUTO_PAD::SAME_LOWER => {
let output_shape = match output_shape {
Option::Some(output_shape) => output_shape,
Option::None => {
let mut output_shape: Array<usize> = array![];
let mut i = 0;
while i != strides.len() {
output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i));
i += 1;
};
output_shape.span()
},
};
let mut total_padding: Array<usize> = array![];
let mut i = 0;
while i != output_shape.len() {
total_padding
.append(
(*(*X).shape.at(i + 2) - 1) * *strides.at(i)
+ *output_padding.at(i)
+ ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1)
- *output_shape.at(i)
);
i += 1;
};
let total_padding = total_padding.span();
let mut pads: Array<usize> = array![];
let mut i = 0;
while i != output_shape.len() {
pads.append(*total_padding.at(i) - *total_padding.at(i) / 2);
i += 1;
};
let mut i = 0;
while i != output_shape.len() {
pads.append(*total_padding.at(i) / 2);
i += 1;
};
(pads.span(), pads.len() / 2, output_shape)
},
AUTO_PAD::VALID => {
let mut pads: Array<usize> = array![];
let mut i = 0;
while i != strides.len() * 2 {
pads.append(0);
i += 1;
};
let pads = pads.span();
let n_dims = (*X).shape.len() - 2;
let output_shape = match output_shape {
Option::Some(output_shape) => output_shape,
Option::None => {
let mut output_shape: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
output_shape
.append(
(*(*X).shape.at(i + 2) - 1) * *strides.at(i)
+ *output_padding.at(i)
+ ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1)
- (*pads.at(i) + *pads.at(i + n_dims))
);
i += 1;
};
output_shape.span()
},
};
(pads, n_dims, output_shape)
},
};
(pads, n_dims, output_shape)
},
};
let group = match group {
Option::Some(group) => group,
Option::None => { 1 },
};
let mut kernel_shape: Array<usize> = array![];
let mut i = 2;
while i != (*W).shape.len() {
kernel_shape.append(*(*W).shape.at(i));
i += 1;
};
let kernel_shape = kernel_shape.span();
let kernel_size = prod(kernel_shape, 0);
let mut num_output_channels = *(*W).shape.at(1) * group;
let mut kernel_dim = (num_output_channels / group) * kernel_size;
let C = *(*X).shape.at(1);
let m = kernel_dim;
let n = prod((*X).shape, 2);
let k = C / group;
let mut final: Array<T> = array![];
if group == 1 {
let mut image_id = 0;
while image_id != *(*X).shape.at(0) {
let w_t = TensorTrait::new(array![k, m].span(), (*W).data)
.transpose(array![1, 0].span());
let image = SpanTrait::slice((*X).data, image_id * k * n, k * n);
let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image));
let gemmc = gemm
.reshape(
array![
num_output_channels.try_into().unwrap(),
(m / num_output_channels).try_into().unwrap(),
n.try_into().unwrap()
]
.span(),
false
);
let mut c = 0;
while c != num_output_channels {
let gemmc_c = TensorTrait::new(
array![m / num_output_channels, n].span(),
SpanTrait::slice(
gemmc.data,
(m / num_output_channels) * n * c,
(m / num_output_channels) * n
)
);
let mut res = col2im_naive_implementation(
@gemmc_c, output_shape, kernel_shape, dilations, pads, strides
);
match B {
Option::Some(B) => {
let mut i = 0;
while i != res.len() {
res.set(i, res.at(i) + *(*B).data.at(c));
i += 1;
};
},
Option::None => {},
}
c += 1;
let mut i = 0;
while i != res.len() {
final.append(res.at(i));
i += 1;
};
};
image_id += 1;
};
} else {
let mut output_array: Array<Span<T>> = array![];
let mut i = 0;
let mut output_size = 1;
while i != output_shape.len() {
output_size *= *output_shape.at(i);
i += 1;
};
let mut group_id = 0;
while group_id != group {
let mut group_X: Array<T> = array![];
let mut group_W: Array<T> = array![];
let mut image_id = 0;
while image_id != *(*X).shape.at(0) {
let start = image_id * n * C + (group_id * C / group) * n;
let end = image_id * n * C + ((group_id + 1) * C / group) * n;
let mut i = start;
while i != end {
group_X.append(*(*X).data.at(i));
i += 1;
};
image_id += 1;
};
let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size;
let end = (group_id + 1) * C / group * *(*W).shape.at(1) * kernel_size;
let mut i = start;
while i != end {
group_W.append(*(*W).data.at(i));
i += 1;
};
let mut shape_X: Array<usize> = array![];
shape_X.append(*(*X).shape.at(0));
shape_X.append(C / group);
let mut i = 2;
while i != (*X).shape.len() {
shape_X.append(*(*X).shape.at(i));
i += 1;
};
let mut shape_W: Array<usize> = array![];
shape_W.append(C / group);
let mut i = 1;
while i != (*W).shape.len() {
shape_W.append(*(*W).shape.at(i));
i += 1;
};
let group_X = TensorTrait::new(shape_X.span(), group_X.span());
let group_W = TensorTrait::new(shape_W.span(), group_W.span());
let group_output = conv_transpose(
@group_X,
@group_W,
B,
Option::Some(auto_pad),
Option::Some(dilations),
Option::Some(1),
Option::Some(kernel_shape),
Option::Some(output_padding),
Option::Some(output_shape),
Option::Some(pads),
Option::Some(strides)
);
output_array.append(group_output.data);
group_id += 1;
};
let output_array = output_array.span();
let mut image_id = 0;
while image_id != *(*X).shape.at(0) {
let mut group_id = 0;
while group_id != group {
let group_output = *output_array.at(group_id);
let mut i = image_id * output_size * (num_output_channels / group);
while i != (image_id + 1)
* output_size
* (num_output_channels / group) {
final.append(*group_output.at(i));
i += 1;
};
group_id += 1;
};
image_id += 1;
};
}
let mut shape = array![*(*X).shape.at(0), num_output_channels];
let mut i = 0;
while i != output_shape.len() {
shape.append(*output_shape.at(i));
i += 1;
};
TensorTrait::new(shape.span(), final.span())
}
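// Example (hypothetical): for a 1-D input of spatial length 3 with stride 2,
// kernel 3, dilation 1, zero pads and zero output_padding, the shape rule used
// above gives (3 - 1) * 2 + 0 + ((3 - 1) * 1 + 1) - 0 = 7 output positions.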
fn get_image<T, +Drop<T>, +Copy<T>>(self: @Tensor<T>, row: usize) -> Span<T> {
assert((*self).shape.len() == 2, 'Expected a 2D tensor');
let row_length = *self.shape[1];
let start = row * row_length;
(*self).data.slice(start, row_length)
}
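// Example (hypothetical): for a tensor of shape [3, 4], get_image(@t, 1)
// returns the 4-element slice starting at flat offset 4, i.e. the second row.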
fn col2im_naive_implementation<
T, MAG, +TensorTrait<T>, +NumberTrait<T, MAG>, +Copy<T>, +Drop<T>, +Add<T>,
>(
data: @Tensor<T>,
image_shape: Span<usize>,
kernel_shape: Span<usize>,
dilations: Span<usize>,
pads: Span<usize>,
strides: Span<usize>,
) -> NullableVec<T> {
let n_dims = pads.len() / 2;
col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides);
let mut dim_col: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
dim_col
.append(
(*image_shape.at(i)
+ (*pads.at(i) + *pads.at(i + n_dims))
- (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1))
/ *strides.at(i)
+ 1
);
i += 1;
};
let dim_col = dim_col.span();
let stride_img = stride(image_shape);
let mut data_im = NullableVecImpl::new();
data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero());
let kernel_size = prod(kernel_shape, 0);
let col_size = prod(dim_col, 0);
let mut c_col = 0;
while c_col != kernel_size {
let offset = get_indices(c_col, kernel_shape).span();
let mut col = 0;
while col != col_size {
let ind_col = get_indices(col, dim_col).span();
let mut ind_im: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads
.at(i) {
let neg_index = *pads.at(i)
- (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i));
ind_im.append(*image_shape.at(i) + neg_index);
} else {
ind_im
.append(
*ind_col.at(i) * *strides.at(i)
+ *offset.at(i) * *dilations.at(i)
- *pads.at(i)
);
}
i += 1;
};
let ind_im = ind_im.span();
if !is_out(ind_im, image_shape) {
let mut index = 0;
let mut i = 0;
while i != image_shape.len() {
index += *stride_img.at(i) * *ind_im.at(i);
i += 1;
};
data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col));
}
col += 1;
};
c_col += 1;
};
data_im
}
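// Informal sketch of the loop above: each column index c_col corresponds to one
// kernel offset; for every output column, the offset is scaled by dilations,
// the column index by strides, shifted by pads, and the value is accumulated
// into data_im whenever the resulting image index is in bounds.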
fn col2im_shape_check<T, +TensorTrait<T>, +Copy<T>, +Drop<T>,>(
X: @Tensor<T>,
output_shape: Span<usize>,
kernel_shape: Span<usize>,
dilations: Span<usize>,
pads: Span<usize>,
strides: Span<usize>,
) {
let n_input_plane = *(*X).shape.at(0);
let kernel_size = prod(kernel_shape, 0);
assert(n_input_plane % kernel_size == 0, 'wrong input dimension');
let input_length = *(*X).shape.at(1);
let n_dims = output_shape.len();
let mut n_blocks: Array<usize> = array![];
let mut i = 0;
while i != n_dims {
n_blocks
.append(
(*output_shape.at(i)
+ (*pads.at(i) + *pads.at(i + n_dims))
- *dilations.at(i) * (*kernel_shape.at(i) - 1)
- 1)
/ *strides.at(i)
+ 1
);
i += 1;
};
let block_size = prod(n_blocks.span(), 0);
assert(input_length == block_size, 'input_length != block_size');
}
fn get_indices(index: usize, shape: Span<usize>,) -> Array<usize> {
let mut i = index;
let mut res: Array<usize> = array![];
let mut k = shape.len() - 1;
while k != 0 {
let m = i % *shape.at(k);
res.append(m);
i -= m;
i /= *shape.at(k);
k -= 1;
};
let mut new_res: Array<usize> = array![];
new_res.append(i);
let mut i = shape.len() - 1;
while i != 0 {
new_res.append(*res.at(i - 1));
i -= 1;
};
new_res
}
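// Example (hypothetical): get_indices(5, array![2, 3].span()) unravels the
// flat offset 5 into the row-major multi-index [1, 2] for a 2x3 shape.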
fn is_out(ind: Span<usize>, shape: Span<usize>,) -> bool {
let mut n = 0;
let is_out = loop {
if n == ind.len() {
break false;
}
let s = *shape.at(n);
let i = *ind.at(n);
if i < 0 {
break true;
}
if i >= s {
break true;
}
n += 1;
};
is_out
}
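// Example (hypothetical): is_out(array![1, 3].span(), array![2, 3].span())
// returns true, since index 3 falls outside a dimension of size 3; [1, 2]
// would be in bounds.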
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>(
pA: Span<T>, start: usize
) -> T {
let mut i = start;
let mut prod = NumberTrait::one();
while i != pA.len() {
prod = prod * (*pA.at(i));
i += 1;
};
prod
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
fn depth_to_space<
T,
impl TTensor: TensorTrait<T>,
impl TAdd: Add<T>,
impl TMul: Mul<T>,
impl TTensorAdd: Add<Tensor<T>>,
impl TPartialOrd: PartialOrd<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
tensor: Tensor<T>, blocksize: usize, mode: felt252
) -> Tensor<T> {
assert((tensor.shape).len() == 4, 'Expected a 4D tensor');
let blocksize_i32: i32 = blocksize.try_into().unwrap();
let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap();
let C: u32 = (*(tensor.shape).at(1)).try_into().unwrap();
let H: i32 = (*(tensor.shape).at(2)).try_into().unwrap();
let W: i32 = (*(tensor.shape).at(3)).try_into().unwrap();
let finalshape: Array<i32> = array![
b,
(C / (blocksize * blocksize)).try_into().unwrap(),
(H * blocksize_i32),
(W * blocksize_i32)
];
if mode == 'DCR' {
let tmpshape: Array<i32> = array![
b, blocksize_i32, blocksize_i32, (C / (blocksize * blocksize)).try_into().unwrap(), H, W
];
let reshaped = (tensor).reshape(target_shape: tmpshape.span(), allowzero: false);
let transposed = reshaped.transpose(axes: array![0, 3, 4, 1, 5, 2].span());
transposed.reshape(target_shape: finalshape.span(), allowzero: false)
} else {
let tmpshape: Array<i32> = array![
b, (C / (blocksize * blocksize)).try_into().unwrap(), blocksize_i32, blocksize_i32, H, W
];
let reshaped = (tensor).reshape(target_shape: tmpshape.span(), allowzero: false);
let transposed = reshaped.transpose(axes: array![0, 1, 4, 2, 5, 3].span());
transposed.reshape(target_shape: finalshape.span(), allowzero: false)
}
}
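// Example (hypothetical): an input of shape [1, 8, 2, 3] with blocksize 2
// yields shape [1, 2, 4, 6]; the 'DCR' branch splits channels as
// [b, 2, 2, C/4, H, W] before the transpose, the other branch as
// [b, C/4, 2, 2, H, W].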
use core::array::SpanTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::{core::{Tensor, TensorTrait}, math::arithmetic::mul_by_scalar};
/// Cf: NNTrait::gemm docstring
fn gemm<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TAddTensor: Add<Tensor<T>>,
impl TNumberTrait: NumberTrait<T, MAG>,
impl TPartialEq: PartialEq<T>,
impl TMul: Mul<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
A: Tensor<T>,
B: Tensor<T>,
C: Option<Tensor<T>>,
alpha: Option<T>,
beta: Option<T>,
transA: bool,
transB: bool
) -> Tensor<T> {
let mut A = A;
let mut B = B;
let alpha: T = if alpha.is_some() {
alpha.unwrap()
} else {
NumberTrait::one()
};
let beta: T = if beta.is_some() {
beta.unwrap()
} else {
NumberTrait::one()
};
if transA {
A = A.transpose(array![1, 0].span());
}
if transB {
B = B.transpose(array![1, 0].span());
}
match C {
Option::Some(c) => { mul_by_scalar(@A.matmul(@B), alpha) + mul_by_scalar(@c, beta) },
Option::None(_) => { mul_by_scalar(@A.matmul(@B), alpha) }
}
}
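// Example (hypothetical): with A of shape [m, k], B of shape [k, n] and C of
// shape [m, n], gemm(A, B, Option::Some(C), Option::None, Option::None, false,
// false) computes A.matmul(@B) + C, since alpha and beta both default to one.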
use core::debug::PrintTrait;
use orion::numbers::FP16x16;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{stride};
use orion::operators::tensor::{FP16x16Tensor, TensorTrait, Tensor, U32Tensor,};
use orion::operators::vec::{NullableVec, NullableVecImpl};
enum MODE {
NEAREST,
LINEAR,
CUBIC,
}
enum PADDING_MODE {
ZEROS,
BORDER,
REFLECTION,
}
fn grid_sample<
T,
MAG,
+TensorTrait<T>,
+NumberTrait<T, MAG>,
+Copy<T>,
+Drop<T>,
+Add<T>,
+Mul<T>,
+Sub<T>,
+Div<T>,
+AddEq<T>,
+PrintTrait<T>,
+PartialOrd<T>,
+PartialEq<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+Rem<T>,
+Neg<T>,
+SubEq<T>,
>(
X: @Tensor<T>,
grid: @Tensor<T>,
align_corner: Option<usize>,
mode: Option<MODE>,
padding_mode: Option<PADDING_MODE>,
) -> Tensor<T> {
let align_corner = match align_corner {
Option::Some(align_corner) => align_corner,
Option::None => 0,
};
let mode = match mode {
Option::Some(mode) => mode,
Option::None => MODE::LINEAR,
};
let padding_mode = match padding_mode {
Option::Some(padding_mode) => padding_mode,
Option::None => PADDING_MODE::ZEROS,
};
let x_dims = (*X).shape;
let x_stride = stride((*X).shape);
let grid_dims = (*grid).shape;
let grid_stride = stride((*grid).shape);
let N = *x_dims.at(0);
let C = *x_dims.at(1);
let num_dims = x_dims.len() - 2;
let dims = SpanTrait::slice(x_dims, 2, num_dims);
let border = prepare_border(X, dims, align_corner);
let mut y_dims: Array<usize> = array![N, C];
y_dims.append_span(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2));
let y_dims = y_dims.span();
if prod(y_dims, 0) == 0 {
return TensorTrait::new(array![].span(), array![].span());
}
let mut Y: Array<T> = array![];
let mut n = 0;
while n != N {
let grid_data = SpanTrait::slice((*grid).data, n * *grid_stride.at(0), *grid_stride.at(0));
let grid_data_stride = SpanTrait::slice(grid_stride, 1, grid_stride.len() - 1);
let mut c = 0;
while c != C {
let X_data = SpanTrait::slice(
(*X).data, n * *x_stride.at(0) + c * *x_stride.at(1), *x_stride.at(1)
);
let X_data_stride = SpanTrait::slice(x_stride, 2, grid_stride.len() - 2);
let all_coords = get_all_coords(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2));
let mut ix = 0;
while ix != all_coords.len() {
let ox = *all_coords.at(ix);
let nx = get_sub(grid_data, grid_data_stride, ox);
let nx = reverse(nx);
let x = gs_denormalize_coordinates(nx, dims, align_corner);
let x = match mode {
MODE::NEAREST => { rint(x) },
MODE::LINEAR => { x },
MODE::CUBIC => { x },
};
let mut new_x: Array<T> = array![];
let mut i = 0;
while i != x.len() {
let v = *x.at(i);
let mut x_min = *border.at(i);
let mut x_max = *border.at(i + num_dims);
let new_v = if v < x_min || v > x_max {
let v = match padding_mode {
PADDING_MODE::ZEROS => { v },
PADDING_MODE::BORDER => {
clamp(
v,
NumberTrait::zero(),
NumberTrait::new_unscaled((*dims.at(i)).into(), false)
- NumberTrait::one()
)
},
PADDING_MODE::REFLECTION => { gs_reflect(v, x_min, x_max) },
};
v
} else {
v
};
new_x.append(new_v);
i += 1;
};
let x = new_x.span();
let y = match mode {
MODE::NEAREST => {
pixel_at_ndarray(X_data, dims, X_data_stride, x, border, padding_mode)
},
MODE::LINEAR => {
gs_linear_interpolation_nd_with_x(
X_data, dims, X_data_stride, x, border, padding_mode
)
},
MODE::CUBIC => {
gs_cubic_interpolation_nd_with_x(
X_data, dims, X_data_stride, x, border, padding_mode
)
},
};
Y.append(y);
ix += 1;
};
c += 1;
};
n += 1;
};
TensorTrait::new(y_dims, Y.span())
}
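// Informal sketch: for every batch, channel and output coordinate, the grid
// value is denormalized into input space, snapped (NEAREST) or kept as-is,
// clamped or reflected per padding_mode when out of range, then sampled with
// nearest, linear or cubic interpolation.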
fn gs_cubic_interpolation_1d_with_x<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+AddEq<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Rem<T>,
+PrintTrait<T>,
>(
data: Span<T>, x: T, border: Span<T>, padding_mode: PADDING_MODE
) -> T {
let x_0 = NumberTrait::floor(x);
let x_1 = x_0 + NumberTrait::one();
let x_2 = x_1 + NumberTrait::one();
let x_minus_1 = x_0 - NumberTrait::one();
let coeffs = gs_get_cubic_coeffs(x - x_0);
let v_0 = pixel_at_array(data, x_minus_1.try_into().unwrap(), border, padding_mode);
let v_1 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode);
let v_2 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode);
let v_3 = pixel_at_array(data, x_2.try_into().unwrap(), border, padding_mode);
let v: Span<T> = array![v_0, v_1, v_2, v_3].span();
dot(coeffs, v)
}
fn gs_get_cubic_coeffs<
T,
MAG,
+TensorTrait<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Copy<T>,
+Drop<T>,
+AddEq<T>,
+Add<T>,
+Div<T>,
+Mul<T>,
+Sub<T>,
>(
x: T
) -> Span<T> {
let one = NumberTrait::one();
let two = one + NumberTrait::one();
let three = two + NumberTrait::one();
let four = three + NumberTrait::one();
let five = four + NumberTrait::one();
let eight = four + four;
let A = NumberTrait::neg(three / four);
let x = NumberTrait::abs(x);
let mut coeffs: Array<T> = array![];
coeffs.append(((A * (x + one) - five * A) * (x + one) + eight * A) * (x + one) - four * A);
coeffs.append(((A + two) * x - (A + three)) * x * x + one);
coeffs.append(((A + two) * (one - x) - (A + three)) * (one - x) * (one - x) + one);
coeffs
.append(
((A * ((one - x) + one) - five * A) * ((one - x) + one) + eight * A) * ((one - x) + one)
- four * A
);
coeffs.span()
}
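// The four coefficients above are the standard cubic-convolution (Keys)
// weights with A = -3/4, evaluated at distances x + 1, x, 1 - x and 2 - x
// from the sampling point.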
fn gs_cubic_interpolation_nd_with_x<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+AddEq<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Rem<T>,
+PrintTrait<T>,
>(
data: Span<T>,
data_dims: Span<usize>,
data_stride: Span<usize>,
x: Span<T>,
border: Span<T>,
padding_mode: PADDING_MODE
) -> T {
let num_dims = data_dims.len();
assert(num_dims == x.len(), 'pixel at nd array: wrong dim');
assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim');
if num_dims == 1 {
let a = gs_cubic_interpolation_1d_with_x(data, *x.at(0), border, padding_mode);
return a;
}
let mut res1d: Array<T> = array![];
let mut i = 0;
while i != *data_dims.at(0) {
let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0));
let sub_x = SpanTrait::slice(x, 1, x.len() - 1);
let data_dims_sub = SpanTrait::slice(data_dims, 1, data_dims.len() - 1);
let data_stride_sub = SpanTrait::slice(data_stride, 1, data_stride.len() - 1);
let border1 = SpanTrait::slice(border, 1, num_dims - 1);
let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1);
let mut border = ArrayTrait::new();
border.append_span(border1);
border.append_span(border2);
let r = gs_cubic_interpolation_nd_with_x(
sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode
);
res1d.append(r);
i += 1;
};
gs_cubic_interpolation_1d_with_x(
res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode
)
}
fn gs_get_linear_coeffs<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Sub<T>,>(
x: T
) -> Span<T> {
let x = NumberTrait::abs(x);
array![NumberTrait::one() - x, x].span()
}
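// Example (hypothetical): gs_get_linear_coeffs returns [0.75, 0.25] for
// x = 0.25, the weights of the two neighboring pixels.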
fn gs_linear_interpolation_1d_with_x<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Rem<T>,
+PrintTrait<T>
>(
data: Span<T>, x: T, border: Span<T>, padding_mode: PADDING_MODE
) -> T {
let x_0 = NumberTrait::floor(x);
let x_1 = x_0 + NumberTrait::one();
let coeffs = gs_get_linear_coeffs(x - x_0);
let v_0 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode);
let v_1 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode);
let v: Span<T> = array![v_0, v_1].span();
dot(coeffs, v)
}
fn dot<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +Add<T>, +TensorTrait<T>, +Mul<T>,>(
a: Span<T>, b: Span<T>
) -> T {
assert(a.len() == b.len(), 'dot: wrong len');
let mut i = 0;
let mut sum = NumberTrait::zero();
while i != a.len() {
sum = sum + *a.at(i) * *b.at(i);
i += 1;
};
sum
}
fn gs_linear_interpolation_nd_with_x<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Rem<T>,
+PrintTrait<T>
>(
data: Span<T>,
data_dims: Span<usize>,
data_stride: Span<usize>,
x: Span<T>,
border: Span<T>,
padding_mode: PADDING_MODE
) -> T {
let num_dims = data_dims.len();
assert(num_dims == x.len(), 'pixel at nd array: wrong dim');
assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim');
if num_dims == 1 {
let a = gs_linear_interpolation_1d_with_x(data, *x.at(0), border, padding_mode);
return a;
}
let mut res1d: Array<T> = array![];
let mut i = 0;
while i != *data_dims.at(0) {
let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0));
let sub_x = SpanTrait::slice(x, 1, x.len() - 1);
let data_dims_sub = SpanTrait::slice(data_dims, 1, data_dims.len() - 1);
let data_stride_sub = SpanTrait::slice(data_stride, 1, data_stride.len() - 1);
let border1 = SpanTrait::slice(border, 1, num_dims - 1);
let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1);
let mut border = ArrayTrait::new();
border.append_span(border1);
border.append_span(border2);
let r = gs_linear_interpolation_nd_with_x(
sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode
);
res1d.append(r);
i += 1;
};
gs_linear_interpolation_1d_with_x(
res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode
)
}
fn pixel_at_ndarray<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Rem<T>,
+PrintTrait<T>,
>(
ndarray: Span<T>,
ndarray_dims: Span<usize>,
ndarray_stride: Span<usize>,
x: Span<T>,
border: Span<T>,
padding_mode: PADDING_MODE
) -> T {
let num_dims = ndarray_dims.len();
assert(num_dims == x.len(), 'pixel at nd array: wrong dim');
assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim');
let i = *x.at(0);
if num_dims == 1 {
return pixel_at_array(ndarray, *x.at(0), border, padding_mode);
}
let d = NumberTrait::new_unscaled((*ndarray_dims.at(0)).into(), false);
let ndarray = match padding_mode {
PADDING_MODE::ZEROS => {
let ndarray = if i >= NumberTrait::zero() && i < d {
SpanTrait::slice(
ndarray, i.try_into().unwrap() * *ndarray_stride.at(0), *ndarray_stride.at(0)
)
} else {
let ndarray: Span<T> = zeros(*ndarray_stride.at(0));
ndarray
};
ndarray
},
PADDING_MODE::BORDER => {
let i = clamp(i, NumberTrait::zero(), d - NumberTrait::one());
SpanTrait::slice(
ndarray, i.try_into().unwrap() * *ndarray_stride.at(0), *ndarray_stride.at(0)
)
},
PADDING_MODE::REFLECTION => {
let i: usize = (gs_reflect(i, *border.at(0), *border.at(num_dims))).try_into().unwrap();
SpanTrait::slice(ndarray, i * *ndarray_stride.at(0), *ndarray_stride.at(0))
},
};
let x = SpanTrait::slice(x, 1, x.len() - 1);
let ndarray_dims = SpanTrait::slice(ndarray_dims, 1, ndarray_dims.len() - 1);
let ndarray_stride = SpanTrait::slice(ndarray_stride, 1, ndarray_stride.len() - 1);
let border1 = SpanTrait::slice(border, 1, num_dims - 1);
let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1);
let mut border = ArrayTrait::new();
border.append_span(border1);
border.append_span(border2);
pixel_at_ndarray(ndarray, ndarray_dims, ndarray_stride, x, border.span(), padding_mode)
}
fn pixel_at_array<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+TryInto<T, usize>,
+Into<usize, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Rem<T>,
+PrintTrait<T>,
>(
array: Span<T>, i: T, border: Span<T>, padding_mode: PADDING_MODE
) -> T {
let d = NumberTrait::new_unscaled(array.len().into(), false);
let pixel = match padding_mode {
PADDING_MODE::ZEROS => {
let pixel = if i >= NumberTrait::zero() && i < d {
*array.at(i.try_into().unwrap())
} else {
NumberTrait::zero()
};
pixel
},
PADDING_MODE::BORDER => {
let i = clamp(i, NumberTrait::zero(), d - NumberTrait::one());
let pixel = *array.at(i.try_into().unwrap());
pixel
},
PADDING_MODE::REFLECTION => {
let i: usize = (gs_reflect(i, *border.at(0), *border.at(1))).try_into().unwrap();
let pixel = *array.at(i);
pixel
},
};
pixel
}
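// Example (hypothetical): for array = [10, 20, 30] and an out-of-range index,
// ZEROS padding returns 0, BORDER clamps the index into [0, len - 1], and
// REFLECTION maps it back through gs_reflect against border[0] and border[1].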
fn zeros<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>>(n: usize) -> Span<T> {
let mut zeros: Array<T> = array![];
let mut i = 0;
while i != n {
zeros.append(NumberTrait::zero());
i += 1;
};
zeros.span()
}
fn rint<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+SubEq<T>,
+Rem<T>,
+PartialEq<T>,
+PartialOrd<T>,
+Add<T>,
+Sub<T>
>(
data: Span<T>
) -> Span<T> {
let mut rint: Array<T> = array![];
let two: T = NumberTrait::one() + NumberTrait::one();
let mut i = 0;
while i != data.len() {
let x = *data.at(i);
let mut round = NumberTrait::round(x);
let diff = round - x;
if diff == NumberTrait::half() {
if round % two != NumberTrait::zero() {
round -= NumberTrait::one()
}
}
rint.append(round);
i += 1;
};
rint.span()
}
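// Example (hypothetical): rint rounds half-way cases to the nearest even
// value, so 0.5 rounds to 0 while 1.5 rounds to 2 (numpy.rint behavior).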
fn clamp<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +PartialOrd<T>>(
val: T, low: T, high: T
) -> T {
if val < low {
return low;
}
if val > high {
return high;
}
val
}
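// Example (hypothetical): clamp(5, 0, 3) returns 3 and clamp(-1, 0, 3) returns
// 0; values already inside [low, high] pass through unchanged.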
fn gs_reflect<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+PartialOrd<T>,
+PartialEq<T>,
+Add<T>,
+Sub<T>,
+Div<T>,
+Mul<T>,
+Rem<T>,
+PrintTrait<T>,
>(
x: T, x_min: T, x_max: T
) -> T {
let two: T = NumberTrait::one() + NumberTrait::one();
let mut fx = x;
let rng = x_max - x_min;
let fx = if fx < x_min {
let dx = x_min - fx;
let n = NumberTrait::floor(dx / rng);
let r = dx - n * rng;
let fx = if NumberTrait::round(n % two) == NumberTrait::zero() {
x_min + r
} else {
x_max - r
};
fx
} else if fx > x_max {
let dx = fx - x_max;
let n = NumberTrait::floor(dx / rng);
let r = dx - n * rng;
let fx = if NumberTrait::round(n % two) == NumberTrait::zero() {
x_max - r
} else {
x_min + r
};
fx
} else {
fx
};
fx
}
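// Example (hypothetical): gs_reflect(4.0, 0.0, 3.0) reflects the overshoot
// back inside the range: dx = 1, n = 0, so the result is x_max - 1 = 2.0; the
// parity of n alternates the reflection direction for larger overshoots.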
fn reverse<T, +Copy<T>, +Drop<T>,>(data: Span<T>) -> Span<T> {
let mut rev: Array<T> = array![];
let mut i = data.len();
while i != 0 {
rev.append(*data.at(i - 1));
i -= 1;
};
rev.span()
}
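// Example (hypothetical): reverse(array![1, 2, 3].span()) yields [3, 2, 1].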
fn get_sub<T, +Copy<T>, +Drop<T>,>(
data: Span<T>, stride_data: Span<usize>, index: Span<usize>,
) -> Span<T> {
let mut acc_indices = 0;
let mut i = 0;
while i != index.len() {
acc_indices += *index.at(i) * *stride_data.at(i);
i += 1;
};
SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1))
}
fn prod<T, MAG, +Drop<T>, +Copy<T>, +NumberTrait<T, MAG>, +TensorTrait<T>, +Mul<T>,>(
pA: Span<T>, start: usize
) -> T {
let mut i = start;
let mut prod = NumberTrait::one();
while i != pA.len() {
prod = prod * (*pA.at(i));
i += 1;
};
prod
}
fn prepare_border<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+Into<usize, MAG>,
+Neg<T>
>(
self: @Tensor<T>, dims: Span<usize>, align_corner: usize
) -> Span<T> {
let num_dims = dims.len();
let mut borders1: Array<T> = array![];
let mut borders2: Array<T> = array![];
let mut i = 0;
while i != num_dims {
if align_corner == 0 {
borders1.append(-NumberTrait::half());
borders2
.append(
NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::half()
);
} else {
borders1.append(NumberTrait::zero());
borders2
.append(
NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::one()
);
}
i += 1;
};
borders1.append_span(borders2.span());
borders1.span()
}
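// Example (hypothetical): for dims = [4, 4] and align_corner = 0 the result is
// [-0.5, -0.5, 3.5, 3.5]: lower bounds for every axis first, then upper bounds.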
fn arange(start: usize, end: usize, step: usize) -> Span<usize> {
assert((end - start) % step == 0, 'incompatible step value');
let mut arr: Array<usize> = array![];
let mut i = start;
while i != end {
arr.append(i);
i += step;
};
arr.span()
}
fn gs_denormalize_coordinates<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+Into<usize, MAG>
>(
n: Span<T>, dims: Span<usize>, align_corner: usize
) -> Span<T> {
let mut x: Array<T> = array![];
let mut i = 0;
while i != n.len() {
let v = *n.at(i);
let dim = *dims.at(i);
x.append(gs_denormalize(v, dim, align_corner));
i += 1;
};
x.span()
}
fn gs_denormalize<
T,
MAG,
+Drop<T>,
+Copy<T>,
+NumberTrait<T, MAG>,
+TensorTrait<T>,
+Mul<T>,
+Add<T>,
+Div<T>,
+Sub<T>,
+Into<usize, MAG>
>(
n: T, length: usize, align_corner: usize
) -> T { |
let length = NumberTrait::new_unscaled(length.into(), false);
let two: T = NumberTrait::one() + NumberTrait::one();
let x = if align_corner == 0 {
((n + NumberTrait::one()) * length - NumberTrait::one()) / two
} else {
(n + NumberTrait::one()) / two * (length - NumberTrait::one())
};
x
}
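// Example (hypothetical): gs_denormalize(0, 4, 0) maps the normalized center
// to ((0 + 1) * 4 - 1) / 2 = 1.5; with align_corner = 1, n = 1 maps to the
// last pixel index 3 instead of 3.5.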
fn get_all_coords(shape: Span<usize>) -> Span<Span<usize>> {
let mut all_indices = array![];
let mut i = 0;
while i != shape.len() {
all_indices.append(arange(0, *shape.at(i), 1));
i += 1;
};
cartesian(all_indices.span())
}
fn cartesian(mut arrays: Span<Span<usize>>,) -> Span<Span<usize>> {
let mut n = 1;
let mut i = arrays.len() - 1;
loop {
n = n * (*(arrays.at(i))).len();
if i == 0 {
break;
}
i -= 1;
};
let mut i = 0;
let mut size_arrays: Array<usize> = array![];
while i != arrays.len() {
size_arrays.append((*(arrays.at(i))).len());
i += 1;
};
let size_arrays = size_arrays.span();
let mut output_arrays = ArrayTrait::<Array<usize>>::new();
let mut m = n;
let mut i = 0;
while i != arrays.len() {
m = m / (*(arrays.at(i))).len();
let mut out = repeat(*(arrays.at(i)), m);
out = repeat_2(out, size_arrays, i);
output_arrays.append(out);
i += 1;
};
let output_arrays = output_arrays.span();
let mut i = 0;
let mut ret = array![];
while i != n {
let mut j = 0;
let mut x = ArrayTrait::new();
while j != arrays.len() {
x.append(*(output_arrays.at(j)).at(i));
j += 1;
};
ret.append(x.span());
i += 1;
};
ret.span()
}
fn repeat_2(mut array: Array<usize>, size_array: Span<usize>, index: usize) -> Array<usize> {
let mut size = array.len();
let mut i = 0;
while i != index {
let mut j = 1;
while j != *size_array.at(index - 1 - i) {
let mut k = 0;
while k != size {
array.append(*array.at(k));
k += 1;
};
j += 1;
};
size = size * *size_array.at(index - 1 - i);
i += 1;
};
array
}
fn repeat(array: Span<usize>, m: usize,) -> Array<usize> {
let mut out: Array<usize> = array![];
let mut j = 0;
while j != array.len() {
let mut k = 0;
while k != m {
out.append(*array.at(j));
k += 1;
};
j += 1;
};
out
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::hard_sigmoid docstring
fn hard_sigmoid<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TPartialOrd: PartialOrd<T>,
impl TAdd: Add<T>,
impl TMul: Mul<T>,
impl TDiv: Div<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut x: Tensor<T>, alpha: @T, beta: @T
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match x.data.pop_front() {
Option::Some(item) => {
let temp = (*item) * (*alpha) + (*beta);
let result = temp.min(NumberTrait::one()).max(NumberTrait::zero());
data_result.append(result);
},
Option::None => { break; }
};
};
TensorTrait::new(x.shape, data_result.span())
}
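// Example (hypothetical): with the common ONNX defaults alpha = 0.2 and
// beta = 0.5, hard_sigmoid maps 0 to min(max(0.2 * 0 + 0.5, 0), 1) = 0.5.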
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::leaky_relu docstring
fn leaky_relu<
T,
MAG,
impl FNumber: NumberTrait<T, MAG>,
impl FTensor: TensorTrait<T>,
impl FPartialOrd: PartialOrd<T>,
impl FMul: Mul<T>,
impl FCopy: Copy<T>,
impl FDrop: Drop<T>,
>(
mut z: Tensor<T>, alpha: @T
) -> Tensor<T> {
assert(*alpha < NumberTrait::one(), 'alpha must be less than 1');
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
if (*item >= NumberTrait::zero()) {
data_result.append(*item);
} else {
data_result.append(*item * *alpha);
};
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
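// Example (hypothetical): with alpha = 0.01, leaky_relu maps 2.0 to 2.0 and
// -3.0 to -0.03; the assert above requires alpha < 1.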
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::linear docstring
fn linear<
T,
impl TTensor: TensorTrait<T>,
impl TAddTensor: Add<Tensor<T>>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
z: Tensor<T>, weights: Tensor<T>, bias: Tensor<T>
) -> Tensor<T> {
assert(z.shape.len() == 1, 'input tensor must be 1D');
assert(weights.shape.len() == 2, 'weights tensor must be 2D');
assert(bias.shape.len() == 1, 'bias tensor must be 1D');
let dot = weights.matmul(@z);
let sum = dot + bias;
sum
}
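// Example (hypothetical): for weights of shape [m, n], input z of shape [n]
// and bias of shape [m], linear returns weights.matmul(@z) + bias, an
// m-element vector; the asserts above reject any other ranks.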
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast};
/// Cf: NNTrait::logsoftmax docstring
fn logsoftmax<
T, impl TTensor: TensorTrait<T>, impl TDivTensor: Div<Tensor<T>>, impl TDrop: Drop<T>
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor = z.exp();
let sum = exp_tensor
.reduce_sum(
Option::Some(array![axis.try_into().unwrap()].span()),
Option::Some(true),
Option::Some(false)
);
let softmax = exp_tensor / sum;
let logsoftmax = softmax.log();
logsoftmax
}
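// Example (hypothetical): logsoftmax of [1, 2, 3] along axis 0 is roughly
// [-2.408, -1.408, -0.408]; each entry equals x_i - log(sum(exp(x))).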
/// Cf: NNTrait::logsoftmax docstring
fn logsoftmaxWide<
T,
TMAG,
W,
WMAG,
impl TTensor: TensorTrait<T>,
impl WTensor: TensorTrait<W>,
impl TDiv: Div<T>,
impl TIntoW: Into<T, W>,
impl WTryIntoT: TryInto<W, T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TFixed: FixedTrait<T, TMAG>,
impl WFixed: FixedTrait<W, WMAG>,
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor: Tensor<W> = exp_upcast(*z);
let sum = exp_tensor
.reduce_sum(
Option::Some(array![axis.try_into().unwrap()].span()),
Option::Some(true),
Option::Some(false)
);
let softmax = div_downcast(@exp_tensor, @sum);
softmax.log()
}
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::relu docstring
fn relu<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
if (*item) < NumberTrait::zero() {
data_result.append(NumberTrait::zero());
} else {
data_result.append(*item);
};
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
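// Example (hypothetical): relu maps [-1, 0, 2] to [0, 0, 2]; negative entries
// become zero and the rest pass through.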
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::sigmoid docstring
fn sigmoid<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl TTensor: TensorTrait<T>,
impl TPartialOrd: PartialOrd<T>,
impl TAdd: Add<T>,
impl TMul: Mul<T>,
impl TDiv: Div<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
let result = NumberTrait::one()
/ (NumberTrait::one() + (*item * NumberTrait::neg_one()).exp());
data_result.append(result);
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
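// Example (hypothetical): sigmoid maps 0 to 1 / (1 + exp(0)) = 0.5 and
// squashes every input into the open interval (0, 1).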
use orion::numbers::fixed_point::core::FixedTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::math::{exp::exp_upcast, arithmetic::div_downcast};
/// Cf: NNTrait::softmax docstring
fn softmax<
T,
impl TTensor: TensorTrait<T>,
impl TTensorDiv: Div<Tensor<T>>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
z: @Tensor<T>, axis: Option<i32>
) -> Tensor<T> {
let axis = match axis {
Option::Some(val) => val,
Option::None => -1
};
let exp_tensor = z.exp();
let sum = exp_tensor
.reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false));
exp_tensor / sum
}
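// Example (hypothetical): softmax over [1, 2, 3] yields roughly
// [0.090, 0.245, 0.665], i.e. exp(x) normalized so the entries sum to one.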
/// Cf: NNTrait::softmax docstring
fn softmaxWide<
T,
TMAG,
W,
WMAG,
impl TTensor: TensorTrait<T>,
impl WTensor: TensorTrait<W>,
impl TDiv: Div<T>,
impl TIntoW: Into<T, W>,
impl WTryIntoT: TryInto<W, T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TFixed: FixedTrait<T, TMAG>,
impl WFixed: FixedTrait<W, WMAG>,
>(
z: @Tensor<T>, axis: Option<i32>
) -> Tensor<T> {
let axis = match axis {
Option::Some(val) => val,
Option::None => -1
};
let exp_tensor: Tensor<W> = exp_upcast(*z);
let sum = exp_tensor
.reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false));
div_downcast(@exp_tensor, @sum)
}
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
fn softmax_zero<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TTensorDiv: Div<Tensor<T>>,
impl TPartialEq: PartialEq<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl TAddEq: AddEq<T>,
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor = exp_zero(*z);
let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true);
exp_tensor / sum_no_zero
}
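// Example (hypothetical): softmax_zero of [0, 1, 2] keeps the zero entry at
// exactly zero: exp_zero maps 0 to 0 and the sum skips it, giving
// [0, e / (e + e^2), e^2 / (e + e^2)] instead of including exp(0) = 1.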
fn softmaxWide_zero<
T,
TMAG,
W,
WMAG,
impl TTensor: TensorTrait<T>,
impl WTensor: TensorTrait<W>,
impl TDiv: Div<T>,
impl TIntoW: Into<T, W>,
impl WTryIntoT: TryInto<W, T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TNumber: NumberTrait<T, TMAG>,
impl WNumber: NumberTrait<W, WMAG>,
impl TPartialEq: PartialEq<T>,
impl WPartialEq: PartialEq<W>,
impl TAddEq: AddEq<T>,
impl WAddEq: AddEq<W>,
>(
z: @Tensor<T>, axis: usize
) -> Tensor<T> {
let exp_tensor: Tensor<W> = exp_upcast_zero(*z);
let sum_no_zero = reduce_sum_no_zero(@exp_tensor, axis, true);
div_downcast(@exp_tensor, @sum_no_zero)
}
fn exp_zero<
T,
MAG,
impl TNumber: NumberTrait<T, MAG>,
impl FTensor: TensorTrait<T>,
impl TPartialEq: PartialEq<T>,
impl FCopy: Copy<T>,
impl FDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
if *item == NumberTrait::zero() {
result.append(NumberTrait::zero());
} else {
result.append((*item).exp());
}
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, result.span())
}
fn exp_upcast_zero<
T,
TMAG,
W,
WMAG,
impl TNumber: NumberTrait<T, TMAG>,
impl TTensor: TensorTrait<T>,
impl TPartialEq: PartialEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl WNumber: NumberTrait<W, WMAG>,
impl WTensor: TensorTrait<W>,
impl WCopy: Copy<W>,
impl WDrop: Drop<W>,
impl TIntoW: Into<T, W>,
>(
mut self: Tensor<T>
) -> Tensor<W> {
let mut result: Array<W> = array![];
loop {
match self.data.pop_front() {
Option::Some(item) => {
if *item == NumberTrait::zero() {
result.append(NumberTrait::zero());
} else {
result.append((TIntoW::into(*item)).exp());
}
},
Option::None => { break; }
};
};
TensorTrait::new(self.shape, result.span())
}
fn reduce_sum_no_zero<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
impl TPartialEq: PartialEq<T>,
>(
self: @Tensor<T>, axis: usize, keepdims: bool
) -> Tensor<T> {
let mut output_data: Array<T> = array![];
if (*self.shape).len() == 1 {
assert(axis == 0, 'axis out of dimensions');
let current_sum = accumulate_sum::<T>(*self.data, *self.shape, *self.shape, axis);
output_data.append(current_sum);
let mut output_shape: Array<usize> = array![];
output_shape.append(1);
return TensorTrait::new(output_shape.span(), output_data.span());
} else {
assert(axis <= (*self.shape).len(), 'axis out of dimensions');
let output_shape = reduce_output_shape(*self.shape, axis, false);
let output_data_len = len_from_shape(output_shape);
let mut index: usize = 0;
while index != output_data_len {
let output_indices = unravel_index(index, output_shape);
let mut current_sum = accumulate_sum::<
T
>(*self.data, *self.shape, output_indices, axis);
if current_sum == NumberTrait::zero() {
current_sum = NumberTrait::one();
}
output_data.append(current_sum);
index += 1;
};
if keepdims {
let output_shape = reduce_output_shape(*self.shape, axis, true);
TensorTrait::<T>::new(output_shape, output_data.span())
} else {
TensorTrait::<T>::new(output_shape, output_data.span())
}
}
}
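// Note: a sum that collapses to zero is replaced by one above, so the later
// element-wise division in softmax_zero leaves all-zero rows at zero instead
// of dividing by zero.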
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
/// Cf: NNTrait::softplus docstring
fn softplus<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TFixed: FixedTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TAdd: Add<T>,
impl TDiv: Div<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
let result = (FixedTrait::ONE() + (*item).exp()).ln();
data_result.append(result);
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
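// Example (hypothetical): softplus maps 0 to ln(1 + exp(0)) = ln(2), about
// 0.693, and approaches the identity for large positive inputs.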
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::softsign docstring
fn softsign<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TFixed: FixedTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TAdd: Add<T>,
impl TDiv: Div<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
mut z: Tensor<T>
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
let result = *item / (FixedTrait::ONE() + (*item).abs());
data_result.append(result);
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
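// Example (hypothetical): softsign maps 2 to 2 / (1 + |2|), about 0.667, and
// -2 to about -0.667, squashing inputs smoothly into (-1, 1).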
use core::option::OptionTrait;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices};
use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast};
/// Cf: NNTrait::space_to_depth docstring
fn space_to_depth<
T,
impl TTensor: TensorTrait<T>,
impl TAdd: Add<T>,
impl TMul: Mul<T>,
impl TTensorAdd: Add<Tensor<T>>,
impl TPartialOrd: PartialOrd<T>,
impl TAddEq: AddEq<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>,
>(
tensor: Tensor<T>, blocksize: usize
) -> Tensor<T> {
assert((tensor.shape).len() == 4, 'Expected a 4D tensor');
let blocksize_i32: i32 = blocksize.try_into().unwrap();
let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap();
let C: i32 = (*(tensor.shape).at(1)).try_into().unwrap();
let H: u32 = (*(tensor.shape).at(2));
let W: u32 = (*(tensor.shape).at(3));
let tmpshape = array![
b,
C,
(H / blocksize).try_into().unwrap(),
blocksize_i32,
(W / blocksize).try_into().unwrap(),
blocksize_i32
];
let reshaped = (tensor).reshape(target_shape: tmpshape.span(), allowzero: false);
let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span());
let finalshape = array![
b,
C * blocksize_i32 * blocksize_i32,
(H / blocksize).try_into().unwrap(),
(W / blocksize).try_into().unwrap()
];
transposed.reshape(target_shape: finalshape.span(), allowzero: false)
}
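// Example (hypothetical): an input of shape [1, 2, 4, 6] with blocksize 2
// yields shape [1, 8, 2, 3]: each 2x2 spatial block becomes four extra
// channels.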
use orion::numbers::NumberTrait;
use orion::operators::tensor::core::{Tensor, TensorTrait};
/// Cf: NNTrait::thresholded_relu docstring
fn thresholded_relu<
T,
MAG,
impl TTensor: TensorTrait<T>,
impl TNumber: NumberTrait<T, MAG>,
impl TPartialOrd: PartialOrd<T>,
impl TCopy: Copy<T>,
impl TDrop: Drop<T>
>(
mut z: Tensor<T>, alpha: @T
) -> Tensor<T> {
let mut data_result: Array<T> = array![];
loop {
match z.data.pop_front() {
Option::Some(item) => {
if (*item) <= (*alpha) {
data_result.append(NumberTrait::zero());
} else {
data_result.append(*item);
};
},
Option::None => { break; }
};
};
TensorTrait::new(z.shape, data_result.span())
}
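// Example (hypothetical): with alpha = 1.0, thresholded_relu maps
// [0.5, 1.0, 2.0] to [0, 0, 2.0]; values at or below the threshold are zeroed.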
mod nn_i8;
mod nn_i32;
mod nn_u32;
mod nn_fp8x23;
mod nn_fp16x16;
mod nn_fp64x64;
mod nn_fp32x32;
use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
use orion::operators::tensor::implementations::tensor_fp16x16::{
FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd
};
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16W, FP16x16IntoFP16x16W
};
use orion::operators::tensor::implementations::tensor_fp16x16wide::{
FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd
};
impl FP16x16NN of NNTrait<FP16x16> {
fn relu(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP16x16>, axis: Option<i32>) -> Tensor<FP16x16> {
functional::softmax::softmaxWide::<FP16x16, u32, FP16x16W, u64>(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP16x16>, axis: usize) -> Tensor<FP16x16> {
functional::softmax_zero::softmaxWide_zero::<FP16x16, u32, FP16x16W, u64>(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP16x16>, axis: usize) -> Tensor<FP16x16> {
functional::logsoftmax::logsoftmaxWide::<FP16x16, u32, FP16x16W, u64>(tensor, axis)
}
fn softsign(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP16x16>, weights: Tensor<FP16x16>, bias: Tensor<FP16x16>
) -> Tensor<FP16x16> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP16x16>, alpha: @FP16x16) -> Tensor<FP16x16> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP16x16>, alpha: @FP16x16) -> Tensor<FP16x16> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
fn hard_sigmoid(tensor: @Tensor<FP16x16>, alpha: @FP16x16, beta: @FP16x16) -> Tensor<FP16x16> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(
tensor: @Tensor<FP16x16>, blocksize: usize, mode: felt252
) -> Tensor<FP16x16> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP16x16>, blocksize: usize) -> Tensor<FP16x16> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP16x16>,
B: Tensor<FP16x16>,
C: Option<Tensor<FP16x16>>,
alpha: Option<FP16x16>,
beta: Option<FP16x16>,
transA: bool,
transB: bool
) -> Tensor<FP16x16> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP16x16>,
grid: @Tensor<FP16x16>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP16x16> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP16x16>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP16x16> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP16x16>,
W: @Tensor<FP16x16>,
B: Option<@Tensor<FP16x16>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
        pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP16x16> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<FP16x16>,
W: @Tensor<FP16x16>,
B: Option<Span<FP16x16>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP16x16> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x32Impl};
use orion::operators::tensor::implementations::tensor_fp32x32::{
FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd
};
impl FP32x32NN of NNTrait<FP32x32> {
fn relu(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP32x32>, axis: Option<i32>) -> Tensor<FP32x32> {
functional::softmax::softmax(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP32x32>, axis: usize) -> Tensor<FP32x32> {
functional::softmax_zero::softmax_zero(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP32x32>, axis: usize) -> Tensor<FP32x32> {
functional::logsoftmax::logsoftmax(tensor, axis)
}
fn softsign(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP32x32>) -> Tensor<FP32x32> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP32x32>, weights: Tensor<FP32x32>, bias: Tensor<FP32x32>
) -> Tensor<FP32x32> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP32x32>, alpha: @FP32x32) -> Tensor<FP32x32> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP32x32>, alpha: @FP32x32) -> Tensor<FP32x32> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
fn hard_sigmoid(tensor: @Tensor<FP32x32>, alpha: @FP32x32, beta: @FP32x32) -> Tensor<FP32x32> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(
tensor: @Tensor<FP32x32>, blocksize: usize, mode: felt252
    ) -> Tensor<FP32x32> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP32x32>, blocksize: usize) -> Tensor<FP32x32> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP32x32>,
B: Tensor<FP32x32>,
C: Option<Tensor<FP32x32>>,
alpha: Option<FP32x32>,
beta: Option<FP32x32>,
transA: bool,
transB: bool
) -> Tensor<FP32x32> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP32x32>,
grid: @Tensor<FP32x32>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP32x32> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP32x32>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP32x32> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP32x32>,
W: @Tensor<FP32x32>,
B: Option<@Tensor<FP32x32>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP32x32> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides |
)
}
fn conv(
X: @Tensor<FP32x32>,
W: @Tensor<FP32x32>,
B: Option<Span<FP32x32>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP32x32> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x64Impl};
use orion::operators::tensor::implementations::tensor_fp64x64::{
FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd
};
impl FP64x64NN of NNTrait<FP64x64> {
fn relu(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP64x64>, axis: Option<i32>) -> Tensor<FP64x64> {
functional::softmax::softmax(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP64x64>, axis: usize) -> Tensor<FP64x64> {
functional::softmax_zero::softmax_zero(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP64x64>, axis: usize) -> Tensor<FP64x64> {
functional::logsoftmax::logsoftmax(tensor, axis)
}
fn softsign(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP64x64>) -> Tensor<FP64x64> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP64x64>, weights: Tensor<FP64x64>, bias: Tensor<FP64x64>
) -> Tensor<FP64x64> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP64x64>, alpha: @FP64x64) -> Tensor<FP64x64> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP64x64>, alpha: @FP64x64) -> Tensor<FP64x64> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
fn hard_sigmoid(tensor: @Tensor<FP64x64>, alpha: @FP64x64, beta: @FP64x64) -> Tensor<FP64x64> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(
tensor: @Tensor<FP64x64>, blocksize: usize, mode: felt252
    ) -> Tensor<FP64x64> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP64x64>, blocksize: usize) -> Tensor<FP64x64> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP64x64>,
B: Tensor<FP64x64>,
C: Option<Tensor<FP64x64>>,
alpha: Option<FP64x64>,
beta: Option<FP64x64>,
transA: bool,
transB: bool
) -> Tensor<FP64x64> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP64x64>,
grid: @Tensor<FP64x64>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP64x64> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP64x64>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP64x64> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP64x64>,
W: @Tensor<FP64x64>,
B: Option<@Tensor<FP64x64>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP64x64> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides |
)
}
fn conv(
X: @Tensor<FP64x64>,
W: @Tensor<FP64x64>,
B: Option<Span<FP64x64>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP64x64> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
use orion::operators::tensor::implementations::tensor_fp8x23::{
FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd
};
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W
};
use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor};
impl FP8x23NN of NNTrait<FP8x23> {
fn relu(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::sigmoid::sigmoid(*tensor)
}
fn softmax(tensor: @Tensor<FP8x23>, axis: Option<i32>) -> Tensor<FP8x23> {
functional::softmax::softmaxWide::<FP8x23, u32, FP8x23W, u64>(tensor, axis)
}
fn softmax_zero(tensor: @Tensor<FP8x23>, axis: usize) -> Tensor<FP8x23> {
functional::softmax_zero::softmaxWide_zero::<FP8x23, u32, FP8x23W, u64>(tensor, axis)
}
fn logsoftmax(tensor: @Tensor<FP8x23>, axis: usize) -> Tensor<FP8x23> {
functional::logsoftmax::logsoftmaxWide::<FP8x23, u32, FP8x23W, u64>(tensor, axis)
}
fn softsign(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::softsign::softsign(*tensor)
}
fn softplus(tensor: @Tensor<FP8x23>) -> Tensor<FP8x23> {
functional::softplus::softplus(*tensor)
}
fn linear(
inputs: Tensor<FP8x23>, weights: Tensor<FP8x23>, bias: Tensor<FP8x23>
) -> Tensor<FP8x23> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<FP8x23>, alpha: @FP8x23) -> Tensor<FP8x23> {
functional::leaky_relu::leaky_relu(*inputs, alpha)
}
fn thresholded_relu(tensor: @Tensor<FP8x23>, alpha: @FP8x23) -> Tensor<FP8x23> {
functional::thresholded_relu::thresholded_relu(*tensor, alpha)
}
    fn hard_sigmoid(tensor: @Tensor<FP8x23>, alpha: @FP8x23, beta: @FP8x23) -> Tensor<FP8x23> {
functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta)
}
fn depth_to_space(tensor: @Tensor<FP8x23>, blocksize: usize, mode: felt252) -> Tensor<FP8x23> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<FP8x23>, blocksize: usize) -> Tensor<FP8x23> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<FP8x23>,
B: Tensor<FP8x23>,
C: Option<Tensor<FP8x23>>,
alpha: Option<FP8x23>,
beta: Option<FP8x23>,
transA: bool,
transB: bool
) -> Tensor<FP8x23> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<FP8x23>,
grid: @Tensor<FP8x23>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<FP8x23> {
functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode)
}
fn col2im(
data: @Tensor<FP8x23>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP8x23> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<FP8x23>,
W: @Tensor<FP8x23>,
B: Option<@Tensor<FP8x23>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP8x23> {
        functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<FP8x23>,
W: @Tensor<FP8x23>,
B: Option<Span<FP8x23>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<FP8x23> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd};
impl I32NN of NNTrait<i32> {
fn relu(tensor: @Tensor<i32>) -> Tensor<i32> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softmax(tensor: @Tensor<i32>, axis: Option<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softmax_zero(tensor: @Tensor<i32>, axis: usize) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn logsoftmax(tensor: @Tensor<i32>, axis: usize) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softsign(tensor: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn softplus(tensor: @Tensor<i32>) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn linear(inputs: Tensor<i32>, weights: Tensor<i32>, bias: Tensor<i32>) -> Tensor<i32> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<i32>, alpha: @i32) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn thresholded_relu(tensor: @Tensor<i32>, alpha: @i32) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn hard_sigmoid(tensor: @Tensor<i32>, alpha: @i32, beta: @i32) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn depth_to_space(tensor: @Tensor<i32>, blocksize: usize, mode: felt252) -> Tensor<i32> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<i32>, blocksize: usize) -> Tensor<i32> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<i32>,
B: Tensor<i32>,
C: Option<Tensor<i32>>,
alpha: Option<i32>,
beta: Option<i32>,
transA: bool,
        transB: bool
) -> Tensor<i32> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<i32>,
grid: @Tensor<i32>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<i32> {
panic(array!['not supported!'])
}
fn col2im(
data: @Tensor<i32>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i32> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<i32>,
W: @Tensor<i32>,
B: Option<@Tensor<i32>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i32> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<i32>,
W: @Tensor<i32>,
B: Option<Span<i32>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i32> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd};
impl I8NN of NNTrait<i8> {
fn relu(tensor: @Tensor<i8>) -> Tensor<i8> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softmax(tensor: @Tensor<i8>, axis: Option<i32>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softmax_zero(tensor: @Tensor<i8>, axis: usize) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn logsoftmax(tensor: @Tensor<i8>, axis: usize) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softsign(tensor: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn softplus(tensor: @Tensor<i8>) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn linear(inputs: Tensor<i8>, weights: Tensor<i8>, bias: Tensor<i8>) -> Tensor<i8> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<i8>, alpha: @i8) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn thresholded_relu(tensor: @Tensor<i8>, alpha: @i8) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn hard_sigmoid(tensor: @Tensor<i8>, alpha: @i8, beta: @i8) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn depth_to_space(tensor: @Tensor<i8>, blocksize: usize, mode: felt252) -> Tensor<i8> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<i8>, blocksize: usize) -> Tensor<i8> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<i8>,
B: Tensor<i8>,
C: Option<Tensor<i8>>,
alpha: Option<i8>,
beta: Option<i8>,
transA: bool,
transB: bool
) -> Tensor<i8> {
        functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<i8>,
grid: @Tensor<i8>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<i8> {
panic(array!['not supported!'])
}
fn col2im(
data: @Tensor<i8>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i8> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<i8>,
W: @Tensor<i8>,
B: Option<@Tensor<i8>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i8> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<i8>,
W: @Tensor<i8>,
B: Option<Span<i8>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<i8> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::nn::core::NNTrait;
use orion::operators::nn::functional;
use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd};
impl U32NN of NNTrait<u32> {
fn relu(tensor: @Tensor<u32>) -> Tensor<u32> {
functional::relu::relu(*tensor)
}
fn sigmoid(tensor: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softmax(tensor: @Tensor<u32>, axis: Option<i32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softmax_zero(tensor: @Tensor<u32>, axis: usize) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn logsoftmax(tensor: @Tensor<u32>, axis: usize) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softsign(tensor: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn softplus(tensor: @Tensor<u32>) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn linear(inputs: Tensor<u32>, weights: Tensor<u32>, bias: Tensor<u32>) -> Tensor<u32> {
functional::linear::linear(inputs, weights, bias)
}
fn leaky_relu(inputs: @Tensor<u32>, alpha: @u32) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn thresholded_relu(tensor: @Tensor<u32>, alpha: @u32) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn hard_sigmoid(tensor: @Tensor<u32>, alpha: @u32, beta: @u32) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn depth_to_space(tensor: @Tensor<u32>, blocksize: usize, mode: felt252) -> Tensor<u32> {
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode)
}
fn space_to_depth(tensor: @Tensor<u32>, blocksize: usize) -> Tensor<u32> {
functional::space_to_depth::space_to_depth(*tensor, blocksize)
}
fn gemm(
A: Tensor<u32>,
B: Tensor<u32>,
C: Option<Tensor<u32>>,
alpha: Option<u32>,
beta: Option<u32>,
transA: bool,
        transB: bool
) -> Tensor<u32> {
functional::gemm::gemm(A, B, C, alpha, beta, transA, transB)
}
fn grid_sample(
X: @Tensor<u32>,
grid: @Tensor<u32>,
align_corner: Option<usize>,
mode: Option<functional::grid_sample::MODE>,
padding_mode: Option<functional::grid_sample::PADDING_MODE>,
) -> Tensor<u32> {
panic(array!['not supported!'])
}
fn col2im(
data: @Tensor<u32>,
image_shape: Span<usize>,
block_shape: Span<usize>,
dilations: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<u32> {
functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,)
}
fn conv_transpose(
X: @Tensor<u32>,
W: @Tensor<u32>,
B: Option<@Tensor<u32>>,
auto_pad: Option<functional::conv_transpose::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
output_padding: Option<Span<usize>>,
output_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<u32> {
functional::conv_transpose::conv_transpose(
X,
W,
B,
auto_pad,
dilations,
group,
kernel_shape,
output_padding,
output_shape,
pads,
strides
)
}
fn conv(
X: @Tensor<u32>,
W: @Tensor<u32>,
B: Option<Span<u32>>,
auto_pad: Option<functional::conv::AUTO_PAD>,
dilations: Option<Span<usize>>,
group: Option<usize>,
kernel_shape: Option<Span<usize>>,
pads: Option<Span<usize>>,
strides: Option<Span<usize>>,
) -> Tensor<u32> {
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides)
}
}
mod core;
mod implementations;
mod functional;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::implementations::sequence_fp8x23::FP8x23Sequence;
use orion::operators::sequence::implementations::sequence_fp8x23wide::FP8x23WSequence;
use orion::operators::sequence::implementations::sequence_fp16x16::FP16x16Sequence;
use orion::operators::sequence::implementations::sequence_fp16x16wide::FP16x16WSequence;
use orion::operators::sequence::implementations::sequence_i8::I8Sequence;
use orion::operators::sequence::implementations::sequence_i32::I32Sequence;
use orion::operators::sequence::implementations::sequence_u32::U32Sequence;
use orion::operators::sequence::implementations::sequence_bool::BoolSequence;
use orion::operators::tensor::core::Tensor;
trait SequenceTrait<T> {
fn sequence_construct(tensors: Array<Tensor<T>>) -> Array<Tensor<T>>;
fn sequence_empty() -> Array<Tensor<T>>;
fn sequence_length(self: Array<Tensor<T>>) -> Tensor<u32>;
fn sequence_insert(
self: Array<Tensor<T>>, tensor: @Tensor<T>, position: Option<Tensor<i32>>
) -> Array<Tensor<T>>;
fn sequence_at(sequence: Array<Tensor<T>>, position: Tensor<i32>) -> Tensor<T>;
fn sequence_erase(
sequence: Array<Tensor<T>>, position: Option<Tensor<i32>>
    ) -> Array<Tensor<T>>;
fn concat_from_sequence(
sequence: Array<Tensor<T>>, axis: i32, new_axis: Option<usize>
) -> Tensor<T>;
}
mod sequence_construct;
mod sequence_empty;
mod sequence_at;
mod sequence_erase;
mod sequence_insert;
mod sequence_length;
mod concat_from_sequence;
use orion::operators::tensor::helpers::replace_index;
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::math::concat::concat;
use orion::numbers::{NumberTrait, I32IntoU32};
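/// Cf: SequenceTrait::concat_from_sequence docstring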
fn concat_from_sequence<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
sequence: Array<Tensor<T>>, axis: i32, new_axis: Option<usize>
) -> Tensor<T> {
let new_axis: usize = match new_axis {
Option::Some(val) => {
assert(val == 0 || val == 1, 'new_axis must be 0 or 1');
val
},
Option::None => 0
};
let first_tensor = *sequence.at(0);
let r = first_tensor.shape.len();
if new_axis == 0 {
concat_without_new_axis(sequence, axis, r)
} else {
concat_with_new_axis(sequence, axis, r)
}
}
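// Illustrative example (assumed shapes, not library source): with `new_axis`
// unset or 0, two [2, 2] tensors concatenated on axis 0 yield a [4, 2]
// tensor. With `new_axis` = 1, each input is first reshaped to [1, 2, 2]
// via add_new_dimension below, so the same call yields a [2, 2, 2] tensor.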
fn concat_without_new_axis<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
sequence: Array<Tensor<T>>, axis: i32, r: usize
) -> Tensor<T> {
let axis_is_negative: bool = axis < 0;
let mut axis_value: u32 = axis.into();
assert(
(!axis_is_negative && axis_value <= r - 1) || (axis_is_negative && axis_value <= r),
'Out of bounds for dimension'
);
if axis_is_negative {
axis_value = r - axis_value
}
concat(sequence.span(), axis_value)
}
fn concat_with_new_axis<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
sequence: Array<Tensor<T>>, axis: i32, r: usize
) -> Tensor<T> {
let axis_is_negative: bool = axis < 0;
let mut axis_value: u32 = axis.into();
assert(
(!axis_is_negative && axis_value <= r) || (axis_is_negative && axis_value <= r + 1),
'Out of bounds for dimension'
);
if axis_is_negative {
if axis_value > r {
axis_value = 0
} else {
axis_value = r - axis_value
}
}
let mut input_sequence_copy = sequence;
    let mut reshaped_sequence: Array<Tensor<T>> = array![];
loop {
match input_sequence_copy.pop_front() {
Option::Some(input_sequence_value) => {
let mut reshaped_tensor = add_new_dimension(input_sequence_value, axis_value);
reshaped_sequence.append(reshaped_tensor);
},
Option::None => { break; }
};
};
concat(reshaped_sequence.span(), axis_value)
}
fn add_new_dimension<
T, impl TTensorTrait: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>,
>(
mut tensor: Tensor<T>, axis: usize
) -> Tensor<T> {
let mut tensor_shape = tensor.shape;
let mut new_tensor_shape: Array<usize> = array![];
let mut tensor_shape_counter: usize = 0;
loop {
match tensor_shape.pop_front() {
Option::Some(tensor_shape_value) => {
if tensor_shape_counter == axis {
new_tensor_shape.append(1);
}
new_tensor_shape.append(*tensor_shape_value);
tensor_shape_counter += 1;
},
Option::None => { break; }
};
};
if axis >= tensor.shape.len() {
new_tensor_shape.append(1);
}
TensorTrait::<T>::new(new_tensor_shape.span(), tensor.data)
}
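// Shape example for add_new_dimension (illustrative): inserting at axis 1
// turns [2, 3] into [2, 1, 3]; any `axis` >= rank appends the unit
// dimension instead, so axis 2 on [2, 3] gives [2, 3, 1]. The data span is
// reused unchanged since a unit dimension does not affect element count.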
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32};
/// Cf: SequenceTrait::sequence_at docstring
fn sequence_at<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
sequence: Array<Tensor<T>>, position: Tensor<i32>
) -> Tensor<T> {
    assert(
        position.shape.len() == 0 && position.data.len() == 1, 'Position must be a scalar'
    );
let position_value_i32: i32 = *position.data.at(0);
let is_negative: bool = position_value_i32 < 0;
let position_value: u32 = position_value_i32.into();
assert(
(!is_negative && position_value <= sequence.len() - 1)
|| (is_negative && position_value <= sequence.len()),
'Position out of bounds'
);
if !is_negative {
*sequence.at(position_value)
} else {
let normalized_position_value = sequence.len() - position_value;
*sequence.at(normalized_position_value)
}
}
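// Usage sketch (hypothetical values): negative positions index from the end,
// so for a three-tensor sequence a scalar position of -1 resolves to index
// 3 - 1 = 2, i.e. the last tensor.
//
//     let pos = TensorTrait::<i32>::new(array![].span(), array![-1].span());
//     let last = sequence_at(seq, pos); // same tensor as *seq.at(2)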
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: SequenceTrait::sequence_construct docstring
fn sequence_construct<T, impl TDrop: Drop<T>>(tensors: Array<Tensor<T>>) -> Array<Tensor<T>> {
assert(tensors.len() >= 1, 'Input tensors must be >= 1');
tensors
}
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: SequenceTrait::sequence_empty docstring
fn sequence_empty<T, impl TTensorTrait: TensorTrait<T>, impl TDrop: Drop<T>>() -> Array<Tensor<T>> {
let mut sequence = array![];
let mut shape: Array<usize> = array![];
shape.append(0);
let mut data: Array<T> = array![];
let tensor = TensorTrait::new(shape.span(), data.span());
sequence.append(tensor);
sequence
}
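// Note: as implemented, sequence_empty returns a one-element sequence whose
// single tensor has shape [0] and an empty data span, rather than a
// zero-length array of tensors.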
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::I32Tensor;
use orion::numbers::{NumberTrait, I32IntoU32};
/// Cf: SequenceTrait::sequence_erase docstring
fn sequence_erase<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
sequence: Array<Tensor<T>>, position: Option<Tensor<i32>>
) -> Array<Tensor<T>> {
let position: Tensor<i32> = match position {
Option::Some(p) => p,
Option::None => {
let mut shape: Array<usize> = array![];
let mut data: Array<i32> = array![];
data.append(-1_i32);
TensorTrait::<i32>::new(shape.span(), data.span())
}
};
assert(position.shape.len() == 0 && position.data.len() == 1, 'Position must be a scalar');
let position_value_i32: i32 = *position.data.at(0);
let is_negative: bool = position_value_i32 < 0;
let mut position_value: u32 = position_value_i32.into();
assert(
(!is_negative && position_value <= sequence.len() - 1)
|| (is_negative && position_value <= sequence.len()),
'Position out of bounds'
);
if is_negative {
position_value = sequence.len() - position_value;
}
let mut input_sequence_copy = sequence;
let mut output_sequence: Array<Tensor<T>> = array![];
let mut tensor_counter: usize = 0;
loop {
match input_sequence_copy.pop_front() {
Option::Some(input_sequence_value) => {
if tensor_counter == position_value {
tensor_counter += 1;
continue;
}
output_sequence.append(input_sequence_value);
tensor_counter += 1;
},
Option::None => { break; }
};
};
output_sequence
}
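// Usage sketch (hypothetical values): erasing position 1 from [t0, t1, t2]
// yields [t0, t2]; with `position` omitted, the default scalar -1 removes
// the last tensor, yielding [t0, t1].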
use orion::operators::tensor::core::{Tensor, TensorTrait};
use orion::operators::tensor::I32Tensor;
use orion::numbers::{NumberTrait, I32IntoU32};
/// Cf: SequenceTrait::sequence_insert docstring
fn sequence_insert<T, impl TTensor: TensorTrait<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
self: Array<Tensor<T>>, tensor: @Tensor<T>, position: Option<Tensor<i32>>
) -> Array<Tensor<T>> {
let position: Tensor<i32> = match position {
Option::Some(p) => p,
Option::None => {
let mut shape: Array<usize> = array![];
let mut data: Array<i32> = array![];
data.append(-1_i32);
TensorTrait::<i32>::new(shape.span(), data.span())
},
};
assert(position.shape.len() == 0 && position.data.len() == 1, 'Position must be a scalar');
let position_value_i32: i32 = *position.data.at(0);
let is_negative: bool = position_value_i32 < 0;
let mut position_value: u32 = position_value_i32.into();
assert(
(!is_negative && position_value <= self.len() - 1)
|| (is_negative && position_value <= self.len()),
'Position out of bounds'
);
if is_negative {
position_value = self.len() - position_value;
}
let mut new_sequence: Array<Tensor<T>> = array![];
let mut inserted = false;
let mut self_copy = self;
loop {
match self_copy.pop_front() {
Option::Some(t) => {
                if position_value == 0 && !inserted {
new_sequence.append(*tensor);
inserted = true;
}
new_sequence.append(t);
if !inserted {
position_value -= 1;
}
},
Option::None => { break; },
};
};
new_sequence
}
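// Usage sketch (hypothetical values): inserting t_new at position 1 into
// [t0, t1, t2] yields [t0, t_new, t1, t2]; the default position of -1
// inserts before the last element, yielding [t0, t1, t_new, t2].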
use orion::operators::tensor::{TensorTrait, Tensor};
/// Cf: SequenceTrait::sequence_length docstring
fn sequence_length<T, impl TDrop: Drop<T>>(self: Array<Tensor<T>>) -> Tensor<u32> {
let mut shape: Array<usize> = array![];
let mut result: Array<usize> = array![];
result.append(self.len());
Tensor::<u32> { shape: shape.span(), data: result.span(), }
}
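// Note: the length comes back as a scalar tensor (empty shape, one u32
// element), so callers read it as *sequence_length(seq).data.at(0).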
mod sequence_bool;
mod sequence_i8;
mod sequence_i32;
mod sequence_u32;
mod sequence_fp8x23;
mod sequence_fp8x23wide;
mod sequence_fp16x16;
mod sequence_fp16x16wide;
mod sequence_fp32x32;
mod sequence_fp64x64;
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_bool::BoolTensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl BoolSequence of SequenceTrait<bool> {
fn sequence_construct(tensors: Array<Tensor<bool>>) -> Array<Tensor<bool>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<bool>> {
functional::sequence_empty::sequence_empty::<bool>()
}
fn sequence_length(self: Array<Tensor<bool>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<bool>>, position: Tensor<i32>) -> Tensor<bool> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<bool>>, position: Option<Tensor<i32>>
) -> Array<Tensor<bool>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<bool>>, tensor: @Tensor<bool>, position: Option<Tensor<i32>>
) -> Array<Tensor<bool>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<bool>>, axis: i32, new_axis: Option<usize>
) -> Tensor<bool> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16;
use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP16x16Sequence of SequenceTrait<FP16x16> {
fn sequence_construct(tensors: Array<Tensor<FP16x16>>) -> Array<Tensor<FP16x16>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP16x16>> {
functional::sequence_empty::sequence_empty::<FP16x16>()
}
fn sequence_length(self: Array<Tensor<FP16x16>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP16x16>>, position: Tensor<i32>) -> Tensor<FP16x16> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP16x16>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP16x16>>, tensor: @Tensor<FP16x16>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP16x16>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP16x16> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W;
use orion::operators::tensor::implementations::tensor_fp16x16wide::FP16x16WTensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP16x16WSequence of SequenceTrait<FP16x16W> {
fn sequence_construct(tensors: Array<Tensor<FP16x16W>>) -> Array<Tensor<FP16x16W>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP16x16W>> {
functional::sequence_empty::sequence_empty::<FP16x16W>()
}
fn sequence_length(self: Array<Tensor<FP16x16W>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP16x16W>>, position: Tensor<i32>) -> Tensor<FP16x16W> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP16x16W>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16W>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP16x16W>>, tensor: @Tensor<FP16x16W>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP16x16W>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP16x16W>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP16x16W> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp32x32::core::FP32x32;
use orion::operators::tensor::implementations::tensor_fp32x32::FP32x32Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP32x32Sequence of SequenceTrait<FP32x32> {
fn sequence_construct(tensors: Array<Tensor<FP32x32>>) -> Array<Tensor<FP32x32>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP32x32>> {
functional::sequence_empty::sequence_empty::<FP32x32>()
}
fn sequence_length(self: Array<Tensor<FP32x32>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP32x32>>, position: Tensor<i32>) -> Tensor<FP32x32> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP32x32>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP32x32>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP32x32>>, tensor: @Tensor<FP32x32>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP32x32>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP32x32>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP32x32> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp64x64::core::FP64x64;
use orion::operators::tensor::implementations::tensor_fp64x64::FP64x64Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP64x64Sequence of SequenceTrait<FP64x64> {
fn sequence_construct(tensors: Array<Tensor<FP64x64>>) -> Array<Tensor<FP64x64>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP64x64>> {
functional::sequence_empty::sequence_empty::<FP64x64>()
}
fn sequence_length(self: Array<Tensor<FP64x64>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP64x64>>, position: Tensor<i32>) -> Tensor<FP64x64> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP64x64>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP64x64>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP64x64>>, tensor: @Tensor<FP64x64>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP64x64>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP64x64>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP64x64> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23;
use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP8x23Sequence of SequenceTrait<FP8x23> {
fn sequence_construct(tensors: Array<Tensor<FP8x23>>) -> Array<Tensor<FP8x23>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP8x23>> {
functional::sequence_empty::sequence_empty::<FP8x23>()
}
fn sequence_length(self: Array<Tensor<FP8x23>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP8x23>>, position: Tensor<i32>) -> Tensor<FP8x23> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP8x23>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP8x23>>, tensor: @Tensor<FP8x23>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP8x23>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP8x23> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::numbers::fixed_point::implementations::fp8x23wide::core::FP8x23W;
use orion::operators::tensor::implementations::tensor_fp8x23wide::FP8x23WTensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl FP8x23WSequence of SequenceTrait<FP8x23W> {
fn sequence_construct(tensors: Array<Tensor<FP8x23W>>) -> Array<Tensor<FP8x23W>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<FP8x23W>> {
functional::sequence_empty::sequence_empty::<FP8x23W>()
}
fn sequence_length(self: Array<Tensor<FP8x23W>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<FP8x23W>>, position: Tensor<i32>) -> Tensor<FP8x23W> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<FP8x23W>>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23W>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<FP8x23W>>, tensor: @Tensor<FP8x23W>, position: Option<Tensor<i32>>
) -> Array<Tensor<FP8x23W>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<FP8x23W>>, axis: i32, new_axis: Option<usize>
) -> Tensor<FP8x23W> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl I32Sequence of SequenceTrait<i32> {
fn sequence_construct(tensors: Array<Tensor<i32>>) -> Array<Tensor<i32>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<i32>> {
functional::sequence_empty::sequence_empty::<i32>()
}
fn sequence_length(self: Array<Tensor<i32>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<i32>>, position: Tensor<i32>) -> Tensor<i32> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<i32>>, position: Option<Tensor<i32>>
) -> Array<Tensor<i32>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<i32>>, tensor: @Tensor<i32>, position: Option<Tensor<i32>>
) -> Array<Tensor<i32>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<i32>>, axis: i32, new_axis: Option<usize>
) -> Tensor<i32> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_i8::I8Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl I8Sequence of SequenceTrait<i8> {
fn sequence_construct(tensors: Array<Tensor<i8>>) -> Array<Tensor<i8>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<i8>> {
functional::sequence_empty::sequence_empty::<i8>()
}
fn sequence_length(self: Array<Tensor<i8>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<i8>>, position: Tensor<i32>) -> Tensor<i8> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<i8>>, position: Option<Tensor<i32>>
) -> Array<Tensor<i8>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<i8>>, tensor: @Tensor<i8>, position: Option<Tensor<i32>>
) -> Array<Tensor<i8>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<i8>>, axis: i32, new_axis: Option<usize>
) -> Tensor<i8> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
use orion::operators::tensor::core::Tensor;
use orion::operators::sequence::core::SequenceTrait;
use orion::operators::sequence::functional;
use orion::operators::tensor::implementations::tensor_u32::U32Tensor;
use orion::operators::tensor::implementations::tensor_i32::I32Tensor;
impl U32Sequence of SequenceTrait<u32> {
fn sequence_construct(tensors: Array<Tensor<u32>>) -> Array<Tensor<u32>> {
functional::sequence_construct::sequence_construct(tensors)
}
fn sequence_empty() -> Array<Tensor<u32>> {
functional::sequence_empty::sequence_empty::<u32>()
}
fn sequence_length(self: Array<Tensor<u32>>) -> Tensor<u32> {
functional::sequence_length::sequence_length(self)
}
fn sequence_at(sequence: Array<Tensor<u32>>, position: Tensor<i32>) -> Tensor<u32> {
functional::sequence_at::sequence_at(sequence, position)
}
fn sequence_erase(
sequence: Array<Tensor<u32>>, position: Option<Tensor<i32>>
) -> Array<Tensor<u32>> {
functional::sequence_erase::sequence_erase(sequence, position)
}
fn sequence_insert(
self: Array<Tensor<u32>>, tensor: @Tensor<u32>, position: Option<Tensor<i32>>
) -> Array<Tensor<u32>> {
functional::sequence_insert::sequence_insert(self, tensor, position)
}
fn concat_from_sequence(
sequence: Array<Tensor<u32>>, axis: i32, new_axis: Option<usize>
) -> Tensor<u32> {
functional::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
}
}
mod core;
mod helpers;
mod math;
mod linalg;
mod quantization;
mod implementations;
mod manipulation;
mod ml;
use orion::operators::tensor::core::{Tensor, TensorSerde, TensorTrait};
use orion::operators::tensor::implementations::tensor_fp8x23::{
FP8x23Tensor, FP8x23TensorAdd, FP8x23TensorSub, FP8x23TensorMul, FP8x23TensorDiv,
FP8x23TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_fp32x32::{
FP32x32Tensor, FP32x32TensorAdd, FP32x32TensorSub, FP32x32TensorMul, FP32x32TensorDiv,
FP32x32TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_fp16x16::{
FP16x16Tensor, FP16x16TensorAdd, FP16x16TensorSub, FP16x16TensorMul, FP16x16TensorDiv,
FP16x16TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_i8::{
I8Tensor, I8TensorAdd, I8TensorSub, I8TensorMul, I8TensorDiv, I8TensorPartialEq,
};
use orion::operators::tensor::implementations::tensor_i32::{
I32Tensor, I32TensorAdd, I32TensorSub, I32TensorMul, I32TensorDiv, I32TensorPartialEq,
TensorI8IntoTensorI32
};
use orion::operators::tensor::implementations::tensor_u32::{
U32Tensor, U32TensorAdd, U32TensorSub, U32TensorMul, U32TensorDiv, U32TensorPartialEq
};
use orion::operators::tensor::implementations::tensor_bool::{BoolTensor, BoolTensorPartialEq};
use orion::operators::tensor::implementations::tensor_complex64::{
Complex64Tensor, Complex64TensorAdd, Complex64TensorSub, Complex64TensorMul, Complex64TensorDiv,
Complex64TensorPartialEq,
};
use alexandria_data_structures::array_ext::ArrayTraitExt;
use core::array::{ArrayTrait, SpanTrait};
use core::serde::Serde;
use core::option::OptionTrait;
use alexandria_data_structures::array_ext::{SpanTraitExt};
use orion::operators::tensor::helpers::{len_from_shape, check_shape};
use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32};
struct Tensor<T> {
shape: Span<usize>,
data: Span<T>,
}
impl TensorSerde<T, impl TSerde: Serde<T>, impl TDrop: Drop<T>> of Serde<Tensor<T>> {
fn serialize(self: @Tensor<T>, ref output: Array<felt252>) {
self.shape.serialize(ref output);
self.data.serialize(ref output);
}
fn deserialize(ref serialized: Span<felt252>) -> Option<Tensor<T>> {
let shape: Span<usize> = Serde::<Span<usize>>::deserialize(ref serialized)?;
let data: Span<T> = Serde::<Span<T>>::deserialize(ref serialized)?;
Option::Some(Tensor { shape, data })
}
}
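// Round-trip sketch (illustrative): serialize flattens shape then data into
// a single felt252 array, and deserialize consumes them in the same order,
// so the pair reconstructs the original tensor.
//
//     let mut buf: Array<felt252> = array![];
//     t.serialize(ref buf);
//     let mut span = buf.span();
//     let t2: Tensor<u32> = Serde::deserialize(ref span).unwrap();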
trait TensorTrait<T> {
fn new(shape: Span<usize>, data: Span<T>) -> Tensor<T>;
fn at(self: @Tensor<T>, indices: Span<usize>) -> T;
fn min_in_tensor(self: @Tensor<T>) -> T;
fn add(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
fn sub(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
fn mul(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
fn div(lhs: Tensor<T>, rhs: Tensor<T>) -> Tensor<T>;
    fn min(tensors: Span<Tensor<T>>) -> Tensor<T>;
fn max_in_tensor(self: @Tensor<T>) -> T;
fn max(tensors: Span<Tensor<T>>) -> Tensor<T>;
fn stride(self: @Tensor<T>) -> Span<usize>;
fn ravel_index(self: @Tensor<T>, indices: Span<usize>) -> usize;
fn unravel_index(self: @Tensor<T>, index: usize) -> Span<usize>;
fn reshape(self: @Tensor<T>, target_shape: Span<i32>, allowzero: bool) -> Tensor<T>;
    fn transpose(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
fn reduce_sum(
self: @Tensor<T>,
axes: Option<Span<i32>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
fn argmax(
self: @Tensor<T>, axis: i32, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<i32>;
fn argmin(
self: @Tensor<T>, axis: usize, keepdims: Option<bool>, select_last_index: Option<bool>
) -> Tensor<usize>;
fn matmul(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<T>;
    fn exp(self: @Tensor<T>) -> Tensor<T>;
fn log(self: @Tensor<T>) -> Tensor<T>;
fn equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
fn greater(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
fn greater_equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
    fn less(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<i32>;
fn less_equal(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<i32>;
fn abs(self: @Tensor<T>) -> Tensor<T>;
fn neg(self: @Tensor<T>) -> Tensor<T>;
fn ceil(self: @Tensor<T>) -> Tensor<T>;
fn sin(self: @Tensor<T>) -> Tensor<T>;
fn cos(self: @Tensor<T>) -> Tensor<T>;
fn cumsum(
        self: @Tensor<T>, axis: usize, exclusive: Option<bool>, reverse: Option<bool>
) -> Tensor<T>;
fn flatten(self: @Tensor<T>, axis: usize) -> Tensor<T>;
fn sinh(self: @Tensor<T>) -> Tensor<T>;
fn tanh(self: @Tensor<T>) -> Tensor<T>;
fn cosh(self: @Tensor<T>) -> Tensor<T>;
fn asinh(self: @Tensor<T>) -> Tensor<T>;
    fn acosh(self: @Tensor<T>) -> Tensor<T>;
fn atan(self: @Tensor<T>) -> Tensor<T>;
fn asin(self: @Tensor<T>) -> Tensor<T>;
fn or(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
fn xor(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
fn acos(self: @Tensor<T>) -> Tensor<T>;
fn onehot(
self: @Tensor<T>, depth: usize, axis: Option<usize>, values: Span<usize>
    ) -> Tensor<T>;
fn sqrt(self: @Tensor<T>) -> Tensor<T>;
fn concat(tensors: Span<Tensor<T>>, axis: usize,) -> Tensor<T>;
fn quantize_linear(
self: @Tensor<T>, y_scale: @Tensor<T>, y_zero_point: @Tensor<T>
) -> Tensor::<i8>;
fn dequantize_linear(
self: @Tensor<i8>, x_scale: @Tensor<T>, x_zero_point: @Tensor<T>
) -> Tensor::<T>;
fn qlinear_add(
self: @Tensor<i8>,
a_scale: @Tensor<T>,
a_zero_point: @Tensor<T>,
b: @Tensor<i8>,
b_scale: @Tensor<T>,
b_zero_point: @Tensor<T>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>
    ) -> Tensor::<i8>;
fn qlinear_mul(
self: @Tensor<i8>,
a_scale: @Tensor<T>,
a_zero_point: @Tensor<T>,
b: @Tensor<i8>,
b_scale: @Tensor<T>,
b_zero_point: @Tensor<T>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>
) -> Tensor::<i8>;
fn qlinear_matmul(
self: @Tensor<i8>,
a_scale: @Tensor<T>,
a_zero_point: @Tensor<T>,
b: @Tensor<i8>,
b_scale: @Tensor<T>,
b_zero_point: @Tensor<T>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>
) -> Tensor::<i8>;
    fn qlinear_concat(
tensors: Span<Tensor<i8>>,
scales: Span<Tensor<T>>,
zero_points: Span<Tensor<T>>,
y_scale: @Tensor<T>,
y_zero_point: @Tensor<T>,
axis: usize
) -> Tensor::<i8>;
fn qlinear_leakyrelu(
self: @Tensor<i8>, a_scale: @Tensor<T>, a_zero_point: @Tensor<T>, alpha: T
) -> Tensor::<i8>;
fn slice(
self: @Tensor<T>,
starts: Span<usize>,
ends: Span<usize>,
axes: Option<Span<usize>>,
steps: Option<Span<usize>>
) -> Tensor<T>;
fn nonzero(self: @Tensor<T>) -> Tensor<usize>;
fn gather(self: @Tensor<T>, indices: Tensor<i32>, axis: Option<i32>) -> Tensor<T>;
    fn unsqueeze(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;