text
stringlengths 1
2.05k
|
---|
struct Dataset {\n",
" x_values: Tensor<FP16x16>,\n",
" y_values: Tensor<FP16x16>,\n",
"}\n",
"\n",
"
"impl DataPreprocessing of DatasetTrait {\n",
" fn normalize_dataset(ref self: Dataset) -> Dataset {\n",
" let mut x_values = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());\n",
" let mut y_values = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());\n",
"
" if self.x_values.shape.len() > 1 {\n",
" x_values = normalize_feature_data(self.x_values);\n",
" y_values = normalize_label_data(self.y_values);\n",
" }\n",
"
" if self.x_values.shape.len() == 1 {\n",
" x_values = normalize_label_data(self.x_values);\n",
" y_values = normalize_label_data(self.y_values);\n",
" }\n",
"\n",
" return Dataset { x_values, y_values };\n",
" }\n",
"}\n",
"\n",
"
"fn normalize_feature_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {\n",
" let mut x_min_array = ArrayTrait::<FP16x16>::new();\n",
" let mut x_max_array = ArrayTrait::<FP16x16>::new();\n",
" let mut x_range_array = ArrayTrait::<FP16x16>::new();\n",
" let mut normalized_array = ArrayTrait::<FP16x16>::new();\n",
"
" let transposed_tensor = tensor_data.transpose(axes: array![1, 0].span());\n",
" let tensor_shape = transposed_tensor.shape;\n",
" let tensor_row_len = *tensor_shape.at(0);
" let tensor_column_len = *tensor_shape.at(1);
"
" let mut i: u32 = 0;\n",
" loop {\n",
" if i >= tensor_row_len {\n",
" break ();\n",
" }\n",
" let mut transposed_tensor_row = get_tensor_data_by_row(transposed_tensor, i);\n",
" x_max_array.append(transposed_tensor_row.max_in_tensor());\n",
" x_min_array.append(transposed_tensor_row.min |
_in_tensor());\n",
" x_range_array\n",
" .append(transposed_tensor_row.max_in_tensor() - transposed_tensor_row.min_in_tensor());\n",
" i += 1;\n",
" };\n",
"
" let mut x_min = TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![1, tensor_row_len].span(), data: x_min_array.span());\n",
" let mut x_range = TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![1, tensor_row_len].span(), data: x_range_array.span());\n",
" let normalized_tensor = (tensor_data - x_min) / x_range;\n",
" return normalized_tensor;\n",
"}\n",
"\n",
"
"fn normalize_label_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {\n",
" let mut tensor_data_ = tensor_data;\n",
" let mut normalized_array = ArrayTrait::<FP16x16>::new();\n",
" let mut range = tensor_data.max_in_tensor() - tensor_data.min_in_tensor();\n",
"
" let mut i: u32 = 0;\n",
"\n",
" loop {\n",
" match tensor_data_.data.pop_front() {\n",
" Option::Some(tensor_val) => {\n",
" let mut diff = *tensor_val - tensor_data.min_in_tensor();\n",
" normalized_array.append(diff / range);\n",
" i += 1;\n",
" },\n",
" Option::None(_) => { break; }\n",
" };\n",
" };\n",
"
" let mut normalized_tensor = TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![tensor_data.data.len()].span(), data: normalized_array.span());\n",
" return normalized_tensor;\n",
"}\n",
"\n"
]
},
{
"cell_type": "markdown",
"id": "12784736",
"metadata": {},
"source": [
"
"\n",
"Implement the Multiple Linear Regression functions"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "ea7c8acc",
"metadata": {},
"outputs": [],
"source": [
"os.makedirs(f'multiple_linear_regression_aave/src/model/', exis |
t_ok=True)"
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "543d3c63",
"metadata": {},
"outputs": [],
"source": [
"! touch multiple_linear_regression_aave/src/model/multiple_linear_regression_model.cairo"
]
},
{
"cell_type": "code",
"execution_count": 45,
"id": "24d21d37",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Overwriting multiple_linear_regression_aave/src/model/multiple_linear_regression_model.cairo\n"
]
}
],
"source": [
"%%writefile multiple_linear_regression_aave/src/model/multiple_linear_regression_model.cairo\n",
"\n",
"use orion::operators::tensor::{\n",
" Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,\n",
" FP16x16TensorDiv, FP16x16TensorMul\n",
"};\n",
"use orion::numbers::{FP16x16, FixedTrait};\n",
"use multiple_linear_regresion::data_preprocessing::{Dataset, DatasetTrait};\n",
"use multiple_linear_regresion::helper_functions::{\n",
" get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,\n",
" normalize_user_x_inputs, rescale_predictions\n",
"};\n",
"\n",
"\n",
"
" |
struct MultipleLinearRegressionModel {\n",
" coefficients: Tensor<FP16x16>\n",
"}\n",
"\n",
"
"impl RegressionOperation of MultipleLinearRegressionModelTrait {\n",
"
" fn predict(\n",
" ref self: MultipleLinearRegressionModel, feature_inputs: Tensor<FP16x16>\n",
" ) -> Tensor<FP16x16> {\n",
"
" let mut prediction_result = TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());\n",
"\n",
" let mut result = ArrayTrait::<FP16x16>::new();\n",
"
" if feature_inputs.shape.len() > 1 {\n",
" let feature_values = add_bias_term(feature_inputs, 1);\n",
" let mut data_len: u32 = *feature_values.shape.at(0);\n",
" let mut i: u32 = 0;\n",
" loop {\n",
" if i >= data_len {\n",
" break ();\n",
" }\n",
" let feature_row_values = get_tensor_data_by_row(feature_values, i);\n",
" let predicted_values = feature_row_values.matmul(@self.coefficients);\n",
" result.append(*predicted_values.data.at(0));\n",
" i += 1;\n",
" };\n",
" prediction_result =\n",
" TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![result.len()].span(), data: result.span());\n",
" }\n",
"\n",
"
" if feature_inputs.shape.len() == 1 && self.coefficients.data.len() > 1 {\n",
" let feature_values = add_bias_term(feature_inputs, 1);\n",
" prediction_result = feature_values.matmul(@self.coefficients);\n",
" }\n",
"\n",
" return prediction_result;\n",
" }\n",
"}\n",
"\n",
"fn MultipleLinearRegression(dataset: Dataset) -> MultipleLinearRegressionModel {\n",
" let x |
_values_tranposed = transpose_tensor(dataset.x_values);\n",
" let x_values_tranposed_with_bias = add_bias_term(x_values_tranposed, 0);\n",
" let decorrelated_x_features = decorrelate_x_features(x_values_tranposed_with_bias);\n",
" let coefficients = compute_gradients(\n",
" decorrelated_x_features, dataset.y_values, x_values_tranposed_with_bias\n",
" );\n",
" return MultipleLinearRegressionModel { coefficients };\n",
"}\n",
"\n",
"
"fn add_bias_term(x_feature: Tensor<FP16x16>, axis: u32) -> Tensor<FP16x16> {\n",
" let mut x_feature_ = x_feature;\n",
" let mut tensor_with_bias = TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());\n",
" let mut result = ArrayTrait::<FP16x16>::new();\n",
"
" if x_feature.shape.len() > 1 {\n",
" let mut index: u32 = 0;\n",
" if axis == 1 {\n",
" index = 0;\n",
" } else {\n",
" index = 1;\n",
" }\n",
" let data_len = *x_feature.shape.at(index);
" let mut i: u32 = 0;\n",
" loop {\n",
" if i >= data_len {\n",
" break ();\n",
" }\n",
" result\n",
" .append(FixedTrait::new(65536, false));
" i += 1;\n",
" };\n",
" if axis == 0 {\n",
" let res_tensor = TensorTrait::new(\n",
" shape: array![1, data_len].span(), data: result.span()\n",
" );\n",
" tensor_with_bias =\n",
" TensorTrait::concat(tensors: array![x_feature, res_tensor].span(), axis: axis);\n",
" } else {\n",
" let res_tensor = TensorTrait::new(\n",
" shape: array![data_len, 1].span(), data: result.span()\n",
" );\n",
" tensor_with_bias =\n",
" TensorTrait:: |
concat(tensors: array![x_feature, res_tensor].span(), axis: axis);\n",
" }\n",
" }\n",
"
" if x_feature.shape.len() == 1 {\n",
" let mut j: u32 = 0;\n",
" loop {\n",
" match x_feature_.data.pop_front() {\n",
" Option::Some(x_val) => {\n",
" result.append(*x_val);\n",
" j += 1;\n",
" },\n",
" Option::None(_) => { break; }\n",
" };\n",
" };\n",
" result.append(FixedTrait::new(65536, false));
" tensor_with_bias =\n",
" TensorTrait::<FP16x16>::new(shape: array![result.len()].span(), data: result.span());\n",
" }\n",
" return tensor_with_bias;\n",
"}\n",
"\n",
"
"fn decorrelate_x_features(x_feature_data: Tensor<FP16x16>) -> Tensor<FP16x16> {\n",
" let mut input_tensor = x_feature_data;\n",
"\n",
" let mut i: u32 = 0;\n",
" loop {\n",
" if i >= *x_feature_data.shape.at(0) {\n",
" break ();\n",
" }\n",
" let mut placeholder = ArrayTrait::<FP16x16>::new();\n",
" let mut feature_row_values = get_tensor_data_by_row(input_tensor, i);\n",
" let mut feature_squared = feature_row_values.matmul(@feature_row_values);\n",
"
" if *feature_squared.data.at(0) == FixedTrait::new(0, false) {\n",
" feature_squared =\n",
" TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());\n",
" }\n",
"
" let mut j: u32 = i + 1;\n",
" loop {\n",
" if j >= *x_feature_data.shape.at(0) {\n",
" break ();\n",
" }\n",
" let mut remaining_tensor_values = get_tensor_data_by_row(input_tensor, j);\n",
" let feature_cross_pro |
duct = feature_row_values.matmul(@remaining_tensor_values);\n",
" let feature_gradients = feature_cross_product / feature_squared;\n",
" remaining_tensor_values = remaining_tensor_values\n",
" - (feature_row_values\n",
" * feature_gradients);
"
" let mut k: u32 = 0;\n",
" loop {\n",
" if k >= remaining_tensor_values.data.len() {\n",
" break ();\n",
" }\n",
" placeholder.append(*remaining_tensor_values.data.at(k));\n",
" k += 1;\n",
" };\n",
"\n",
" j += 1;\n",
" };\n",
"
" let mut decorrelated_tensor = TensorTrait::new(\n",
" shape: array![*x_feature_data.shape.at(0) - 1 - i, *x_feature_data.shape.at(1)].span(),\n",
" data: placeholder.span()\n",
" );\n",
" let mut original_tensor = input_tensor\n",
" .slice(\n",
" starts: array![0, 0].span(),\n",
" ends: array![i + 1, *x_feature_data.shape.at(1)].span(),\n",
" axes: Option::None(()),\n",
" steps: Option::Some(array![1, 1].span())\n",
" );\n",
" input_tensor =\n",
" TensorTrait::concat(\n",
" tensors: array![original_tensor, decorrelated_tensor].span(), axis: 0\n",
" );\n",
" i += 1;\n",
" };\n",
" return input_tensor;\n",
"}\n",
"\n",
"
"fn compute_gradients(\n",
" decorrelated_x_features: Tensor<FP16x16>,\n",
" y_values: Tensor<FP16x16>,\n",
" original_x_tensor_values: Tensor<FP16x16>\n",
") -> Tensor<FP16x16> {\n",
" let mut gradient_values_flipped = TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());\n",
"\n |
",
" let mut result = ArrayTrait::<FP16x16>::new();\n",
" let mut tensor_y_vals = y_values;\n",
" let mut i: u32 = *decorrelated_x_features.shape.at(0);\n",
"
" loop {\n",
" if i <= 0 {\n",
" break ();\n",
" }\n",
" let index_val = i - 1;\n",
" let mut decorelated_feature_row_values = get_tensor_data_by_row(\n",
" decorrelated_x_features, index_val\n",
" );
" let mut decorelated_features_squared = decorelated_feature_row_values\n",
" .matmul(@decorelated_feature_row_values);\n",
" let mut feature_label_cross_product = tensor_y_vals\n",
" .matmul(@decorelated_feature_row_values);
"
" if *decorelated_features_squared.data.at(0) == FixedTrait::new(0, false) {\n",
" decorelated_features_squared =\n",
" TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());\n",
" }\n",
"
" let mut single_gradient_value = feature_label_cross_product\n",
" / decorelated_features_squared;
" result.append(*single_gradient_value.data.at(0));\n",
"
" let mut original_x_tensor_row_values = get_tensor_data_by_row(\n",
" original_x_tensor_values, index_val\n",
" );\n",
" tensor_y_vals = tensor_y_vals\n",
" - (original_x_tensor_row_values\n",
" * single_gradient_value);
" i -= 1;\n",
" };\n",
"
" let final_gradients = TensorTrait::new(\n",
" shape: array![*decorrelated_x_features.shape.at(0)].span(), data: result.span()\n",
" );\n",
"\n",
" let mut reverse_grad_array = ArrayTrait::<FP16x16>::new();\n",
" let mut data_len: u32 = final_gradients.data.len();\n",
" loop {\n",
" if |
data_len <= 0 {\n",
" break ();\n",
" }\n",
" let temp_val = data_len - 1;\n",
" reverse_grad_array.append(*final_gradients.data.at(temp_val));\n",
" data_len -= 1;\n",
" };\n",
"
" let gradient_values_flipped = TensorTrait::<\n",
" FP16x16\n",
" >::new(shape: array![reverse_grad_array.len()].span(), data: reverse_grad_array.span());\n",
"\n",
" return gradient_values_flipped;\n",
"}\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "6b37fbe5",
"metadata": {},
"outputs": [],
"source": [
"! touch multiple_linear_regression_aave/src/model.cairo"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "22f961a5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Overwriting multiple_linear_regression_aave/src/model.cairo\n"
]
}
],
"source": [
"%%writefile multiple_linear_regression_aave/src/model.cairo\n",
"mod multiple_linear_regression_model;"
]
},
{
"cell_type": "markdown",
"id": "8c1f41c6",
"metadata": {},
"source": [
"
"\n",
"Running some checks to ensure the model is performing as expected. Some of the checks involve:\n",
"- data normalizations checks\n",
"- tensor shape/dimension check\n",
"- coefficient value and dimension checks \n",
"- model accuracy deviance checks"
]
},
{
"cell_type": "code",
"execution_count": 48,
"id": "dfb70ccd",
"metadata": {},
"outputs": [],
"source": [
"! touch multiple_linear_regression_aave/src/test.cairo"
]
},
{
"cell_type": "code",
"execution_count": 49,
"id": "4dd10050",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Overwriting multiple_linear_regression_aave/src/test.cairo\n"
]
}
],
"source": [
"%%writefile multiple_linear |
_regression_aave/src/test.cairo\n",
"\n",
"\n",
"\n",
"
"use debug::PrintTrait;\n",
"use array::{ArrayTrait, SpanTrait};\n",
"\n",
"use multiple_linear_regresion::datasets::aave_data::aave_x_features::aave_x_features;\n",
"use multiple_linear_regresion::datasets::aave_data::aave_y_labels::aave_y_labels; \n",
"use multiple_linear_regresion::datasets::user_inputs_data::aave_weth_revenue_data_input::{aave_weth_revenue_data_input }; \n",
"\n",
"use multiple_linear_regresion::model::multiple_linear_regression_model::{\n",
" MultipleLinearRegressionModel, MultipleLinearRegression, MultipleLinearRegressionModelTrait\n",
"};\n",
"use multiple_linear_regresion::data_preprocessing::{Dataset, DatasetTrait};\n",
"use multiple_linear_regresion::helper_functions::{get_tensor_data_by_row, transpose_tensor, calculate_mean , \n",
"calculate_r_score, normalize_user_x_inputs, rescale_predictions};\n",
"\n",
"use orion::numbers::{FP16x16, FixedTrait};\n",
"\n",
"\n",
"use orion::operators::tensor::{\n",
" Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, \n",
" FP16x16TensorSub, FP16x16TensorAdd, FP16x16TensorDiv, FP16x16TensorMul};\n",
"\n",
"
"
" |
fn multiple_linear_regression_test() {\n",
"\n",
"\n",
"
"\n",
"let mut main_x_vals = aave_x_features();\n",
"let mut main_y_vals = aave_y_labels();\n",
"let mut dataset = Dataset{x_values: main_x_vals,y_values:main_y_vals};\n",
"let mut normalized_dataset = dataset.normalize_dataset();\n",
"let mut model = MultipleLinearRegression(normalized_dataset);\n",
"let mut model_coefficients = model.coefficients;\n",
"let mut reconstructed_ys = model.predict (normalized_dataset.x_values);\n",
"let mut r_squared_score = calculate_r_score(normalized_dataset.y_values,reconstructed_ys);\n",
"r_squared_score.print(); \n",
"\n",
"
"assert(normalized_dataset.x_values.max_in_tensor() <= FixedTrait::new(65536, false), 'normalized x not between 0-1');\n",
"assert(normalized_dataset.x_values.min_in_tensor() >= FixedTrait::new(0, false), 'normalized x not between 0-1');\n",
"assert(normalized_dataset.y_values.max_in_tensor() <= FixedTrait::new(65536, false), 'normalized y not between 0-1');\n",
"assert(normalized_dataset.x_values.min_in_tensor() >= FixedTrait::new(0, false), 'normalized y not between 0-1');\n",
"
"assert(normalized_dataset.x_values.data.len()== main_x_vals.data.len() && \n",
"normalized_dataset.y_values.data.len()== main_y_vals.data.len() , 'normalized data shape mismatch');\n",
"
"assert(model.coefficients.data.len() == *main_x_vals.shape.at(1)+1, 'coefficient data shape mismatch');\n",
"
"assert(r_squared_score >= FixedTrait::new(62259, false), 'AAVE model acc. less than 95%');\n",
"\n",
"
"let last_7_days_aave_data = aave_weth_revenue_data_input();\n",
"let last_7_days_aave_data_normalized = normalize_user_x_inputs(last_7_days_aave_data, main_x_vals );\n",
"let mut forecast_results = model.predict (last_7_days_aave_data_normalized); \n",
"let mut rescale_forecasts = rescale_predictions(forecast_results, main_y_vals);
"(*rescale_forecasts.data.at(0)).print(); \n",
"(*rescale_fo |
recasts.data.at(1)).print(); \n",
"(*rescale_forecasts.data.at(2)).print(); \n",
"(*rescale_forecasts.data.at(5)).print(); \n",
"(*rescale_forecasts.data.at(6)).print(); \n",
"}\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ae8fd10",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
} |
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
use linear_regresion::helper_functions::{
get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,
normalize_user_x_inputs, rescale_predictions
}; |
// A supervised-learning sample set: feature inputs paired with target labels,
// both stored as Orion FP16x16 tensors.
struct Dataset {
    // Feature tensor; 2-D (rows = samples, cols = features) or 1-D (single feature).
    x_values: Tensor<FP16x16>,
    // Label tensor; 1-D, one label per sample.
    y_values: Tensor<FP16x16>,
}
// Min-max preprocessing entry point for a Dataset.
impl DataPreprocessing of DatasetTrait {
    // Returns a new Dataset with x and y rescaled to the [0, 1] range.
    // 2-D x_values are normalized per feature column (via normalize_feature_data);
    // 1-D x_values are normalized as a single series (via normalize_label_data).
    // y_values are always normalized as a single series.
    fn normalize_dataset(ref self: Dataset) -> Dataset {
        // Placeholder 1-element tensors; one of the branches below overwrites them.
        let mut x_values = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        let mut y_values = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        // Multi-feature (2-D) input: per-column normalization.
        if self.x_values.shape.len() > 1 {
            x_values = normalize_feature_data(self.x_values);
            y_values = normalize_label_data(self.y_values);
        }
        // Single-feature (1-D) input: whole-series normalization.
        if self.x_values.shape.len() == 1 {
            x_values = normalize_label_data(self.x_values);
            y_values = normalize_label_data(self.y_values);
        }
        return Dataset { x_values, y_values };
    }
}
// Per-column min-max normalization of a 2-D feature tensor:
// out[r][c] = (x[r][c] - min(col c)) / (max(col c) - min(col c)).
// NOTE(review): a column with constant values yields a zero range and the final
// division would divide by zero — no guard here; confirm callers exclude that case.
fn normalize_feature_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut x_min_array = ArrayTrait::<FP16x16>::new();
    let mut x_max_array = ArrayTrait::<FP16x16>::new();
    let mut x_range_array = ArrayTrait::<FP16x16>::new();
    // NOTE(review): normalized_array is never used in this function.
    let mut normalized_array = ArrayTrait::<FP16x16>::new();
    // Transpose so each original feature column becomes a row we can scan.
    let transposed_tensor = tensor_data.transpose(axes: array![1, 0].span());
    let tensor_shape = transposed_tensor.shape;
    let tensor_row_len = *tensor_shape.at(0);
    // NOTE(review): tensor_column_len is never used.
    let tensor_column_len = *tensor_shape.at(1);
    // Collect min / max / range for every feature (row of the transposed tensor).
    let mut i: u32 = 0;
    loop {
        if i >= tensor_row_len {
            break ();
        }
        let mut transposed_tensor_row = get_tensor_data_by_row(transposed_tensor, i);
        x_max_array.append(transposed_tensor_row.max_in_tensor());
        x_min_array.append(transposed_tensor_row.min_in_tensor());
        x_range_array
            .append(transposed_tensor_row.max_in_tensor() - transposed_tensor_row.min_in_tensor());
        i += 1;
    };
    // Shape [1, n_features] so the subtraction/division below lines up with the
    // original (untransposed) tensor — presumably relies on Orion elementwise
    // broadcasting; TODO confirm against the Orion tensor-arithmetic docs.
    let mut x_min = TensorTrait::<
        FP16x16
    >::new(shape: array![1, tensor_row_len].span(), data: x_min_array.span());
    let mut x_range = TensorTrait::<
        FP16x16
    >::new(shape |
: array![1, tensor_row_len].span(), data: x_range_array.span());
    let normalized_tensor = (tensor_data - x_min) / x_range;
    return normalized_tensor;
}
// Min-max normalization of a 1-D tensor: out[i] = (x[i] - min) / (max - min).
// NOTE(review): a constant input tensor makes `range` zero and the per-element
// division would divide by zero — no guard here.
fn normalize_label_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    // Work on a copy so pop_front() does not disturb the caller's view;
    // the original is kept for min_in_tensor() and the final length.
    let mut tensor_data_ = tensor_data;
    let mut normalized_array = ArrayTrait::<FP16x16>::new();
    let mut range = tensor_data.max_in_tensor() - tensor_data.min_in_tensor();
    // NOTE(review): `i` only counts iterations and is never read afterwards.
    let mut i: u32 = 0;

    // Drain the data span element by element, appending the scaled value.
    loop {
        match tensor_data_.data.pop_front() {
            Option::Some(tensor_val) => {
                let mut diff = *tensor_val - tensor_data.min_in_tensor();
                normalized_array.append(diff / range);
                i += 1;
            },
            Option::None(_) => { break; }
        };
    };
    // Result keeps the input's length as a 1-D shape.
    let mut normalized_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![tensor_data.data.len()].span(), data: normalized_array.span());
    return normalized_tensor;
}
mod linear_data;
|
mod x_feature_data;
mod y_label_data;
|
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// Hard-coded x-feature dataset: 50 FP16x16 values as a 1-D tensor.
// FixedTrait::new(magnitude, sign): sign == true marks a negative value;
// magnitudes are raw FP16x16 words (presumably value = magnitude / 2^16,
// i.e. 65536 == 1.0 — TODO confirm against the Orion FP16x16 docs).
fn x_feature_data() -> Tensor<FP16x16> {
    // 50 data points, matching the declared shape [50].
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![50].span(),
        data: array![
            FixedTrait::new(90639, false ),
            FixedTrait::new(12581, true ),
            FixedTrait::new(33595, false ),
            FixedTrait::new(92893, false ),
            FixedTrait::new(64841, false ),
            FixedTrait::new(21784, false ),
            FixedTrait::new(93600, false ),
            FixedTrait::new(139107, false ),
            FixedTrait::new(46680, true ),
            FixedTrait::new(148678, true ),
            FixedTrait::new(55700, false ),
            FixedTrait::new(63442, false ),
            FixedTrait::new(16625, false ),
            FixedTrait::new(15088, false ),
            FixedTrait::new(109945, false ),
            FixedTrait::new(22098, false ),
            FixedTrait::new(28923, false ),
            FixedTrait::new(55032, true ),
            FixedTrait::new(29968, false ),
            FixedTrait::new(17353, false ),
            FixedTrait::new(126, true ),
            FixedTrait::new(6705, true ),
            FixedTrait::new(81234, true ),
            FixedTrait::new(38498, true ),
            FixedTrait::new(75536, true ),
            FixedTrait::new(984, true ),
            FixedTrait::new(45491, true ),
            FixedTrait::new(88496, false ),
            FixedTrait::new(8992, false ),
            FixedTrait::new(28549, false ),
            FixedTrait::new(61676, true ),
            FixedTrait::new(54096, true ),
            FixedTrait::new(91046, false ),
            FixedTrait::new(53660, false ),
            FixedTrait::new(6145, true ),
            FixedTrait::new(26994, false ),
            FixedTrait::new(90657, false ),
            FixedTrait::new(21638, true ),
            FixedTrait::new(50848, false ),
            FixedTrait::new(4550, true ),
            FixedTrait::new(7560, true ),
            FixedTrait::new(41550, false ),
            FixedTrait::new(200, false ),
            FixedTrait::new(102341, false ),
            FixedTrait::new(25789, false ),
            FixedTrait::new(9158, false ),
            FixedTrait::new(102276, true ),
            Fixe |
dTrait::new(76823, true ),
            FixedTrait::new(69440, true ),
            FixedTrait::new(17547, true ),
        ].span()
    );
    return tensor;
}
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// Hard-coded y-label dataset: 50 non-negative FP16x16 values as a 1-D tensor,
// paired index-for-index with x_feature_data().
fn y_label_data() -> Tensor<FP16x16> {
    // 50 data points, matching the declared shape [50].
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![50].span(),
        data: array![
            FixedTrait::new(7282724, false ),
            FixedTrait::new(6435011, false ),
            FixedTrait::new(6662231, false ),
            FixedTrait::new(7271410, false ),
            FixedTrait::new(7099095, false ),
            FixedTrait::new(6751687, false ),
            FixedTrait::new(7403695, false ),
            FixedTrait::new(7831893, false ),
            FixedTrait::new(6135683, false ),
            FixedTrait::new(5448106, false ),
            FixedTrait::new(6992113, false ),
            FixedTrait::new(7129256, false ),
            FixedTrait::new(6678313, false ),
            FixedTrait::new(6524452, false ),
            FixedTrait::new(7538849, false ),
            FixedTrait::new(6685568, false ),
            FixedTrait::new(6749158, false ),
            FixedTrait::new(6149931, false ),
            FixedTrait::new(6876758, false ),
            FixedTrait::new(6623147, false ),
            FixedTrait::new(6679189, false ),
            FixedTrait::new(6578635, false ),
            FixedTrait::new(5894520, false ),
            FixedTrait::new(6161430, false ),
            FixedTrait::new(5887716, false ),
            FixedTrait::new(6440009, false ),
            FixedTrait::new(6209384, false ),
            FixedTrait::new(7208597, false ),
            FixedTrait::new(6679473, false ),
            FixedTrait::new(6809111, false ),
            FixedTrait::new(6068970, false ),
            FixedTrait::new(6089744, false ),
            FixedTrait::new(7360056, false ),
            FixedTrait::new(6971060, false ),
            FixedTrait::new(6419231, false ),
            FixedTrait::new(6780044, false ),
            FixedTrait::new(7279453, false ),
            FixedTrait::new(6350620, false ),
            FixedTrait::new(7023820, false ),
            FixedTrait::new(6568475, false ),
            FixedTrait::new(6528424, false ),
            FixedTrait::new(6936953, false ),
            FixedTrait::new(6511689, false ),
            FixedTrait::new(7367935, false ),
            F |
ixedTrait::new(6860285, false ),
            FixedTrait::new(6800462, false ),
            FixedTrait::new(5650037, false ),
            FixedTrait::new(5915425, false ),
            FixedTrait::new(5913912, false ),
            FixedTrait::new(6491295, false ),
        ].span()
    );
    return tensor;
}
use debug::PrintTrait;
use array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
// Extracts row `row_index` of a 2-D tensor as a new 1-D tensor.
// Expects tensor_data to be 2-D (reads shape.at(1) for the column count);
// no bounds check on row_index here — indexing presumably panics out of range.
fn get_tensor_data_by_row(tensor_data: Tensor<FP16x16>, row_index: u32,) -> Tensor<FP16x16> {
    let column_len = *tensor_data.shape.at(1);
    let mut result = ArrayTrait::<FP16x16>::new();
    // Copy each element of the requested row, left to right.
    let mut i: u32 = 0;
    loop {
        if i >= column_len {
            break ();
        }
        result.append(tensor_data.at(indices: array![row_index, i].span()));
        i += 1;
    };
    // 1-D result of length column_len.
    let resultant_tensor = TensorTrait::<
        FP16x16
    >::new(array![column_len].span(), data: result.span());
    return resultant_tensor;
}
// Convenience wrapper: swaps the two axes of a 2-D tensor (matrix transpose).
fn transpose_tensor(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let tensor_transposed = tensor_data.transpose(axes: array![1, 0].span());
    return tensor_transposed;
}
// Arithmetic mean of all elements: last entry of the cumulative sum divided by
// the element count. NOTE(review): an empty tensor underflows data.len() - 1
// and makes tensor_size zero — callers must pass a non-empty tensor.
fn calculate_mean(tensor_data: Tensor<FP16x16>) -> FP16x16 {
    let tensor_size = FixedTrait::<FP16x16>::new_unscaled(tensor_data.data.len(), false);
    // cumsum along axis 0; its final element is the total sum.
    let cumulated_sum = tensor_data.cumsum(0, Option::None(()), Option::None(()));
    let sum_result = cumulated_sum.data[tensor_data.data.len() - 1];
    let mean = *sum_result / tensor_size;
    return mean;
}
// Coefficient of determination: R^2 = 1 - SS_res / SS_tot, where
// SS_res = sum((y - y_pred)^2) and SS_tot = sum((y - mean(y))^2).
// Requires Y_values and Y_pred_values to have equal length (Y_pred_values is
// indexed by position while Y_values is drained); constant Y_values make
// SS_tot zero and the final division would divide by zero.
fn calculate_r_score(Y_values: Tensor<FP16x16>, Y_pred_values: Tensor<FP16x16>) -> FP16x16 {
    // Draining copy; the original Y_values is kept for lengths and the mean.
    let mut Y_values_ = Y_values;
    let mean_y_value = calculate_mean(Y_values);
    // 1-D shapes of length len(Y_values) for the two squared-diff tensors.
    let mut squared_diff_shape = array::ArrayTrait::new();
    squared_diff_shape.append(Y_values.data.len());
    let mut squared_diff_vals = array::ArrayTrait::new();
    let mut squared_mean_diff_shape = array::ArrayTrait::new();
    squared_mean_diff_shape.append(Y_values.data.len());
    let mut squared_mean_diff_vals = array::ArrayTrait::new();
    // Accumulate (y - y_pred)^2 and (y - mean)^2 per element.
    let mut i: u32 = 0;
    loop {
        match Y_values_.data.pop_front() {
            Option::Some(y_value) => { |
                let diff_pred = *y_value - *Y_pred_values.data.at(i);
                let squared_diff = diff_pred * diff_pred;
                squared_diff_vals.append(squared_diff);
                let diff_mean = *y_value - mean_y_value;
                let squared_mean_diff = diff_mean * diff_mean;
                squared_mean_diff_vals.append(squared_mean_diff);
                i += 1;
            },
            Option::None(_) => { break; }
        }
    };
    let squared_diff_tensor = TensorTrait::<
        FP16x16
    >::new(squared_diff_shape.span(), squared_diff_vals.span());
    let squared_mean_diff_tensor = TensorTrait::<
        FP16x16
    >::new(squared_mean_diff_shape.span(), squared_mean_diff_vals.span());
    // Totals taken as the last element of each cumulative sum.
    let sum_squared_diff = squared_diff_tensor.cumsum(0, Option::None(()), Option::None(()));
    let sum_squared_mean_diff = squared_mean_diff_tensor
        .cumsum(0, Option::None(()), Option::None(()));
    // R^2 = 1 - SS_res / SS_tot (new_unscaled(1, false) is the fixed-point 1.0).
    let r_score = FixedTrait::new_unscaled(1, false)
        - *sum_squared_diff.data.at(Y_values.data.len() - 1)
        / *sum_squared_mean_diff.data.at(Y_values.data.len() - 1);
    return r_score;
}
// Normalizes user-supplied feature inputs with the min/range statistics of the
// ORIGINAL training features, so inference inputs land on the same [0, 1] scale
// the model was trained on. Handles 2-D and 1-D original_x_values, and within
// the 2-D case both 2-D (batch) and 1-D (single sample) x_inputs.
fn normalize_user_x_inputs(
    x_inputs: Tensor<FP16x16>, original_x_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    // Placeholder 1-element tensor; overwritten by whichever branch applies.
    let mut x_inputs_normalized = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
    let mut x_min = ArrayTrait::<FP16x16>::new();
    let mut x_max = ArrayTrait::<FP16x16>::new();
    let mut x_range = ArrayTrait::<FP16x16>::new();
    let mut result = ArrayTrait::<FP16x16>::new();
    // Multi-feature training data: gather per-column min/max/range.
    if original_x_values.shape.len() > 1 {
        let transposed_tensor = original_x_values.transpose(axes: array![1, 0].span());
        let data_len = *transposed_tensor.shape.at(0);
        let mut i: u32 = 0;
        loop {
            if i >= data_len {
                break ();
            }
            let mut transposed_tensor_row = get_tensor_data_by_row(transposed_tensor, i);
            x_min.append(transposed_ |
tensor_row.min_in_tensor());
            x_max.append(transposed_tensor_row.max_in_tensor());
            x_range
                .append(
                    transposed_tensor_row.max_in_tensor() - transposed_tensor_row.min_in_tensor()
                );
            i += 1;
        };
        let mut x_min_tensor = TensorTrait::new(shape: array![data_len].span(), data: x_min.span());
        // NOTE(review): x_max_tensor is built but never used — min and range suffice.
        let mut x_max_tensor = TensorTrait::new(shape: array![data_len].span(), data: x_max.span());
        let mut x_range_tensor = TensorTrait::new(
            shape: array![data_len].span(), data: x_range.span()
        );
        // Batch of inputs: normalize row by row, then reassemble the 2-D shape.
        if x_inputs.shape.len() > 1 {
            let mut j: u32 = 0;
            loop {
                if j >= *x_inputs.shape.at(0) {
                    break ();
                };
                let mut row_data = get_tensor_data_by_row(x_inputs, j);
                let mut norm_row_data = (row_data - x_min_tensor) / x_range_tensor;
                let mut k: u32 = 0;
                loop {
                    if k >= norm_row_data.data.len() {
                        break ();
                    };
                    result.append(*norm_row_data.data.at(k));
                    k += 1;
                };
                j += 1;
            };
            x_inputs_normalized =
                TensorTrait::<
                    FP16x16
                >::new(
                    array![*x_inputs.shape.at(0), *x_inputs.shape.at(1)].span(), data: result.span()
                );
        };
        // Single sample: one elementwise (x - min) / range.
        if x_inputs.shape.len() == 1 {
            x_inputs_normalized = (x_inputs - x_min_tensor) / x_range_tensor;
        };
    }
    // Single-feature training data: scalar min/max/range as 1-element tensors.
    if original_x_values.shape.len() == 1 {
        let mut x_min_tensor = TensorTrait::<
            FP16x16
        >::new(shape: array![1].span(), data: array![original_x_values.min_in_tensor()].span());
        // NOTE(review): x_max_tensor unused here as well.
        let mut x_max_tensor = TensorTrait::<
            FP16x16
        >::new(shape: array![1].span(), data: array![original_x_values.max_in_tensor()].span() |
);
        let mut x_range_tensor = TensorTrait::<
            FP16x16
        >::new(
            shape: array![1].span(),
            data: array![original_x_values.max_in_tensor() - original_x_values.min_in_tensor()]
                .span()
        );
        // NOTE(review): `diff` is computed but never used.
        let mut diff = ((x_inputs - x_min_tensor));
        x_inputs_normalized = ((x_inputs - x_min_tensor)) / x_range_tensor;
    };
    return x_inputs_normalized;
}
// Maps normalized model predictions back to the original label scale:
// rescaled = prediction * (max(y) - min(y)) + min(y), the inverse of the
// min-max normalization applied to y_values during training.
fn rescale_predictions(
    prediction_result: Tensor<FP16x16>, y_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    // Placeholder 1-element tensor; overwritten below.
    let mut rescale_predictions = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
    // NOTE(review): these three arrays are declared but never used.
    let mut y_min_array = ArrayTrait::<FP16x16>::new();
    let mut y_max_array = ArrayTrait::<FP16x16>::new();
    let mut y_range_array = ArrayTrait::<FP16x16>::new();
    let mut y_max = y_values.max_in_tensor();
    let mut y_min = y_values.min_in_tensor();
    let mut y_range = y_values.max_in_tensor() - y_values.min_in_tensor();
    // Scalars wrapped as 1-element tensors for elementwise tensor arithmetic.
    let y_min_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![y_min].span());
    // NOTE(review): y_max_tensor is built but unused — only min and range matter.
    let y_max_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![y_max].span());
    let y_range_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![y_range].span());
    rescale_predictions = (prediction_result * y_range_tensor) + y_min_tensor;
    return rescale_predictions;
}
mod test;
mod data_preprocessing;
mod helper_functions;
mod datasets;
mod model; |
mod linear_regression_model;
|
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
use linear_regresion::data_preprocessing::{Dataset, DatasetTrait};
use linear_regresion::helper_functions::{
get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,
normalize_user_x_inputs, rescale_predictions
}; |
// Fitted simple (single-feature) linear regression: y = gradient * x + bias.
struct LinearRegressionModel {
    // Slope (beta) as a 1-element tensor.
    gradient: Tensor<FP16x16>,
    // Intercept as a 1-element tensor.
    bias: Tensor<FP16x16>
}
// Inference for the fitted simple linear regression model.
impl RegressionOperation of LinearRegressionModelTrait {
    // Predicts labels for x_input elementwise: prediction = gradient * x + bias.
    // gradient and bias are 1-element tensors — presumably broadcast across
    // x_input by Orion's elementwise ops; TODO confirm.
    fn predict(ref self: LinearRegressionModel, x_input: Tensor<FP16x16>) -> Tensor<FP16x16> {
        let gradient = self.gradient;
        let bias = self.bias;
        let mut prediction = (gradient * x_input) + bias;
        return prediction;
    }
}
// Fits a simple linear regression to the dataset: computes the ordinary
// least-squares slope and intercept, then packs them into a model.
// NOTE(review): compute_bias calls compute_gradient again internally, so the
// gradient is computed twice per fit.
fn LinearRegression(dataset: Dataset) -> LinearRegressionModel {
    let gradient = compute_gradient(dataset);
    let bias = compute_bias(dataset);
    return LinearRegressionModel { gradient, bias };
}
// Mean of all elements, returned as a 1-element tensor (tensor form of
// calculate_mean, for use in tensor arithmetic). Same empty-input caveat:
// data.len() - 1 underflows on an empty tensor.
fn compute_mean(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let tensor_size = FixedTrait::<FP16x16>::new_unscaled(tensor_data.data.len(), false);
    // Total sum = last element of the cumulative sum along axis 0.
    let cumulated_sum = tensor_data.cumsum(0, Option::None(()), Option::None(()));
    let sum_result = cumulated_sum.data[tensor_data.data.len() - 1];
    let mean = *sum_result / tensor_size;
    // Wrap the scalar mean as a shape-[1] tensor.
    let mut result_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![mean].span());
    return result_tensor;
}
// Subtracts the mean of `tensor_data` from every element, returning a 1-D
// tensor with the same number of elements.
// Fix: dropped the loop counter `i`, which was incremented but never read.
fn deviation_from_mean(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut tensor_data_ = tensor_data;
    let mean_value = calculate_mean(tensor_data);
    let mut tensor_shape = array::ArrayTrait::new();
    tensor_shape.append(tensor_data.data.len());
    let mut deviation_values = array::ArrayTrait::new();
    loop {
        match tensor_data_.data.pop_front() {
            Option::Some(tensor_val) => {
                // Signed distance of this element from the dataset mean.
                deviation_values.append(*tensor_val - mean_value);
            },
            Option::None(_) => { break; }
        };
    };
    let distance_from_mean_tensor = TensorTrait::<
        FP16x16
    >::new(tensor_shape.span(), deviation_values.span());
    return distance_from_mean_tensor;
}
// OLS slope: beta = cov(x, y) / var(x), where each quantity is the dot
// product of the mean-centred series (1-D matmul yields a 1-element tensor).
fn compute_gradient(dataset: Dataset) -> Tensor<FP16x16> {
    let centred_x = deviation_from_mean(dataset.x_values);
    let centred_y = deviation_from_mean(dataset.y_values);
    let covariance_xy = centred_x.matmul(@centred_y);
    let variance_x = centred_x.matmul(@centred_x);
    return covariance_xy / variance_x;
}
// OLS intercept: b = mean(y) - m * mean(x).
fn compute_bias(dataset: Dataset) -> Tensor<FP16x16> {
    let mean_x = compute_mean(dataset.x_values);
    let mean_y = compute_mean(dataset.y_values);
    let slope = compute_gradient(dataset);
    return mean_y - (slope * mean_x);
}
use debug::PrintTrait;
use array::{ArrayTrait, SpanTrait};
use linear_regresion::datasets::linear_data::x_feature_data::x_feature_data;
use linear_regresion::datasets::linear_data::y_label_data::y_label_data;
use orion::numbers::{FP16x16, FixedTrait};
use linear_regresion::model::linear_regression_model::{
LinearRegressionModel, compute_mean, LinearRegression, LinearRegressionModelTrait
};
use linear_regresion::data_preprocessing::{Dataset, DatasetTrait};
use linear_regresion::helper_functions::{get_tensor_data_by_row, transpose_tensor, calculate_mean ,
calculate_r_score, normalize_user_x_inputs, rescale_predictions};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd,
FP16x16TensorSub, FP16x16TensorAdd, FP16x16TensorDiv, FP16x16TensorMul}; |
// End-to-end check of the univariate model on the bundled linear dataset:
// fits, reconstructs the labels, and requires R^2 >= 62259/2^16 (~0.95).
// Fix: removed the unused local `gradient` (copied out of the model but
// never read).
fn multiple_linear_regression_test() {
    let mut main_x_vals = x_feature_data();
    let mut main_y_vals = y_label_data();
    let dataset = Dataset { x_values: main_x_vals, y_values: main_y_vals };
    let mut model = LinearRegression(dataset);
    let mut reconstructed_ys = model.predict(main_x_vals);
    let mut r_squared_score = calculate_r_score(main_y_vals, reconstructed_ys);
    r_squared_score.print();
    assert(model.gradient.data.len() == 1, 'gradient data shape mismatch');
    assert(model.bias.data.len() == 1, 'bias data shape mismatch');
    assert(r_squared_score >= FixedTrait::new(62259, false), 'Linear model acc. less than 95%');
    // Spot-check predictions at x = 1.0 and x = -1.0 (FP16x16: 65536 == 1.0).
    let mut user_value = TensorTrait::<
        FP16x16
    >::new(
        shape: array![2].span(),
        data: array![FixedTrait::new(65536, false), FixedTrait::new(65536, true)].span()
    );
    let mut prediction_results = model.predict(user_value);
    (*prediction_results.data.at(0)).print();
    (*prediction_results.data.at(1)).print();
}
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
use multiple_linear_regresion::helper_functions::{
get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,
normalize_user_x_inputs, rescale_predictions
}; |
// Container pairing feature values with their target labels.
struct Dataset {
    x_values: Tensor<FP16x16>, // features; may be 1-D or 2-D (see normalize_dataset)
    y_values: Tensor<FP16x16>, // labels
}
// Min-max scaling of a whole dataset into the [0, 1] range.
impl DataPreprocessing of DatasetTrait {
    // Returns a new Dataset with normalized x and y values.
    // 2-D features are scaled per column; 1-D features are scaled as a whole.
    fn normalize_dataset(ref self: Dataset) -> Dataset {
        // Placeholders, overwritten below for both 1-D and 2-D feature shapes.
        let mut x_values = TensorTrait::<
            FP16x16
        >::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        let mut y_values = TensorTrait::<
            FP16x16
        >::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        let feature_rank = self.x_values.shape.len();
        if feature_rank > 1 {
            x_values = normalize_feature_data(self.x_values);
            y_values = normalize_label_data(self.y_values);
        }
        if feature_rank == 1 {
            x_values = normalize_label_data(self.x_values);
            y_values = normalize_label_data(self.y_values);
        }
        return Dataset { x_values, y_values };
    }
}
// Column-wise min-max normalization of a 2-D feature tensor:
// each feature column becomes (x - min) / (max - min).
// Fixes: removed unused locals (`x_max_array`, `normalized_array`,
// `tensor_column_len`) and computes each column's extrema once instead of
// calling max_in_tensor/min_in_tensor twice per column.
fn normalize_feature_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut x_min_array = ArrayTrait::<FP16x16>::new();
    let mut x_range_array = ArrayTrait::<FP16x16>::new();
    // Transpose so every original column becomes a scannable row.
    let transposed_tensor = tensor_data.transpose(axes: array![1, 0].span());
    let tensor_row_len = *transposed_tensor.shape.at(0);
    let mut i: u32 = 0;
    loop {
        if i >= tensor_row_len {
            break ();
        }
        let transposed_tensor_row = get_tensor_data_by_row(transposed_tensor, i);
        // Extrema of this feature column, computed once and reused.
        let row_max = transposed_tensor_row.max_in_tensor();
        let row_min = transposed_tensor_row.min_in_tensor();
        x_min_array.append(row_min);
        x_range_array.append(row_max - row_min);
        i += 1;
    };
    let x_min = TensorTrait::<
        FP16x16
    >::new(shape: array![1, tensor_row_len].span(), data: x_min_array.span());
    let x_range = TensorTrait::<
        FP16x16
    >::new(shape: array![1, tensor_row_len].span(), data: x_range_array.span());
    // The [1, n_features] min/range tensors broadcast across every sample row.
    let normalized_tensor = (tensor_data - x_min) / x_range;
    return normalized_tensor;
}
// Min-max normalizes a 1-D tensor: every element becomes (v - min) / range.
// Fixes: hoists min_in_tensor out of the loop (it was recomputed for every
// element, making the loop quadratic) and drops the unused counter `i`.
fn normalize_label_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut tensor_data_ = tensor_data;
    let mut normalized_array = ArrayTrait::<FP16x16>::new();
    // Compute the extrema once; each *_in_tensor call is a full scan.
    let min_value = tensor_data.min_in_tensor();
    let range = tensor_data.max_in_tensor() - min_value;
    loop {
        match tensor_data_.data.pop_front() {
            Option::Some(tensor_val) => {
                normalized_array.append((*tensor_val - min_value) / range);
            },
            Option::None(_) => { break; }
        };
    };
    let mut normalized_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![tensor_data.data.len()].span(), data: normalized_array.span());
    return normalized_tensor;
}
mod aave_data;
mod user_inputs_data;
|
mod aave_x_features;
mod aave_y_labels;
|
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// Training features: 24 AAVE market observations with 9 features each,
// encoded as FP16x16 fixed-point values (raw = value * 2^16).
// NOTE(review): the meaning of each of the 9 columns is not documented in
// this file — confirm against the pipeline that generated these constants.
fn aave_x_features() -> Tensor<FP16x16> {
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![24,9].span(),
        data: array![
            FixedTrait::new(61, false ),
            FixedTrait::new(484966, false ),
            FixedTrait::new(812646, false ),
            FixedTrait::new(13369344, false ),
            FixedTrait::new(3604, false ),
            FixedTrait::new(7798784, false ),
            FixedTrait::new(1880883, false ),
            FixedTrait::new(5006950, false ),
            FixedTrait::new(220856320, false ),
            FixedTrait::new(87, false ),
            FixedTrait::new(488243, false ),
            FixedTrait::new(812646, false ),
            FixedTrait::new(13434880, false ),
            FixedTrait::new(3604, false ),
            FixedTrait::new(7798784, false ),
            FixedTrait::new(1880883, false ),
            FixedTrait::new(5006950, false ),
            FixedTrait::new(220856320, false ),
            FixedTrait::new(114, false ),
            FixedTrait::new(525598, false ),
            FixedTrait::new(812646, false ),
            FixedTrait::new(13565952, false ),
            FixedTrait::new(3604, false ),
            FixedTrait::new(7798784, false ),
            FixedTrait::new(1887436, false ),
            FixedTrait::new(5013504, false ),
            FixedTrait::new(217579519, false ),
            FixedTrait::new(138, false ),
            FixedTrait::new(628490, false ),
            FixedTrait::new(838860, false ),
            FixedTrait::new(13893632, false ),
            FixedTrait::new(3604, false ),
            FixedTrait::new(8126463, false ),
            FixedTrait::new(1874329, false ),
            FixedTrait::new(5046272, false ),
            FixedTrait::new(208404480, false ),
            FixedTrait::new(1, false ),
            FixedTrait::new(655360, false ),
            FixedTrait::new(924057, false ),
            FixedTrait::new(14090240, false ),
            FixedTrait::new(3768, false ),
            FixedTrait::new(8388608, false ),
            FixedTrait::new(1880883, false ),
            FixedTrait::new(5065932, false ),
            FixedTrait::new(206438400, false ),
            FixedTrait::new(25, false ),
            FixedTrait::new(688128, false ),
            FixedTrait::new(924057, false ),
            FixedTrait::new(14155776, false ),
            FixedTrait::new(3768, false ),
            FixedTrait::new(8454144, false ),
            FixedTrait::new(1893990, false ),
            FixedTrait::new(5065932, false ),
            FixedTrait::new(204472320, false ),
            FixedTrait::new(50, false ),
            FixedTrait::new(681574, false ),
            FixedTrait::new(924057, false ),
            FixedTrait::new(14286848, false ),
            FixedTrait::new(3768, false ),
            FixedTrait::new(8585216, false ),
            FixedTrait::new(1900544, false ),
            FixedTrait::new(5072486, false ),
            FixedTrait::new(205127680, false ),
            FixedTrait::new(76, false ),
            FixedTrait::new(640942, false ),
            FixedTrait::new(924057, false ),
            FixedTrait::new(14352384, false ),
            FixedTrait::new(3768, false ),
            FixedTrait::new(8650752, false ),
            FixedTrait::new(1933312, false ),
            FixedTrait::new(5072486, false ),
            FixedTrait::new(209059840, false ),
            FixedTrait::new(100, false ),
            FixedTrait::new(747110, false ),
            FixedTrait::new(924057, false ),
            FixedTrait::new(14483456, false ),
            FixedTrait::new(3768, false ),
            FixedTrait::new(8716288, false ),
            FixedTrait::new(1939865, false ),
            FixedTrait::new(5072486, false ),
            FixedTrait::new(201195519, false ),
            FixedTrait::new(126, false ),
            FixedTrait::new(650117, false ),
            FixedTrait::new(989593, false ),
            FixedTrait::new(14614528, false ),
            FixedTrait::new(3768, false ),
            FixedTrait::new(8781824, false ),
            FixedTrait::new(1966080, false ),
            FixedTrait::new(5079040, false ),
            FixedTrait::new(209059840, false ),
            FixedTrait::new(152, false ),
            FixedTrait::new(645529, false ),
            FixedTrait::new(989593, false ),
            FixedTrait::new(14876672, false ),
            FixedTrait::new(3768, false ),
            FixedTrait::new(8978432, false ),
            FixedTrait::new(1979187, false ),
            FixedTrait::new(5085593, false ),
            FixedTrait::new(209715200, false ),
            FixedTrait::new(1, false ),
            FixedTrait::new(653393, false ),
            FixedTrait::new(1002700, false ),
            FixedTrait::new(14876672, false ),
            FixedTrait::new(3951, false ),
            FixedTrait::new(8978432, false ),
            FixedTrait::new(1959526, false ),
            FixedTrait::new(5111808, false ),
            FixedTrait::new(209059840, false ),
            FixedTrait::new(26, false ),
            FixedTrait::new(614072, false ),
            FixedTrait::new(1009254, false ),
            FixedTrait::new(15007744, false ),
            FixedTrait::new(3951, false ),
            FixedTrait::new(9043968, false ),
            FixedTrait::new(1926758, false ),
            FixedTrait::new(5157683, false ),
            FixedTrait::new(211025919, false ),
            FixedTrait::new(54, false ),
            FixedTrait::new(523632, false ),
            FixedTrait::new(1009254, false ),
            FixedTrait::new(15073280, false ),
            FixedTrait::new(3951, false ),
            FixedTrait::new(9043968, false ),
            FixedTrait::new(2011955, false ),
            FixedTrait::new(5203558, false ),
            FixedTrait::new(220856320, false ),
            FixedTrait::new(78, false ),
            FixedTrait::new(688128, false ),
            FixedTrait::new(1009254, false ),
            FixedTrait::new(15138816, false ),
            FixedTrait::new(3951, false ),
            FixedTrait::new(9109504, false ),
            FixedTrait::new(1861222, false ),
            FixedTrait::new(5360844, false ),
            FixedTrait::new(203816960, false ),
            FixedTrait::new(102, false ),
            FixedTrait::new(688128, false ),
            FixedTrait::new(1028915, false ),
            FixedTrait::new(15204352, false ),
            FixedTrait::new(3951, false ),
            FixedTrait::new(9109504, false ),
            FixedTrait::new(1861222, false ),
            FixedTrait::new(5367398, false ),
            FixedTrait::new(203816960, false ),
            FixedTrait::new(126, false ),
            FixedTrait::new(694681, false ),
            FixedTrait::new(1028915, false ),
            FixedTrait::new(15204352, false ),
            FixedTrait::new(3951, false ),
            FixedTrait::new(9109504, false ),
            FixedTrait::new(1861222, false ),
            FixedTrait::new(5367398, false ),
            FixedTrait::new(203161600, false ),
            FixedTrait::new(151, false ),
            FixedTrait::new(681574, false ),
            FixedTrait::new(1028915, false ),
            FixedTrait::new(15466496, false ),
            FixedTrait::new(3951, false ),
            FixedTrait::new(9371648, false ),
            FixedTrait::new(1848115, false ),
            FixedTrait::new(5452595, false ),
            FixedTrait::new(203161600, false ),
            FixedTrait::new(1, false ),
            FixedTrait::new(591790, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(15663104, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(9568256, false ),
            FixedTrait::new(1985740, false ),
            FixedTrait::new(5485363, false ),
            FixedTrait::new(214302719, false ),
            FixedTrait::new(29, false ),
            FixedTrait::new(565575, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(15859712, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(9764864, false ),
            FixedTrait::new(2025062, false ),
            FixedTrait::new(5505024, false ),
            FixedTrait::new(217579519, false ),
            FixedTrait::new(57, false ),
            FixedTrait::new(681574, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(16187392, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(9961472, false ),
            FixedTrait::new(1979187, false ),
            FixedTrait::new(5583667, false ),
            FixedTrait::new(207093760, false ),
            FixedTrait::new(83, false ),
            FixedTrait::new(547225, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(16580607, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10223616, false ),
            FixedTrait::new(1998848, false ),
            FixedTrait::new(5681971, false ),
            FixedTrait::new(218234879, false ),
            FixedTrait::new(110, false ),
            FixedTrait::new(753664, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(16777216, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10289152, false ),
            FixedTrait::new(1966080, false ),
            FixedTrait::new(5754060, false ),
            FixedTrait::new(201195519, false ),
            FixedTrait::new(135, false ),
            FixedTrait::new(747110, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(16842752, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10289152, false ),
            FixedTrait::new(1992294, false ),
            FixedTrait::new(5780275, false ),
            FixedTrait::new(202506239, false ),
        ].span()
    );
    return tensor;
}
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// Training labels: 24 FP16x16-encoded target values, paired row-for-row
// with the observations returned by aave_x_features().
fn aave_y_labels() -> Tensor<FP16x16> {
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![24].span(),
        data: array![
            FixedTrait::new(5072486, false ),
            FixedTrait::new(5072486, false ),
            FixedTrait::new(5079040, false ),
            FixedTrait::new(5085593, false ),
            FixedTrait::new(5111808, false ),
            FixedTrait::new(5157683, false ),
            FixedTrait::new(5203558, false ),
            FixedTrait::new(5360844, false ),
            FixedTrait::new(5367398, false ),
            FixedTrait::new(5367398, false ),
            FixedTrait::new(5452595, false ),
            FixedTrait::new(5485363, false ),
            FixedTrait::new(5505024, false ),
            FixedTrait::new(5583667, false ),
            FixedTrait::new(5681971, false ),
            FixedTrait::new(5754060, false ),
            FixedTrait::new(5780275, false ),
            FixedTrait::new(5852364, false ),
            FixedTrait::new(5891686, false ),
            FixedTrait::new(5963776, false ),
            FixedTrait::new(6035865, false ),
            FixedTrait::new(6134169, false ),
            FixedTrait::new(6153830, false ),
            FixedTrait::new(6180044, false ),
        ].span()
    );
    return tensor;
}
mod aave_weth_revenue_data_input;
|
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// Forecast input: 7 recent observations with the same 9-feature layout as
// aave_x_features(), FP16x16-encoded. These values are NOT normalized —
// the test passes them through normalize_user_x_inputs before predicting.
fn aave_weth_revenue_data_input() -> Tensor<FP16x16> {
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![7,9].span(),
        data: array![
            FixedTrait::new(160, false ),
            FixedTrait::new(786432, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(16973824, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10354688, false ),
            FixedTrait::new(1952972, false ),
            FixedTrait::new(5852364, false ),
            FixedTrait::new(198574079, false ),
            FixedTrait::new(185, false ),
            FixedTrait::new(681574, false ),
            FixedTrait::new(1048576, false ),
            FixedTrait::new(17170432, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10420224, false ),
            FixedTrait::new(1959526, false ),
            FixedTrait::new(5891686, false ),
            FixedTrait::new(207093760, false ),
            FixedTrait::new(211, false ),
            FixedTrait::new(688128, false ),
            FixedTrait::new(1055129, false ),
            FixedTrait::new(17301504, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10420224, false ),
            FixedTrait::new(1952972, false ),
            FixedTrait::new(5963776, false ),
            FixedTrait::new(206438400, false ),
            FixedTrait::new(236, false ),
            FixedTrait::new(707788, false ),
            FixedTrait::new(1055129, false ),
            FixedTrait::new(17367040, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10420224, false ),
            FixedTrait::new(1907097, false ),
            FixedTrait::new(6035865, false ),
            FixedTrait::new(203161600, false ),
            FixedTrait::new(261, false ),
            FixedTrait::new(792985, false ),
            FixedTrait::new(1061683, false ),
            FixedTrait::new(17432576, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10420224, false ),
            FixedTrait::new(1880883, false ),
            FixedTrait::new(6134169, false ),
            FixedTrait::new(195952639, false ),
            FixedTrait::new(285, false ),
            FixedTrait::new(792985, false ),
            FixedTrait::new(1061683, false ),
            FixedTrait::new(17432576, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10420224, false ),
            FixedTrait::new(1880883, false ),
            FixedTrait::new(6153830, false ),
            FixedTrait::new(195952639, false ),
            FixedTrait::new(308, false ),
            FixedTrait::new(792985, false ),
            FixedTrait::new(1061683, false ),
            FixedTrait::new(17498112, false ),
            FixedTrait::new(4128, false ),
            FixedTrait::new(10420224, false ),
            FixedTrait::new(1887436, false ),
            FixedTrait::new(6180044, false ),
            FixedTrait::new(196607999, false ),
        ].span()
    );
    return tensor;
}
use debug::PrintTrait;
use array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
// Extracts row `row_index` of a 2-D tensor as a 1-D tensor of length
// shape[1].
fn get_tensor_data_by_row(tensor_data: Tensor<FP16x16>, row_index: u32,) -> Tensor<FP16x16> {
    let column_len = *tensor_data.shape.at(1);
    let mut row_values = ArrayTrait::<FP16x16>::new();
    let mut col: u32 = 0;
    loop {
        if col >= column_len {
            break ();
        }
        row_values.append(tensor_data.at(indices: array![row_index, col].span()));
        col += 1;
    };
    return TensorTrait::<
        FP16x16
    >::new(array![column_len].span(), data: row_values.span());
}
// Swaps the two axes of a 2-D tensor.
fn transpose_tensor(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    return tensor_data.transpose(axes: array![1, 0].span());
}
// Arithmetic mean of every element of `tensor_data`, as a scalar FP16x16.
fn calculate_mean(tensor_data: Tensor<FP16x16>) -> FP16x16 {
    let element_count = FixedTrait::<FP16x16>::new_unscaled(tensor_data.data.len(), false);
    // The final cumulative-sum entry equals the total sum.
    let running_totals = tensor_data.cumsum(0, Option::None(()), Option::None(()));
    let total = *running_totals.data[tensor_data.data.len() - 1];
    return total / element_count;
}
// Coefficient of determination: R^2 = 1 - SS_res / SS_tot, where SS_res
// sums squared residuals (y - y_pred) and SS_tot sums squared deviations
// from the label mean (y - mean(y)).
fn calculate_r_score(Y_values: Tensor<FP16x16>, Y_pred_values: Tensor<FP16x16>) -> FP16x16 {
    let mut Y_values_ = Y_values;
    let mean_y_value = calculate_mean(Y_values);
    // Both intermediate tensors are 1-D with one entry per label.
    let mut squared_diff_shape = array::ArrayTrait::new();
    squared_diff_shape.append(Y_values.data.len());
    let mut squared_diff_vals = array::ArrayTrait::new();
    let mut squared_mean_diff_shape = array::ArrayTrait::new();
    squared_mean_diff_shape.append(Y_values.data.len());
    let mut squared_mean_diff_vals = array::ArrayTrait::new();
    // `i` walks Y_pred_values in lock-step while Y_values_ is drained.
    let mut i: u32 = 0;
    loop {
        match Y_values_.data.pop_front() {
            Option::Some(y_value) => {
                // Squared residual against the prediction.
                let diff_pred = *y_value - *Y_pred_values.data.at(i);
                let squared_diff = diff_pred * diff_pred;
                squared_diff_vals.append(squared_diff);
                // Squared deviation from the label mean.
                let diff_mean = *y_value - mean_y_value;
                let squared_mean_diff = diff_mean * diff_mean;
                squared_mean_diff_vals.append(squared_mean_diff);
                i += 1;
            },
            Option::None(_) => { break; }
        }
    };
    let squared_diff_tensor = TensorTrait::<
        FP16x16
    >::new(squared_diff_shape.span(), squared_diff_vals.span())
;
    let squared_mean_diff_tensor = TensorTrait::<
        FP16x16
    >::new(squared_mean_diff_shape.span(), squared_mean_diff_vals.span());
    // cumsum's final entry gives the total of each series.
    let sum_squared_diff = squared_diff_tensor.cumsum(0, Option::None(()), Option::None(()));
    let sum_squared_mean_diff = squared_mean_diff_tensor
        .cumsum(0, Option::None(()), Option::None(()));
    let r_score = FixedTrait::new_unscaled(1, false)
        - *sum_squared_diff.data.at(Y_values.data.len() - 1)
        / *sum_squared_mean_diff.data.at(Y_values.data.len() - 1);
    return r_score;
}
// Min-max normalizes user-supplied inputs using the extrema of the ORIGINAL
// training features, so new samples land on the same scale the model was
// fitted on. Handles 1-D and 2-D shapes for both arguments.
// Fixes: removed the unused `x_max` array / `x_max_tensor` / dead `diff`
// local, and computes each feature's extrema once instead of twice.
fn normalize_user_x_inputs(
    x_inputs: Tensor<FP16x16>, original_x_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    // Placeholder; overwritten by every reachable branch below.
    let mut x_inputs_normalized = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
    if original_x_values.shape.len() > 1 {
        // Per-feature extrema: transpose so each feature becomes a row.
        let mut x_min = ArrayTrait::<FP16x16>::new();
        let mut x_range = ArrayTrait::<FP16x16>::new();
        let transposed_tensor = original_x_values.transpose(axes: array![1, 0].span());
        let data_len = *transposed_tensor.shape.at(0);
        let mut i: u32 = 0;
        loop {
            if i >= data_len {
                break ();
            }
            let feature_row = get_tensor_data_by_row(transposed_tensor, i);
            let feature_min = feature_row.min_in_tensor();
            let feature_max = feature_row.max_in_tensor();
            x_min.append(feature_min);
            x_range.append(feature_max - feature_min);
            i += 1;
        };
        let x_min_tensor = TensorTrait::new(shape: array![data_len].span(), data: x_min.span());
        let x_range_tensor = TensorTrait::new(
            shape: array![data_len].span(), data: x_range.span()
        );
        if x_inputs.shape.len() > 1 {
            // Batch of samples: normalize row by row, then reassemble.
            let mut result = ArrayTrait::<FP16x16>::new();
            let mut j: u32 = 0;
            loop {
                if j >= *x_inputs.shape.at(0) {
                    break ();
                }
                let row_data = get_tensor_data_by_row(x_inputs, j);
                let norm_row_data = (row_data - x_min_tensor) / x_range_tensor;
                let mut k: u32 = 0;
                loop {
                    if k >= norm_row_data.data.len() {
                        break ();
                    }
                    result.append(*norm_row_data.data.at(k));
                    k += 1;
                };
                j += 1;
            };
            x_inputs_normalized =
                TensorTrait::<
                    FP16x16
                >::new(
                    array![*x_inputs.shape.at(0), *x_inputs.shape.at(1)].span(),
                    data: result.span()
                );
        }
        if x_inputs.shape.len() == 1 {
            // Single sample: element-wise broadcast does the work.
            x_inputs_normalized = (x_inputs - x_min_tensor) / x_range_tensor;
        }
    }
    if original_x_values.shape.len() == 1 {
        // Single-feature training data: a scalar min and range suffice.
        let original_min = original_x_values.min_in_tensor();
        let original_max = original_x_values.max_in_tensor();
        let x_min_tensor = TensorTrait::<
            FP16x16
        >::new(shape: array![1].span(), data: array![original_min].span());
        let x_range_tensor = TensorTrait::<
            FP16x16
        >::new(shape: array![1].span(), data: array![original_max - original_min].span());
        x_inputs_normalized = (x_inputs - x_min_tensor) / x_range_tensor;
    }
    return x_inputs_normalized;
}
// Maps normalized model outputs back onto the original label scale:
// rescaled = prediction * (max(y) - min(y)) + min(y).
// Fixes: removed three unused arrays, the unused `y_max_tensor`, the
// pointless result placeholder, and the duplicate max/min tensor scans.
fn rescale_predictions(
    prediction_result: Tensor<FP16x16>, y_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    // Compute the label extrema once; each *_in_tensor call scans y_values.
    let y_min = y_values.min_in_tensor();
    let y_range = y_values.max_in_tensor() - y_min;
    let y_min_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![y_min].span());
    let y_range_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![y_range].span());
    // The [1]-shaped range/min tensors broadcast across all predictions.
    return (prediction_result * y_range_tensor) + y_min_tensor;
}
mod test;
mod data_preprocessing;
mod helper_functions;
mod datasets;
mod model; |
mod multiple_linear_regression_model;
|
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
use multiple_linear_regresion::data_preprocessing::{Dataset, DatasetTrait};
use multiple_linear_regresion::helper_functions::{
get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,
normalize_user_x_inputs, rescale_predictions
}; |
// Fitted multivariate model: one coefficient per feature plus a trailing
// bias term (see the shape assert in the test: n_features + 1 entries).
struct MultipleLinearRegressionModel {
    coefficients: Tensor<FP16x16>
}
// Inference for the multivariate model: appends a bias input of 1.0 to the
// features and takes the dot product with the fitted coefficients.
impl RegressionOperation of MultipleLinearRegressionModelTrait {
    // Predicts labels for a single sample (1-D input) or a batch of
    // samples (2-D input, one sample per row).
    fn predict(
        ref self: MultipleLinearRegressionModel, feature_inputs: Tensor<FP16x16>
    ) -> Tensor<FP16x16> {
        // Placeholder result, overwritten by the 1-D or 2-D branch below.
        let mut prediction_result = TensorTrait::<
            FP16x16
        >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span())
;
        let mut result = ArrayTrait::<FP16x16>::new();
        if feature_inputs.shape.len() > 1 {
            // Batch case: append a bias column (axis 1), score row by row.
            let feature_values = add_bias_term(feature_inputs, 1);
            let mut data_len: u32 = *feature_values.shape.at(0);
            let mut i: u32 = 0;
            loop {
                if i >= data_len {
                    break ();
                }
                let feature_row_values = get_tensor_data_by_row(feature_values, i);
                // 1-D x 1-D matmul yields a single-element tensor (dot product).
                let predicted_values = feature_row_values.matmul(@self.coefficients);
                result.append(*predicted_values.data.at(0));
                i += 1;
            };
            prediction_result =
                TensorTrait::<
                    FP16x16
                >::new(shape: array![result.len()].span(), data: result.span());
        }
        if feature_inputs.shape.len() == 1 && self.coefficients.data.len() > 1 {
            // Single-sample case: one dot product against the coefficients.
            let feature_values = add_bias_term(feature_inputs, 1);
            prediction_result = feature_values.matmul(@self.coefficients);
        }
        return prediction_result;
    }
}
// Fits multiple linear regression: transposes to features-as-rows layout,
// appends a bias row, decorrelates the feature rows by removing mutual
// projections, then back-substitutes for the coefficients.
fn MultipleLinearRegression(dataset: Dataset) -> MultipleLinearRegressionModel {
    let features_by_row = transpose_tensor(dataset.x_values);
    let features_with_bias = add_bias_term(features_by_row, 0);
    let orthogonal_features = decorrelate_x_features(features_with_bias);
    let fitted_coefficients = compute_gradients(
        orthogonal_features, dataset.y_values, features_with_bias
    );
    return MultipleLinearRegressionModel { coefficients: fitted_coefficients };
}
// Appends a bias entry of 1.0 (FP16x16: 65536) to the features.
// 2-D input: axis 0 concatenates a ROW of ones (features-as-rows layout);
// axis 1 concatenates a COLUMN of ones (samples-as-rows layout).
// 1-D input: appends a single trailing 1.0 regardless of `axis`.
fn add_bias_term(x_feature: Tensor<FP16x16>, axis: u32) -> Tensor<FP16x16> {
    let mut x_feature_ = x_feature;
    // Placeholder, overwritten by both branches below.
    let mut tensor_with_bias = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
    let mut result = ArrayTrait::<FP16x16>::new();
    if x_feature.shape.len() > 1 {
        // The ones-vector length is the size of the OTHER dimension.
        let mut index: u32 = 0;
        if axis == 1 {
            index = 0;
        } else {
            index = 1;
        }
        let data_len = *x_feature.shape.at(index);
        let mut i: u32 = 0;
        loop {
            if i >= data_len {
                break ();
            }
            // 65536 == 1.0 in FP16x16.
            result
                .append(FixedTrait::new(65536, false));
            i += 1;
        };
        if axis == 0 {
            let res_tensor = TensorTrait::new(
                shape: array![1, data_len].span(), data: result.span()
            );
            tensor_with_bias =
                TensorTrait::concat(tensors: array![x_feature, res_tensor].span(), axis: axis);
        } else {
            let res_tensor = TensorTrait::new(
                shape: array![data_len, 1].span(), data: result.span()
            );
            tensor_with_bias =
                TensorTrait::concat(tensors: array![x_feature, res_tensor].span(), axis: axis);
        }
    }
    if x_feature.shape.len() == 1 {
        // Copy every element, then append the single bias entry.
        let mut j: u32 = 0;
        loop {
            match x_feature_.data.pop_front() {
                Option::Some(x_val) => {
                    result.append(*x_val);
                    j += 1;
                },
                Option::None(_) => { break; }
            };
        };
        result.append(FixedTrait::new(65536, false));
        tensor_with_bias =
            TensorTrait::<FP16x16>::new(shape: array![result.len()].span(), data: result.span());
    }
    return tensor_with_bias;
}
// Sequentially removes, from every later feature row, its projection onto
// each earlier row, producing mutually decorrelated feature rows.
// Expects features-as-rows layout (one row per feature, bias row included).
fn decorrelate_x_features(x_feature_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut input_tensor = x_feature_data;
    let mut i: u32 = 0;
    loop {
        if i >= *x_feature_data.shape.at(0) {
            break ();
        }
        let mut placeholder = ArrayTrait::<FP16x16>::new();
        let mut feature_row_values = get_tensor_data_by_row(input_tensor, i);
        // Squared norm of the pivot row (1-D matmul = dot product).
        let mut feature_squared = feature_row_values.matmul(@feature_row_values);
        // Guard against a zero divisor in the projection below.
        if *feature_squared.data.at(0) == FixedTrait::new(0, false) {
            feature_squared =
                TensorTrait::<
                    FP16x16
                >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
        }
        let mut j: u32 = i + 1;
        loop {
            if j >= *x_feature_data.shape.at(0) {
                break ();
            }
            let mut remaining_tensor_values = get_tensor_data_by_row(input_tensor, j);
            let feature_cross_product = feature_row_values.matmul(@remaining_tensor_values);
            // Projection coefficient of row j onto pivot row i.
            let feature_gradients = feature_cross_product / feature_squared;
            remaining_tensor_values = remaining_tensor_values
                - (feature_row_values
                    * feature_gradients);
            // Flatten the updated row into the staging array.
            let mut k: u32 = 0;
            loop {
                if k >= remaining_tensor_values.data.len() {
                    break ();
                }
                placeholder.append(*remaining_tensor_values.data.at(k));
                k += 1;
            };
            j += 1;
        };
        // Rebuild: rows 0..=i kept as-is, rows below replaced by the
        // freshly decorrelated versions.
        let mut decorrelated_tensor = TensorTrait::new(
            shape: array![*x_feature_data.shape.at(0) - 1 - i, *x_feature_data.shape.at(1)].span(),
            data: placeholder.span()
        );
        let mut original_tensor = input_tensor
            .slice(
                starts: array![0, 0].span(),
                ends: array![i + 1, *x_feature_data.shape.at(1)].span(),
                axes: Option::None(()),
                steps: Option::Some(array![1, 1].span())
            );
        input_tensor =
            TensorTrait::concat(
                tensors: array![original_tensor, decorrelated_tensor].span(), axis: 0
            );
        i += 1;
    };
    return input_tensor;
}
// Solves for the regression coefficients by back-substitution: walks the
// decorrelated feature rows from last to first, estimates each coefficient
// as (y . feature) / (feature . feature), then removes that feature's
// contribution (using the ORIGINAL feature row) from y before continuing.
// Coefficients are produced in reverse order and flipped at the end.
fn compute_gradients(
    decorrelated_x_features: Tensor<FP16x16>,
    y_values: Tensor<FP16x16>,
    original_x_tensor_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    // NOTE(review): this placeholder is shadowed by the `let` near the end
    // of the function and is never read.
    let mut gradient_values_flipped = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
    let mut result = ArrayTrait::<FP16x16>::new();
    let mut tensor_y_vals = y_values;
    // Count down from the number of feature rows to 1.
    let mut i: u32 = *decorrelated_x_features.shape.at(0);
    loop {
        if i <= 0 {
            break ();
        }
        let index_val = i - 1;
        let mut decorelated_feature_row_values = get_tensor_data_by_row(
            decorrelated_x_features, index_val
        );
        // Squared norm of the decorrelated row (dot product with itself).
        let mut decorelated_features_squared = decorelated_feature_row_values
            .matmul(@decorelated_feature_row_values);
        let mut feature_label_cross_product = tensor_y_vals
            .matmul(@decorelated_feature_row_values);
        // Guard against a zero divisor.
        if *decorelated_features_squared.data.at(0) == FixedTrait::new(0, false) {
            decorelated_features_squared =
                TensorTrait::<
                    FP16x16
                >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
        }
        let mut single_gradient_value = feature_label_cross_product
            / decorelated_features_squared;
        result.append(*single_gradient_value.data.at(0));
        // Deflate y by this feature's fitted contribution.
        let mut original_x_tensor_row_values = get_tensor_data_by_row(
            original_x_tensor_values, index_val
        );
        tensor_y_vals = tensor_y_vals
            - (original_x_tensor_row_values
                * single_gradient_value);
        i -= 1;
    };
    let final_gradients = TensorTrait::new(
        shape: array![*decorrelated_x_features.shape.at(0)].span(), data: result.span()
    );
    // Reverse so index 0 again corresponds to the first feature row.
    let mut reverse_grad_array = ArrayTrait::<FP16x16>::new();
    let mut data_len: u32 = final_gradients.data.len();
    loop {
        if data_len <= 0 {
            break ();
        }
        let temp_val = data_len - 1;
        reverse_grad_array.append(*final_gradients.data.at(temp_val));
        data_len -= 1;
    };
    let gradient_values_flipped = TensorTrait::<
        FP16x16
    >::new(shape: array![reverse_grad_array.len()].span(), data: reverse_grad_array.span());
    return gradient_values_flipped;
}
use debug::PrintTrait;
use array::{ArrayTrait, SpanTrait};
use multiple_linear_regresion::datasets::aave_data::aave_x_features::aave_x_features;
use multiple_linear_regresion::datasets::aave_data::aave_y_labels::aave_y_labels;
use multiple_linear_regresion::datasets::user_inputs_data::aave_weth_revenue_data_input::{aave_weth_revenue_data_input };
use multiple_linear_regresion::model::multiple_linear_regression_model::{
MultipleLinearRegressionModel, MultipleLinearRegression, MultipleLinearRegressionModelTrait
};
use multiple_linear_regresion::data_preprocessing::{Dataset, DatasetTrait};
use multiple_linear_regresion::helper_functions::{get_tensor_data_by_row, transpose_tensor, calculate_mean ,
calculate_r_score, normalize_user_x_inputs, rescale_predictions};
use orion::numbers::{FP16x16, FixedTrait};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd,
FP16x16TensorSub, FP16x16TensorAdd, FP16x16TensorDiv, FP16x16TensorMul}; |
// End-to-end check of the multiple-regression pipeline on the AAVE dataset:
// normalization bounds, shape invariants, fit quality (R^2 >= ~0.95), and
// a rescaled 7-day forecast.
// Fixes: the fourth bounds assert previously re-checked x_values while its
// message said 'normalized y' — the y lower bound was never verified; it
// now checks y_values. Also removed the unused `model_coefficients` local.
fn multiple_linear_regression_test() {
    let mut main_x_vals = aave_x_features();
    let mut main_y_vals = aave_y_labels();
    let mut dataset = Dataset { x_values: main_x_vals, y_values: main_y_vals };
    let mut normalized_dataset = dataset.normalize_dataset();
    let mut model = MultipleLinearRegression(normalized_dataset);
    let mut reconstructed_ys = model.predict(normalized_dataset.x_values);
    let mut r_squared_score = calculate_r_score(normalized_dataset.y_values, reconstructed_ys);
    r_squared_score.print();
    // Normalized features and labels must lie in [0, 1] (FP16x16: 65536 == 1.0).
    assert(
        normalized_dataset.x_values.max_in_tensor() <= FixedTrait::new(65536, false),
        'normalized x not between 0-1'
    );
    assert(
        normalized_dataset.x_values.min_in_tensor() >= FixedTrait::new(0, false),
        'normalized x not between 0-1'
    );
    assert(
        normalized_dataset.y_values.max_in_tensor() <= FixedTrait::new(65536, false),
        'normalized y not between 0-1'
    );
    assert(
        normalized_dataset.y_values.min_in_tensor() >= FixedTrait::new(0, false),
        'normalized y not between 0-1'
    );
    assert(
        normalized_dataset.x_values.data.len() == main_x_vals.data.len()
            && normalized_dataset.y_values.data.len() == main_y_vals.data.len(),
        'normalized data shape mismatch'
    );
    // One coefficient per feature plus the bias term.
    assert(
        model.coefficients.data.len() == *main_x_vals.shape.at(1) + 1,
        'coefficient data shape mismatch'
    );
    assert(r_squared_score >= FixedTrait::new(62259, false), 'AAVE model acc. less than 95%');
    // Forecast the last 7 days and map the results back to label units.
    let last_7_days_aave_data = aave_weth_revenue_data_input();
    let last_7_days_aave_data_normalized = normalize_user_x_inputs(
        last_7_days_aave_data, main_x_vals
    );
    let mut forecast_results = model.predict(last_7_days_aave_data_normalized);
    let mut rescale_forecasts = rescale_predictions(forecast_results, main_y_vals);
    (*rescale_forecasts.data.at(0)).print();
    (*rescale_forecasts.data.at(1)).print();
    (*rescale_forecasts.data.at(2)).print();
    (*rescale_forecasts.data.at(5)).print();
    (*rescale_forecasts.data.at(6)).print();
}
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
use multiple_linear_regresion::helper_functions::{
get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,
normalize_user_x_inputs, rescale_predictions
}; |
// Pairs feature values with their target labels for model training.
struct Dataset {
    x_values: Tensor<FP16x16>, // feature tensor (2D matrix, or 1D vector for a single feature)
    y_values: Tensor<FP16x16>, // target label tensor
}
impl DataPreprocessing of DatasetTrait {
    // Min-max normalizes both tensors of the dataset into the 0-1 range.
    // 2D feature tensors are normalized column-wise; 1D tensors are
    // normalized as a whole, the same way labels are.
    fn normalize_dataset(ref self: Dataset) -> Dataset {
        // Single-element placeholders; overwritten by one of the branches below.
        let mut normalized_x = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        let mut normalized_y = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        let feature_rank = self.x_values.shape.len();
        if feature_rank > 1 {
            normalized_x = normalize_feature_data(self.x_values);
            normalized_y = normalize_label_data(self.y_values);
        }
        if feature_rank == 1 {
            normalized_x = normalize_label_data(self.x_values);
            normalized_y = normalize_label_data(self.y_values);
        }
        return Dataset { x_values: normalized_x, y_values: normalized_y };
    }
}
// Min-max normalizes each feature column of a 2D tensor to the 0-1 range:
// (x - col_min) / (col_max - col_min), broadcast over rows.
fn normalize_feature_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut x_min_array = ArrayTrait::<FP16x16>::new();
    let mut x_range_array = ArrayTrait::<FP16x16>::new();
    // Transpose so each row of the transposed tensor is one feature column.
    let transposed_tensor = tensor_data.transpose(axes: array![1, 0].span());
    let feature_count = *transposed_tensor.shape.at(0);
    let mut i: u32 = 0;
    loop {
        if i >= feature_count {
            break ();
        }
        let mut feature_row = get_tensor_data_by_row(transposed_tensor, i);
        // Evaluate each reduction once per feature (previously min/max were
        // each computed twice, and an unused max array was also built).
        let feature_min = feature_row.min_in_tensor();
        let feature_max = feature_row.max_in_tensor();
        x_min_array.append(feature_min);
        x_range_array.append(feature_max - feature_min);
        i += 1;
    };
    // Shape [1, feature_count] so subtraction/division broadcast row-wise.
    let x_min = TensorTrait::<
        FP16x16
    >::new(shape: array![1, feature_count].span(), data: x_min_array.span());
    let x_range = TensorTrait::<
        FP16x16
    >::new(shape: array![1, feature_count].span(), data: x_range_array.span());
    return (tensor_data - x_min) / x_range;
}
// Min-max normalizes a 1D tensor to the 0-1 range:
// (x - min) / (max - min), element by element.
fn normalize_label_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    // Span is copyable, so popping from this copy leaves tensor_data intact.
    let mut remaining = tensor_data;
    let mut normalized_array = ArrayTrait::<FP16x16>::new();
    // Hoist the loop-invariant reductions: min_in_tensor() was previously
    // recomputed for every element inside the loop.
    let min_value = tensor_data.min_in_tensor();
    let range = tensor_data.max_in_tensor() - min_value;
    loop {
        match remaining.data.pop_front() {
            Option::Some(tensor_val) => {
                normalized_array.append((*tensor_val - min_value) / range);
            },
            Option::None(_) => { break; }
        };
    };
    let normalized_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![tensor_data.data.len()].span(), data: normalized_array.span());
    return normalized_tensor;
}
mod boston_data;
mod user_inputs_data;
|
mod boston_x_features;
mod boston_y_labels;
|
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// Boston dataset feature matrix: 50 samples x 11 features, stored row-major
// as FP16x16 fixed-point values (raw integer = value * 65536).
fn boston_x_features() -> Tensor<FP16x16> {
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![50,11].span(),
        data: array![
            FixedTrait::new(26719, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(406323, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(33226, false ),
            FixedTrait::new(403963, false ),
            FixedTrait::new(5983436, false ),
            FixedTrait::new(199753, false ),
            FixedTrait::new(524288, false ),
            FixedTrait::new(20119552, false ),
            FixedTrait::new(1140326, false ),
            FixedTrait::new(17588, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(635043, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(38338, false ),
            FixedTrait::new(379715, false ),
            FixedTrait::new(4626841, false ),
            FixedTrait::new(189575, false ),
            FixedTrait::new(393216, false ),
            FixedTrait::new(25624576, false ),
            FixedTrait::new(1258291, false ),
            FixedTrait::new(3512, false ),
            FixedTrait::new(1376256, false ),
            FixedTrait::new(369623, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(28770, false ),
            FixedTrait::new(426704, false ),
            FixedTrait::new(1382809, false ),
            FixedTrait::new(446608, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(15925248, false ),
            FixedTrait::new(1101004, false ),
            FixedTrait::new(731407, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(48496, false ),
            FixedTrait::new(434438, false ),
            FixedTrait::new(6199705, false ),
            FixedTrait::new(139244, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(151643, false ),
            FixedTrait::new(0, f
alse ),
            FixedTrait::new(1283194, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(39649, false ),
            FixedTrait::new(385351, false ),
            FixedTrait::new(6376652, false ),
            FixedTrait::new(156545, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(26411008, false ),
            FixedTrait::new(963379, false ),
            FixedTrait::new(637283, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(48496, false ),
            FixedTrait::new(419823, false ),
            FixedTrait::new(6370099, false ),
            FixedTrait::new(135338, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(6860, false ),
            FixedTrait::new(2621440, false ),
            FixedTrait::new(420085, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(29294, false ),
            FixedTrait::new(476250, false ),
            FixedTrait::new(3211264, false ),
            FixedTrait::new(313733, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(16646144, false ),
            FixedTrait::new(1153433, false ),
            FixedTrait::new(1598672, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(45875, false ),
            FixedTrait::new(304873, false ),
            FixedTrait::new(6553600, false ),
            FixedTrait::new(96154, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(264661, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(34865, false ),
            FixedTrait::new(408223, false ),
            FixedTrait::new(5944115, false ),
            FixedTrait::new(203115, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(10624, false ),
            FixedTrait::new(1310720, false ),
            FixedTrait::new(4
56130, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(30408, false ),
            FixedTrait::new(408944, false ),
            FixedTrait::new(1068236, false ),
            FixedTrait::new(290258, false ),
            FixedTrait::new(196608, false ),
            FixedTrait::new(14614528, false ),
            FixedTrait::new(1218969, false ),
            FixedTrait::new(6768, false ),
            FixedTrait::new(1638400, false ),
            FixedTrait::new(336199, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(29687, false ),
            FixedTrait::new(388431, false ),
            FixedTrait::new(3093299, false ),
            FixedTrait::new(454295, false ),
            FixedTrait::new(524288, false ),
            FixedTrait::new(18612224, false ),
            FixedTrait::new(1291059, false ),
            FixedTrait::new(40077, false ),
            FixedTrait::new(1310720, false ),
            FixedTrait::new(260177, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(42401, false ),
            FixedTrait::new(570425, false ),
            FixedTrait::new(5695078, false ),
            FixedTrait::new(118030, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(17301504, false ),
            FixedTrait::new(851968, false ),
            FixedTrait::new(527944, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(38273, false ),
            FixedTrait::new(355663, false ),
            FixedTrait::new(6252134, false ),
            FixedTrait::new(159239, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(14030, false ),
            FixedTrait::new(1441792, false ),
            FixedTrait::new(384040, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(28246, false ),
            FixedTrait::new(421920, false ),
            FixedTrait::new(583270, false ),
            FixedTrait::new(484750, false ),
            FixedTrait::new(458752, false ),
            FixedTrait::new(21626880, false ),
            FixedTrait::new(1251737, false ),
            FixedTrait::new(22353, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(483655, false ),
            FixedTrait::new
(0, false ),
            FixedTrait::new(32309, false ),
            FixedTrait::new(420413, false ),
            FixedTrait::new(2627993, false ),
            FixedTrait::new(309402, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(18808832, false ),
            FixedTrait::new(1284505, false ),
            FixedTrait::new(51800, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(648806, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(35651, false ),
            FixedTrait::new(401211, false ),
            FixedTrait::new(3460300, false ),
            FixedTrait::new(173034, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(19922944, false ),
            FixedTrait::new(1205862, false ),
            FixedTrait::new(1998, false ),
            FixedTrait::new(3604480, false ),
            FixedTrait::new(247726, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(31719, false ),
            FixedTrait::new(450494, false ),
            FixedTrait::new(1841561, false ),
            FixedTrait::new(423716, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(24248320, false ),
            FixedTrait::new(1153433, false ),
            FixedTrait::new(22898, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(648806, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(35651, false ),
            FixedTrait::new(391380, false ),
            FixedTrait::new(5026611, false ),
            FixedTrait::new(203325, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(19922944, false ),
            FixedTrait::new(1205862, false ),
            FixedTrait::new(24195, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(648806, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(35651, false ),
            FixedTrait::new(430374, false ),
            FixedTrait::new(5721292, false ),
            FixedTrait::new(236080, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(19922944, false ),
            FixedTrait::new(1205862, false ),
            FixedTrait::new(623485, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(46727, false
),
            FixedTrait::new(440926, false ),
            FixedTrait::new(6166937, false ),
            FixedTrait::new(163584, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(52606, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(533463, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(35258, false ),
            FixedTrait::new(357564, false ),
            FixedTrait::new(2398617, false ),
            FixedTrait::new(248807, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(20119552, false ),
            FixedTrait::new(1376256, false ),
            FixedTrait::new(3709, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(223477, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(32047, false ),
            FixedTrait::new(459210, false ),
            FixedTrait::new(5655756, false ),
            FixedTrait::new(224244, false ),
            FixedTrait::new(131072, false ),
            FixedTrait::new(17694720, false ),
            FixedTrait::new(1166540, false ),
            FixedTrait::new(28554, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(694026, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(32047, false ),
            FixedTrait::new(350224, false ),
            FixedTrait::new(6553600, false ),
            FixedTrait::new(253952, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(18153472, false ),
            FixedTrait::new(1218969, false ),
            FixedTrait::new(34087, false ),
            FixedTrait::new(1310720, false ),
            FixedTrait::new(260177, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(42401, false ),
            FixedTrait::new(550371, false ),
            FixedTrait::new(5996544, false ),
            FixedTrait::new(149979, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(17301504, false ),
            FixedTrait::new(851968, false ),
            FixedTrait::new(802632, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(38273, false ),
            FixedTrait::new(382533, false ),
            FixedTrait::new(3912499, false ),
            FixedTrait::new(130914, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(17654, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(648806, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(35651, false ),
            FixedTrait::new(410648, false ),
            FixedTrait::new(5426380, false ),
            FixedTrait::new(213830, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(19922944, false ),
            FixedTrait::new(1205862, false ),
            FixedTrait::new(2988, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(910295, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(36044, false ),
            FixedTrait::new(385875, false ),
            FixedTrait::new(3670016, false ),
            FixedTrait::new(203954, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(18087936, false ),
            FixedTrait::new(1074790, false ),
            FixedTrait::new(3787, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(161218, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(31981, false ),
            FixedTrait::new(457441, false ),
            FixedTrait::new(3827302, false ),
            FixedTrait::new(185401, false ),
            FixedTrait::new(196608, false ),
            FixedTrait::new(12648448, false ),
            FixedTrait::new(1166540, false ),
            FixedTrait::new(54084, false ),
            FixedTrait::new(1310720, false ),
            FixedTrait::new(260177, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(42401, false ),
            FixedTrait::new(480182, false ),
            FixedTrait::new(6193152, false ),
            FixedTrait::new(136236, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(17301504, false ),
            FixedTrait::new(851968, false ),
            FixedTrait::new(227690, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(47054, false ),
            FixedTrait::new(575406, false ),
            FixedTrait::new(5432934, false ),
            FixedTrait::new(124826, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(35685, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1434583, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(40894, false ),
            FixedTrait::new(403111, false ),
            FixedTrait::new(6415974, false ),
            FixedTrait::new(109359, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(28639232, false ),
            FixedTrait::new(1389363, false ),
            FixedTrait::new(9209, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(694026, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(32047, false ),
            FixedTrait::new(417792, false ),
            FixedTrait::new(2116812, false ),
            FixedTrait::new(258565, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(18153472, false ),
            FixedTrait::new(1218969, false ),
            FixedTrait::new(3069, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(223477, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(32047, false ),
            FixedTrait::new(420544, false ),
            FixedTrait::new(4331929, false ),
            FixedTrait::new(202656, false ),
            FixedTrait::new(131072, false ),
            FixedTrait::new(17694720, false ),
            FixedTrait::new(1166540, false ),
            FixedTrait::new(4016, false ),
            FixedTrait::new(1310720, false ),
            FixedTrait::new(218234, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(29025, false ),
            FixedTrait::new(501022, false ),
            FixedTrait::new(3257139, false ),
            FixedTrait::new(341567, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(14155776, false ),
            FixedTrait::new(976486, false ),
            FixedTrait::new(63974, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1434583, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(40894, false ),
            FixedTrait::new(377290, false ),
            FixedTrait::new(6448742, false ),
            FixedTrait::new(153747, false ),
            FixedT
rait::new(262144, false ),
            FixedTrait::new(28639232, false ),
            FixedTrait::new(1389363, false ),
            FixedTrait::new(14556, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(656015, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(35848, false ),
            FixedTrait::new(399245, false ),
            FixedTrait::new(6252134, false ),
            FixedTrait::new(166985, false ),
            FixedTrait::new(393216, false ),
            FixedTrait::new(28311552, false ),
            FixedTrait::new(1166540, false ),
            FixedTrait::new(11358, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(635043, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(38338, false ),
            FixedTrait::new(374013, false ),
            FixedTrait::new(3538944, false ),
            FixedTrait::new(156087, false ),
            FixedTrait::new(393216, false ),
            FixedTrait::new(25624576, false ),
            FixedTrait::new(1258291, false ),
            FixedTrait::new(3758, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(294256, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(29425, false ),
            FixedTrait::new(434503, false ),
            FixedTrait::new(3676569, false ),
            FixedTrait::new(290829, false ),
            FixedTrait::new(196608, false ),
            FixedTrait::new(16187392, false ),
            FixedTrait::new(1212416, false ),
            FixedTrait::new(11143, false ),
            FixedTrait::new(819200, false ),
            FixedTrait::new(515768, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(34340, false ),
            FixedTrait::new(393478, false ),
            FixedTrait::new(5629542, false ),
            FixedTrait::new(432019, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(20381696, false ),
            FixedTrait::new(996147, false ),
            FixedTrait::new(2121, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(142868, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(30015, false ),
            FixedTrait::new(458620, false ),
            FixedTrait::new(3001548, false ),
            FixedTrait::new(397292, false ),
            FixedTrait::new(196608, false ),
            FixedTrait::new(1454
8992, false ),
            FixedTrait::new(1225523, false ),
            FixedTrait::new(10443, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(452853, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(29360, false ),
            FixedTrait::new(407044, false ),
            FixedTrait::new(425984, false ),
            FixedTrait::new(374924, false ),
            FixedTrait::new(196608, false ),
            FixedTrait::new(15269888, false ),
            FixedTrait::new(1173094, false ),
            FixedTrait::new(3057, false ),
            FixedTrait::new(5242880, false ),
            FixedTrait::new(99614, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(26476, false ),
            FixedTrait::new(465764, false ),
            FixedTrait::new(2398617, false ),
            FixedTrait::new(479002, false ),
            FixedTrait::new(131072, false ),
            FixedTrait::new(21561344, false ),
            FixedTrait::new(825753, false ),
            FixedTrait::new(2325, false ),
            FixedTrait::new(5242880, false ),
            FixedTrait::new(238551, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(25690, false ),
            FixedTrait::new(385089, false ),
            FixedTrait::new(1251737, false ),
            FixedTrait::new(604261, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(20643840, false ),
            FixedTrait::new(1074790, false ),
            FixedTrait::new(4601, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(265420, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(33423, false ),
            FixedTrait::new(394526, false ),
            FixedTrait::new(3093299, false ),
            FixedTrait::new(232973, false ),
            FixedTrait::new(327680, false ),
            FixedTrait::new(19398656, false ),
            FixedTrait::new(1087897, false ),
            FixedTrait::new(2816, false ),
            FixedTrait::new(3440640, false ),
            FixedTrait::new(348651, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(26542, false ),
            FixedTrait::new(430243, false ),
            FixedTrait::new(1500774, false ),
            FixedTrait::new(479540, false ),
            FixedTrait::new(393216, false ),
            FixedTrait::new(19202048, false ),
            FixedTrait::new(1087897,
false ),
            FixedTrait::new(90963, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(533463, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(35258, false ),
            FixedTrait::new(389939, false ),
            FixedTrait::new(5373952, false ),
            FixedTrait::new(261488, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(20119552, false ),
            FixedTrait::new(1376256, false ),
            FixedTrait::new(12499, false ),
            FixedTrait::new(1441792, false ),
            FixedTrait::new(384040, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(28246, false ),
            FixedTrait::new(440270, false ),
            FixedTrait::new(1146880, false ),
            FixedTrait::new(512917, false ),
            FixedTrait::new(458752, false ),
            FixedTrait::new(21626880, false ),
            FixedTrait::new(1251737, false ),
            FixedTrait::new(12805, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(708444, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(27066, false ),
            FixedTrait::new(409272, false ),
            FixedTrait::new(406323, false ),
            FixedTrait::new(346508, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(19988480, false ),
            FixedTrait::new(1258291, false ),
            FixedTrait::new(758769, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(1186201, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(45875, false ),
            FixedTrait::new(330039, false ),
            FixedTrait::new(6356992, false ),
            FixedTrait::new(115998, false ),
            FixedTrait::new(1572864, false ),
            FixedTrait::new(43646976, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(2369, false ),
            FixedTrait::new(5242880, false ),
            FixedTrait::new(324403, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(26935, false ),
            FixedTrait::new(434503, false ),
            FixedTrait::new(1533542, false ),
            FixedTrait::new(335328, false ),
            FixedTrait::new(262144, false ),
            FixedTrait::new(16056320, false ),
            FixedTrait::new(1258291, false ),
        ].span()
    );
    return tensor;
}
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// 50 Boston dataset target labels as FP16x16 fixed-point values
// (raw integer = value * 65536); one label per sample row of boston_x_features.
fn boston_y_labels() -> Tensor<FP16x16> {
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![50].span(),
        data: array![
            FixedTrait::new(1422131, false ),
            FixedTrait::new(1199308, false ),
            FixedTrait::new(1638400, false ),
            FixedTrait::new(878182, false ),
            FixedTrait::new(1251737, false ),
            FixedTrait::new(1120665, false ),
            FixedTrait::new(2175795, false ),
            FixedTrait::new(688128, false ),
            FixedTrait::new(1284505, false ),
            FixedTrait::new(1651507, false ),
            FixedTrait::new(1284505, false ),
            FixedTrait::new(3276800, false ),
            FixedTrait::new(904396, false ),
            FixedTrait::new(1625292, false ),
            FixedTrait::new(1638400, false ),
            FixedTrait::new(1448345, false ),
            FixedTrait::new(2044723, false ),
            FixedTrait::new(1330380, false ),
            FixedTrait::new(1559756, false ),
            FixedTrait::new(976486, false ),
            FixedTrait::new(1323827, false ),
            FixedTrait::new(1546649, false ),
            FixedTrait::new(1310720, false ),
            FixedTrait::new(3198156, false ),
            FixedTrait::new(668467, false ),
            FixedTrait::new(1415577, false ),
            FixedTrait::new(1526988, false ),
            FixedTrait::new(2437939, false ),
            FixedTrait::new(2031616, false ),
            FixedTrait::new(1435238, false ),
            FixedTrait::new(1166540, false ),
            FixedTrait::new(1841561, false ),
            FixedTrait::new(1481113, false ),
            FixedTrait::new(3014656, false ),
            FixedTrait::new(1022361, false ),
            FixedTrait::new(1225523, false ),
            FixedTrait::new(1428684, false ),
            FixedTrait::new(1743257, false ),
            FixedTrait::new(1238630, false ),
            FixedTrait::new(2188902, false ),
            FixedTrait::new(1618739, false ),
            FixedTrait::new(1985740, false ),
            FixedTrait::new(1369702, false ),
            FixedTrait::new(1520435, false ),
            Fix
edTrait::new(1625292, false ),
            FixedTrait::new(865075, false ),
            FixedTrait::new(1717043, false ),
            FixedTrait::new(1533542, false ),
            FixedTrait::new(635699, false ),
            FixedTrait::new(1828454, false ),
        ].span()
    );
    return tensor;
}
mod user_inputs_boston_data;
|
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq };
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
// A single user input sample: 11 FP16x16 feature values matching the column
// layout of boston_x_features (these values equal its first row).
fn user_inputs_boston_data() -> Tensor<FP16x16> {
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![11].span(),
        data: array![
            FixedTrait::new(26719, false ),
            FixedTrait::new(0, false ),
            FixedTrait::new(406323, false ),
            FixedTrait::new(65536, false ),
            FixedTrait::new(33226, false ),
            FixedTrait::new(403963, false ),
            FixedTrait::new(5983436, false ),
            FixedTrait::new(199753, false ),
            FixedTrait::new(524288, false ),
            FixedTrait::new(20119552, false ),
            FixedTrait::new(1140326, false ),
        ].span()
    );
    return tensor;
}
use debug::PrintTrait;
use array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
// Extracts row `row_index` of a 2D tensor as a 1D tensor of the same width.
fn get_tensor_data_by_row(tensor_data: Tensor<FP16x16>, row_index: u32,) -> Tensor<FP16x16> {
    let num_columns = *tensor_data.shape.at(1);
    let mut row_values = ArrayTrait::<FP16x16>::new();
    // Collect every element of the requested row, column by column.
    let mut col: u32 = 0;
    loop {
        if col >= num_columns {
            break ();
        }
        row_values.append(tensor_data.at(indices: array![row_index, col].span()));
        col += 1;
    };
    return TensorTrait::<
        FP16x16
    >::new(array![num_columns].span(), data: row_values.span());
}
// Swaps the two axes of a 2D tensor (rows become columns and vice versa).
fn transpose_tensor(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    return tensor_data.transpose(axes: array![1, 0].span());
}
// Arithmetic mean of a 1D FP16x16 tensor: the last entry of the cumulative
// sum along axis 0 is the total, which is divided by the element count.
fn calculate_mean(tensor_data: Tensor<FP16x16>) -> FP16x16 {
    let element_count = FixedTrait::<FP16x16>::new_unscaled(tensor_data.data.len(), false);
    let running_totals = tensor_data.cumsum(0, Option::None(()), Option::None(()));
    let total = running_totals.data[tensor_data.data.len() - 1];
    return *total / element_count;
}
// Computes the coefficient of determination R^2 = 1 - SS_res / SS_tot between
// actual and predicted y values, in FP16x16 fixed point.
fn calculate_r_score(Y_values: Tensor<FP16x16>, Y_pred_values: Tensor<FP16x16>) -> FP16x16 {
    // Span copy: pop_front below consumes this copy, not the caller's tensor.
    let mut Y_values_ = Y_values;
    let mean_y_value = calculate_mean(Y_values);
    // Both intermediate tensors hold one squared term per sample.
    let mut squared_diff_shape = array::ArrayTrait::new();
    squared_diff_shape.append(Y_values.data.len());
    let mut squared_diff_vals = array::ArrayTrait::new();
    let mut squared_mean_diff_shape = array::ArrayTrait::new();
    squared_mean_diff_shape.append(Y_values.data.len());
    let mut squared_mean_diff_vals = array::ArrayTrait::new();
    // Walk actual values via pop_front while indexing predictions with i;
    // the two sequences advance in lockstep.
    let mut i: u32 = 0;
    loop {
        match Y_values_.data.pop_front() {
            Option::Some(y_value) => {
                // Residual term: (y - y_pred)^2
                let diff_pred = *y_value - *Y_pred_values.data.at(i);
                let squared_diff = diff_pred * diff_pred;
                squared_diff_vals.append(squared_diff);
                // Total-variance term: (y - mean)^2
                let diff_mean = *y_value - mean_y_value;
                let squared_mean_diff = diff_mean * diff_mean;
                squared_mean_diff_vals.append(squared_mean_diff);
                i += 1;
            },
            Option::None(_) => { break; }
        }
    };
    let squared_diff_tensor = TensorTrait::<
        FP16x16
    >::new(squared_diff_shape.span(), squared_diff_vals.span());
    let squared_mean_diff_tensor = TensorTrait::<
        FP16x16
    >::new(squared_mean_diff_shape.span(), squared_mean_diff_vals.span())
;
    // The last cumsum entry is the total sum of each tensor.
    let sum_squared_diff = squared_diff_tensor.cumsum(0, Option::None(()), Option::None(()));
    let sum_squared_mean_diff = squared_mean_diff_tensor
        .cumsum(0, Option::None(()), Option::None(()));
    let r_score = FixedTrait::new_unscaled(1, false)
        - *sum_squared_diff.data.at(Y_values.data.len() - 1)
            / *sum_squared_mean_diff.data.at(Y_values.data.len() - 1);
    return r_score;
}
// Min-max normalizes user-provided x inputs using the min/range of the
// original training features, so user data is scaled exactly like the data
// the model was fitted on. Supports 1D and 2D inputs for both tensors.
fn normalize_user_x_inputs(
    x_inputs: Tensor<FP16x16>, original_x_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    // Placeholder; overwritten by the matching branch below.
    let mut x_inputs_normalized = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
    if original_x_values.shape.len() > 1 {
        // Multi-feature case: per-feature min and range, computed column-wise
        // by transposing so each transposed row is one feature column.
        let mut x_min = ArrayTrait::<FP16x16>::new();
        let mut x_range = ArrayTrait::<FP16x16>::new();
        let transposed_tensor = original_x_values.transpose(axes: array![1, 0].span());
        let data_len = *transposed_tensor.shape.at(0);
        let mut i: u32 = 0;
        loop {
            if i >= data_len {
                break ();
            }
            let mut transposed_tensor_row = get_tensor_data_by_row(transposed_tensor, i);
            // Evaluate each reduction once per feature (they were previously
            // recomputed, and an unused max array was also built).
            let feature_min = transposed_tensor_row.min_in_tensor();
            let feature_max = transposed_tensor_row.max_in_tensor();
            x_min.append(feature_min);
            x_range.append(feature_max - feature_min);
            i += 1;
        };
        let mut x_min_tensor = TensorTrait::new(shape: array![data_len].span(), data: x_min.span());
        let mut x_range_tensor = TensorTrait::new(
            shape: array![data_len].span(), data: x_range.span()
        );
        if x_inputs.shape.len() > 1 {
            // 2D user input: normalize row by row and flatten into `result`.
            let mut result = ArrayTrait::<FP16x16>::new();
            let mut j: u32 = 0;
            loop {
                if j >= *x_inputs.shape.at(0) {
                    break ();
                };
                let mut row_data = get_tensor_data_by_row(x_inputs, j);
                let mut norm_row_data = (row_data - x_min_tensor) / x_range_tensor;
                let mut k: u32 = 0;
                loop {
                    if k >= norm_row_data.data.len() {
                        break ();
                    };
                    result.append(*norm_row_data.data.at(k));
                    k += 1;
                };
                j += 1;
            };
            x_inputs_normalized =
                TensorTrait::<
                    FP16x16
                >::new(
                    array![*x_inputs.shape.at(0), *x_inputs.shape.at(1)].span(), data: result.span()
                );
        };
        if x_inputs.shape.len() == 1 {
            // 1D user input: broadcast elementwise (x - min) / range.
            x_inputs_normalized = (x_inputs - x_min_tensor) / x_range_tensor;
        };
    }
    if original_x_values.shape.len() == 1 {
        // Single-feature case: scalar min/range wrapped in 1-element tensors.
        let mut x_min_tensor = TensorTrait::<
            FP16x16
        >::new(shape: array![1].span(), data: array![original_x_values.min_in_tensor()].span());
        let mut x_range_tensor = TensorTrait::<
            FP16x16
        >::new(
            shape: array![1].span(),
            data: array![original_x_values.max_in_tensor() - original_x_values.min_in_tensor()]
                .span()
        );
        x_inputs_normalized = (x_inputs - x_min_tensor) / x_range_tensor;
    };
    return x_inputs_normalized;
}
// Maps 0-1 normalized predictions back to the original y value range:
// rescaled = prediction * (y_max - y_min) + y_min, broadcast elementwise.
fn rescale_predictions(
    prediction_result: Tensor<FP16x16>, y_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    // Removed unused min/max/range arrays, the unused y_max tensor, and the
    // dead placeholder initializer present in the previous version.
    let y_min = y_values.min_in_tensor();
    let y_range = y_values.max_in_tensor() - y_min;
    // 1-element tensors so the arithmetic broadcasts over every prediction.
    let y_min_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![y_min].span());
    let y_range_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![y_range].span());
    return (prediction_result * y_range_tensor) + y_min_tensor;
}
mod test;
mod data_preprocessing;
mod helper_functions;
mod datasets;
mod model; |
mod multiple_linear_regression_model;
|
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
use multiple_linear_regresion::data_preprocessing::{Dataset, DatasetTrait};
use multiple_linear_regresion::helper_functions::{
get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,
normalize_user_x_inputs, rescale_predictions
}; |
// Fitted multiple linear regression model.
// `coefficients` holds one FP16x16 weight per feature; the test below asserts
// len == n_features + 1, so the extra entry is presumably the bias term added
// by `add_bias_term` — confirm against the training path.
struct MultipleLinearRegressionModel {
    coefficients: Tensor<FP16x16>
}
impl RegressionOperation of MultipleLinearRegressionModelTrait {
    // Predicts label values for `feature_inputs` using the stored coefficients.
    // 2-D input: one prediction per row (row ⋅ coefficients, after appending a
    // bias column). 1-D input: a single dot product with the coefficients.
    // Inputs that match neither branch return the placeholder tensor unchanged.
    fn predict(
        ref self: MultipleLinearRegressionModel, feature_inputs: Tensor<FP16x16>
    ) -> Tensor<FP16x16> {
        // Placeholder result, overwritten by whichever branch below applies.
        let mut prediction_result = TensorTrait::<
            FP16x16
        >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
        let mut result = ArrayTrait::<FP16x16>::new();
        if feature_inputs.shape.len() > 1 {
            // axis 1: append the bias as an extra column so each row lines up
            // with the coefficient vector.
            let feature_values = add_bias_term(feature_inputs, 1);
            let mut data_len: u32 = *feature_values.shape.at(0);
            let mut i: u32 = 0;
            loop {
                if i >= data_len {
                    break ();
                }
                // One prediction per input row: row ⋅ coefficients.
                let feature_row_values = get_tensor_data_by_row(feature_values, i);
                let predicted_values = feature_row_values.matmul(@self.coefficients);
                result.append(*predicted_values.data.at(0));
                i += 1;
            };
            prediction_result =
                TensorTrait::<
                    FP16x16
                >::new(shape: array![result.len()].span(), data: result.span());
        }
        if feature_inputs.shape.len() == 1 && self.coefficients.data.len() > 1 {
            // Single sample: append bias element, then one dot product.
            let feature_values = add_bias_term(feature_inputs, 1);
            prediction_result = feature_values.matmul(@self.coefficients);
        }
        return prediction_result;
    }
}
// Fits a multiple linear regression model: transpose features to row-major
// feature rows, append a bias row (axis 0), decorrelate the feature rows, then
// recover coefficients from the decorrelated features and the labels.
//
// FIX(review): the extraction had split the identifier `coefficients` across
// two lines (`coefficien` / `ts`), which does not compile; rejoined here.
fn MultipleLinearRegression(dataset: Dataset) -> MultipleLinearRegressionModel {
    let x_values_tranposed = transpose_tensor(dataset.x_values);
    // axis 0: the bias is added as an extra feature row.
    let x_values_tranposed_with_bias = add_bias_term(x_values_tranposed, 0);
    let decorrelated_x_features = decorrelate_x_features(x_values_tranposed_with_bias);
    let coefficients = compute_gradients(
        decorrelated_x_features, dataset.y_values, x_values_tranposed_with_bias
    );
    return MultipleLinearRegressionModel { coefficients };
}
// Appends a bias term of 1.0 (65536 in FP16x16, 16 fractional bits) to
// `x_feature`. For 2-D tensors: axis 0 appends a row of ones, axis 1 appends a
// column of ones. For 1-D tensors a single trailing 1.0 element is appended.
//
// FIX(review): removed the unused loop counter `j` from the 1-D branch.
fn add_bias_term(x_feature: Tensor<FP16x16>, axis: u32) -> Tensor<FP16x16> {
    let mut x_feature_ = x_feature;
    // Placeholder; overwritten by every branch that matches the input rank.
    let mut tensor_with_bias = TensorTrait::<
        FP16x16
    >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
    let mut result = ArrayTrait::<FP16x16>::new();
    if x_feature.shape.len() > 1 {
        // Length of the bias vector comes from the axis orthogonal to `axis`.
        let mut index: u32 = 0;
        if axis == 1 {
            index = 0;
        } else {
            index = 1;
        }
        let data_len = *x_feature.shape.at(index);
        let mut i: u32 = 0;
        loop {
            if i >= data_len {
                break ();
            }
            // 65536 == 1.0 in FP16x16.
            result.append(FixedTrait::new(65536, false));
            i += 1;
        };
        if axis == 0 {
            let res_tensor = TensorTrait::new(
                shape: array![1, data_len].span(), data: result.span()
            );
            tensor_with_bias =
                TensorTrait::concat(tensors: array![x_feature, res_tensor].span(), axis: axis);
        } else {
            let res_tensor = TensorTrait::new(
                shape: array![data_len, 1].span(), data: result.span()
            );
            tensor_with_bias =
                TensorTrait::concat(tensors: array![x_feature, res_tensor].span(), axis: axis);
        }
    }
    if x_feature.shape.len() == 1 {
        // Copy every element, then append one trailing bias element.
        loop {
            match x_feature_.data.pop_front() {
                Option::Some(x_val) => { result.append(*x_val); },
                Option::None(_) => { break; }
            };
        };
        result.append(FixedTrait::new(65536, false));
        tensor_with_bias =
            TensorTrait::<FP16x16>::new(shape: array![result.len()].span(), data: result.span());
    }
    return tensor_with_bias;
}
// Sequentially orthogonalizes the feature rows of `x_feature_data`: for each
// row i, the projection of every later row onto row i
// ((row_j ⋅ row_i) / (row_i ⋅ row_i) * row_i) is subtracted from that row, so
// processed rows share no component with the ones that follow.
//
// FIX(review): rejoined two mid-token line splits introduced by the extraction
// (`loo`/`p` and `original_`/`tensor`), which did not compile.
fn decorrelate_x_features(x_feature_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut input_tensor = x_feature_data;
    let mut i: u32 = 0;
    loop {
        if i >= *x_feature_data.shape.at(0) {
            break ();
        }
        let mut placeholder = ArrayTrait::<FP16x16>::new();
        let mut feature_row_values = get_tensor_data_by_row(input_tensor, i);
        let mut feature_squared = feature_row_values.matmul(@feature_row_values);
        // Avoid division by zero below: substitute a tiny epsilon tensor.
        if *feature_squared.data.at(0) == FixedTrait::new(0, false) {
            feature_squared =
                TensorTrait::<
                    FP16x16
                >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
        }
        // Subtract row i's component from every subsequent row, flattening the
        // updated rows into `placeholder`.
        let mut j: u32 = i + 1;
        loop {
            if j >= *x_feature_data.shape.at(0) {
                break ();
            }
            let mut remaining_tensor_values = get_tensor_data_by_row(input_tensor, j);
            let feature_cross_product = feature_row_values.matmul(@remaining_tensor_values);
            let feature_gradients = feature_cross_product / feature_squared;
            remaining_tensor_values = remaining_tensor_values
                - (feature_row_values * feature_gradients);
            let mut k: u32 = 0;
            loop {
                if k >= remaining_tensor_values.data.len() {
                    break ();
                }
                placeholder.append(*remaining_tensor_values.data.at(k));
                k += 1;
            };
            j += 1;
        };
        // Rebuild: rows 0..=i unchanged, rows i+1.. replaced with updated values.
        let mut decorrelated_tensor = TensorTrait::new(
            shape: array![*x_feature_data.shape.at(0) - 1 - i, *x_feature_data.shape.at(1)].span(),
            data: placeholder.span()
        );
        let mut original_tensor = input_tensor
            .slice(
                starts: array![0, 0].span(),
                ends: array![i + 1, *x_feature_data.shape.at(1)].span(),
                axes: Option::None(()),
                steps: Option::Some(array![1, 1].span())
            );
        input_tensor =
            TensorTrait::concat(
                tensors: array![original_tensor, decorrelated_tensor].span(), axis: 0
            );
        i += 1;
    };
    return input_tensor;
}
// Recovers regression coefficients from the decorrelated features by backward
// elimination: walking feature rows last-to-first, each coefficient is
// (y ⋅ feature) / (feature ⋅ feature); the fitted contribution of the matching
// *original* feature row is then subtracted from y before the next step.
// Coefficients accumulate in reverse order and are flipped before returning.
//
// FIX(review): rejoined the mid-token split `m`/`ut data_len` from the
// extraction, and removed the dead placeholder `gradient_values_flipped`
// binding that was immediately shadowed at the end of the function.
fn compute_gradients(
    decorrelated_x_features: Tensor<FP16x16>,
    y_values: Tensor<FP16x16>,
    original_x_tensor_values: Tensor<FP16x16>
) -> Tensor<FP16x16> {
    let mut result = ArrayTrait::<FP16x16>::new();
    let mut tensor_y_vals = y_values;
    let mut i: u32 = *decorrelated_x_features.shape.at(0);
    loop {
        if i <= 0 {
            break ();
        }
        let index_val = i - 1;
        let mut decorelated_feature_row_values = get_tensor_data_by_row(
            decorrelated_x_features, index_val
        );
        let mut decorelated_features_squared = decorelated_feature_row_values
            .matmul(@decorelated_feature_row_values);
        let mut feature_label_cross_product = tensor_y_vals
            .matmul(@decorelated_feature_row_values);
        // Avoid division by zero: substitute a tiny epsilon tensor.
        if *decorelated_features_squared.data.at(0) == FixedTrait::new(0, false) {
            decorelated_features_squared =
                TensorTrait::<
                    FP16x16
                >::new(shape: array![1].span(), data: array![FixedTrait::new(10, false)].span());
        }
        let mut single_gradient_value = feature_label_cross_product
            / decorelated_features_squared;
        result.append(*single_gradient_value.data.at(0));
        // Remove this feature's fitted contribution from the labels.
        let mut original_x_tensor_row_values = get_tensor_data_by_row(
            original_x_tensor_values, index_val
        );
        tensor_y_vals = tensor_y_vals
            - (original_x_tensor_row_values * single_gradient_value);
        i -= 1;
    };
    let final_gradients = TensorTrait::new(
        shape: array![*decorrelated_x_features.shape.at(0)].span(), data: result.span()
    );
    // Reverse the accumulated coefficients into original feature order.
    let mut reverse_grad_array = ArrayTrait::<FP16x16>::new();
    let mut data_len: u32 = final_gradients.data.len();
    loop {
        if data_len <= 0 {
            break ();
        }
        let temp_val = data_len - 1;
        reverse_grad_array.append(*final_gradients.data.at(temp_val));
        data_len -= 1;
    };
    let gradient_values_flipped = TensorTrait::<
        FP16x16
    >::new(shape: array![reverse_grad_array.len()].span(), data: reverse_grad_array.span());
    return gradient_values_flipped;
}
use debug::PrintTrait;
use array::{ArrayTrait, SpanTrait};
use multiple_linear_regresion::datasets::boston_data::boston_x_features::boston_x_features;
use multiple_linear_regresion::datasets::boston_data::boston_y_labels::boston_y_labels;
use multiple_linear_regresion::datasets::user_inputs_data::user_inputs_boston_data::user_inputs_boston_data;
use orion::numbers::{FP16x16, FixedTrait};
use multiple_linear_regresion::model::multiple_linear_regression_model::{
MultipleLinearRegressionModel, MultipleLinearRegression, MultipleLinearRegressionModelTrait
};
use multiple_linear_regresion::data_preprocessing::{Dataset, DatasetTrait};
use multiple_linear_regresion::helper_functions::{get_tensor_data_by_row, transpose_tensor, calculate_mean ,
calculate_r_score, normalize_user_x_inputs, rescale_predictions};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd,
FP16x16TensorSub, FP16x16TensorAdd, FP16x16TensorDiv, FP16x16TensorMul}; |
// End-to-end test on the Boston dataset: normalize, fit, validate normalization
// invariants and model fit (R²), then predict and rescale a user input.
fn multiple_linear_regression_test() {
    let mut main_x_vals = boston_x_features();
    let mut main_y_vals = boston_y_labels();
    let mut dataset = Dataset { x_values: main_x_vals, y_values: main_y_vals };
    let mut normalized_dataset = dataset.normalize_dataset();
    let mut model = MultipleLinearRegression(normalized_dataset);
    let mut model_coefficients = model.coefficients;
    let mut reconstructed_ys = model.predict(normalized_dataset.x_values);
    let mut r_squared_score = calculate_r_score(normalized_dataset.y_values, reconstructed_ys);
    r_squared_score.print();
    // 65536 == 1.0 in FP16x16: normalized values must lie in [0, 1].
    assert(normalized_dataset.x_values.max_in_tensor() <= FixedTrait::new(65536, false), 'normalized x not between 0-1');
    assert(normalized_dataset.x_values.min_in_tensor() >= FixedTrait::new(0, false), 'normalized x not between 0-1');
    assert(normalized_dataset.y_values.max_in_tensor() <= FixedTrait::new(65536, false), 'normalized y not between 0-1');
    // BUG FIX: this assertion previously re-checked x_values while its message
    // claims to validate y — it now checks y_values as intended.
    assert(normalized_dataset.y_values.min_in_tensor() >= FixedTrait::new(0, false), 'normalized y not between 0-1');
    assert(normalized_dataset.x_values.data.len() == main_x_vals.data.len()
        && normalized_dataset.y_values.data.len() == main_y_vals.data.len(), 'normalized data shape mismatch');
    // One coefficient per feature column plus the bias term.
    assert(model.coefficients.data.len() == *main_x_vals.shape.at(1) + 1, 'coefficient data shape mismatch');
    // 55699 / 65536 ≈ 0.85 in FP16x16.
    assert(r_squared_score >= FixedTrait::new(55699, false), 'Boston model acc. less than 84%');
    let user_input = user_inputs_boston_data();
    let mut normalized_user_x_inputs = normalize_user_x_inputs(user_input, main_x_vals);
    let mut prediction_result = model.predict(normalized_user_x_inputs);
    let mut rescale_prediction = rescale_predictions(prediction_result, main_y_vals);
    (*rescale_prediction.data.at(0)).print();
}
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, U32Tensor, U32TensorAdd, FP16x16TensorSub, FP16x16TensorAdd,
FP16x16TensorDiv, FP16x16TensorMul
};
use orion::numbers::{FP16x16, FixedTrait};
use multiple_linear_regresion::helper_functions::{
get_tensor_data_by_row, transpose_tensor, calculate_mean, calculate_r_score,
normalize_user_x_inputs, rescale_predictions
}; |
// A supervised-learning dataset: feature matrix/vector and matching labels,
// both as FP16x16 tensors.
struct Dataset {
    x_values: Tensor<FP16x16>,
    y_values: Tensor<FP16x16>,
}
impl DataPreprocessing of DatasetTrait {
    // Returns a new Dataset with both x and y min-max normalized to [0, 1].
    // 2-D x is normalized per feature column (normalize_feature_data); 1-D x
    // and y use the element-wise path (normalize_label_data). Any other rank
    // returns the single-zero placeholder tensors unchanged.
    fn normalize_dataset(ref self: Dataset) -> Dataset {
        let mut x_values = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        let mut y_values = TensorTrait::<FP16x16>::new(array![1].span(), array![FixedTrait::new(0, false)].span());
        if self.x_values.shape.len() > 1 {
            x_values = normalize_feature_data(self.x_values);
            y_values = normalize_label_data(self.y_values);
        }
        if self.x_values.shape.len() == 1 {
            x_values = normalize_label_data(self.x_values);
            y_values = normalize_label_data(self.y_values);
        }
        return Dataset { x_values, y_values };
    }
}
// Min-max normalizes a 2-D feature tensor per column: each column is mapped to
// (x - col_min) / (col_max - col_min) via shape-[1, n] min/range tensors.
//
// FIX(review): rejoined the mid-token split `shape`/`: array!` from the
// extraction, and removed the unused locals `x_max_array`, `normalized_array`,
// `tensor_shape`, and `tensor_column_len`.
fn normalize_feature_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut x_min_array = ArrayTrait::<FP16x16>::new();
    let mut x_range_array = ArrayTrait::<FP16x16>::new();
    // Transpose so each original column becomes a row we can scan in one pass.
    let transposed_tensor = tensor_data.transpose(axes: array![1, 0].span());
    let tensor_row_len = *transposed_tensor.shape.at(0);
    let mut i: u32 = 0;
    loop {
        if i >= tensor_row_len {
            break ();
        }
        let mut transposed_tensor_row = get_tensor_data_by_row(transposed_tensor, i);
        x_min_array.append(transposed_tensor_row.min_in_tensor());
        x_range_array
            .append(transposed_tensor_row.max_in_tensor() - transposed_tensor_row.min_in_tensor());
        i += 1;
    };
    // Per-column min and range applied across the original (untransposed) data.
    let mut x_min = TensorTrait::<
        FP16x16
    >::new(shape: array![1, tensor_row_len].span(), data: x_min_array.span());
    let mut x_range = TensorTrait::<
        FP16x16
    >::new(shape: array![1, tensor_row_len].span(), data: x_range_array.span());
    // NOTE(review): a constant column makes its range zero and this division
    // divides by zero; other helpers in this file substitute a small epsilon —
    // consider the same guard here.
    let normalized_tensor = (tensor_data - x_min) / x_range;
    return normalized_tensor;
}
// Min-max normalizes a 1-D tensor element-wise: v -> (v - min) / (max - min).
//
// FIX(review): removed the unused loop counter `i`.
fn normalize_label_data(tensor_data: Tensor<FP16x16>) -> Tensor<FP16x16> {
    let mut tensor_data_ = tensor_data;
    let mut normalized_array = ArrayTrait::<FP16x16>::new();
    // NOTE(review): `range` is zero when all elements are equal, making the
    // division below divide by zero; other helpers in this file substitute a
    // small epsilon — consider the same guard here.
    let mut range = tensor_data.max_in_tensor() - tensor_data.min_in_tensor();
    loop {
        match tensor_data_.data.pop_front() {
            Option::Some(tensor_val) => {
                let mut diff = *tensor_val - tensor_data.min_in_tensor();
                normalized_array.append(diff / range);
            },
            Option::None(_) => { break; }
        };
    };
    let mut normalized_tensor = TensorTrait::<
        FP16x16
    >::new(shape: array![tensor_data.data.len()].span(), data: normalized_array.span());
    return normalized_tensor;
}
mod aave_data;
mod boston_data;
mod linear_data;
mod user_inputs_data;
|
mod aave_x_features;
mod aave_y_labels;
|
use array::ArrayTrait;
use orion::numbers::fixed_point::implementations::fp16x16::core::{FP16x16Impl, FP16x16PartialEq};
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FP16x16, FixedTrait};
fn aave_x_features() -> Tensor<FP16x16> {
let tensor = TensorTrait::<
FP16x16
>::new(
shape: array![24, 9].span(),
data: array![
FixedTrait::new(61, false),
FixedTrait::new(484966, false),
FixedTrait::new(812646, false),
FixedTrait::new(13369344, false),
FixedTrait::new(3604, false),
FixedTrait::new(7798784, false),
FixedTrait::new(1880883, false),
FixedTrait::new(5006950, false),
FixedTrait::new(220856320, false),
FixedTrait::new(87, false),
FixedTrait::new(488243, false),
FixedTrait::new(812646, false),
FixedTrait::new(13434880, false),
FixedTrait::new(3604, false),
FixedTrait::new(7798784, false),
FixedTrait::new(1880883, false),
FixedTrait::new(5006950, false),
FixedTrait::new(220856320, false),
FixedTrait::new(114, false),
FixedTrait::new(525598, false),
FixedTrait::new(812646, false),
FixedTrait::new(13565952, false),
FixedTrait::new(3604, false),
FixedTrait::new(7798784, false),
FixedTrait::new(1887436, false),
FixedTrait::new(5013504, false),
FixedTrait::new(217579519, false),
FixedTrait::new(138, false),
FixedTrait::new(628490, false),
FixedTrait::new(838860, false),
FixedTrait::new(13893632, false),
FixedTrait::new(3604, false),
FixedTrait::new(8126463, false),
FixedTrait::new(1874329, false),
FixedTrait::new(5046272, false),
FixedTrait::new(208404480, false),
FixedTrait::new(1, false),
FixedTrai |
// NOTE(review): the trailing lines here ("Subsets and Splits", "No community
// queries yet", "The top public SQL queries from the community will appear
// here once available.") are HuggingFace dataset-viewer boilerplate captured
// by the extraction — they are not part of the source and should be removed.