diff --git a/.all-contributorsrc b/.all-contributorsrc
index b10e46074..8a41f9cbe 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -230,7 +230,7 @@
"name": "Mahmoud Mohajer",
"avatar_url": "https://avatars.githubusercontent.com/u/89094323?v=4",
"profile": "https://medium.com/@mohajermahmoud3",
- "contributions": [
+ "contributions": [
"code"
]
},
@@ -251,6 +251,15 @@
"contributions": [
"code"
]
+ },
+ {
+ "login": "akhercha",
+ "name": "akhercha",
+ "avatar_url": "https://avatars.githubusercontent.com/u/22559023?v=4",
+ "profile": "https://t.me/notaihe",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 8c68ba96d..33e343ae4 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -9,5 +9,5 @@ jobs:
- uses: actions/checkout@v3
- uses: software-mansion/setup-scarb@v1
with:
- scarb-version: "2.3.0"
+ scarb-version: "2.4.0"
- run: scarb test --workspace && scarb fmt --workspace
\ No newline at end of file
diff --git a/README.md b/README.md
index 42dc82c17..59bda0824 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@
# Orion: An Open-source Framework for Validity and ZK ML ✨
-[](#contributors-)
+[](#contributors-)
Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/).
@@ -102,6 +102,7 @@ Thanks goes to these wonderful people:
 Mahmoud Mohajer 💻 |
 HappyTomatoo 🐛 |
 Bilgin Koçak 💻 |
+  akhercha 💻 |
diff --git a/Scarb.toml b/Scarb.toml
index bcbb9f9c1..6ebf1fd1f 100644
--- a/Scarb.toml
+++ b/Scarb.toml
@@ -1,13 +1,17 @@
[package]
name = "orion"
-version = "0.1.7"
+version = "0.1.9"
+cairo-version = "2.4.0"
+edition = "2023_10"
description = "ONNX Runtime in Cairo for verifiable ML inference using STARK"
homepage = "https://github.com/gizatechxyz/orion"
[dependencies]
-alexandria_data_structures = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "f37d73d" }
-alexandria_sorting = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "f37d73d" }
-cubit = { git = "https://github.com/influenceth/cubit.git", rev = "b459053" }
+alexandria_merkle_tree = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "01a7690" }
+alexandria_data_structures = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "01a7690" }
+alexandria_sorting = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "01a7690" }
+# TODO: update to https://github.com/influenceth/cubit & change rev
+cubit = { git = "https://github.com/akhercha/cubit.git", rev = "d3869a3" }
[scripts]
sierra = "cairo-compile . -r"
diff --git a/docs/academy/tutorials/implement-new-operators-in-orion.md b/docs/academy/tutorials/implement-new-operators-in-orion.md
index 24fd698d1..fa9cc08fc 100644
--- a/docs/academy/tutorials/implement-new-operators-in-orion.md
+++ b/docs/academy/tutorials/implement-new-operators-in-orion.md
@@ -208,7 +208,7 @@ trait NNTrait {
/// ## Examples
///
/// ```rust
- /// use array::{ArrayTrait, SpanTrait};
+ /// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
/// use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/academy/tutorials/mnist-classification-with-orion.md b/docs/academy/tutorials/mnist-classification-with-orion.md
index bb85956d5..cec11fde8 100644
--- a/docs/academy/tutorials/mnist-classification-with-orion.md
+++ b/docs/academy/tutorials/mnist-classification-with-orion.md
@@ -392,7 +392,7 @@ os.makedirs('src/generated', exist_ok=True)
for tensor_name, tensor in tensors.items():
with open(os.path.join('src', 'generated', f"{tensor_name}.cairo"), "w") as f:
f.write(
- "use array::ArrayTrait;\n" +
+ "use core::array::ArrayTrait;\n" +
"use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};\n" +
"use orion::numbers::i32;\n\n" +
"\nfn {0}() -> Tensor ".format(tensor_name) + "{\n" +
@@ -431,7 +431,7 @@ We have just created a file called `lib.cairo`, which contains a module declarat
Here is a file we generated: `fc1_bias.cairo`
```rust
-use array::ArrayTrait;
+use core::array::ArrayTrait;
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::numbers::i32;
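For context, a minimal sketch of what a generated weight file can look like after this migration — the shape and data values below are illustrative placeholders, not the real exported MNIST weights, and `IntegerTrait` is imported here only to build the `i32` values explicitly:

```rust
use core::array::ArrayTrait;
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::numbers::{i32, IntegerTrait};

// Placeholder values for illustration; the real generated file holds the exported model weights.
fn fc1_bias() -> Tensor<i32> {
    let mut shape = ArrayTrait::<usize>::new();
    shape.append(2);

    let mut data = ArrayTrait::<i32>::new();
    data.append(IntegerTrait::new(1, false)); // +1
    data.append(IntegerTrait::new(3, true)); // -3

    TensorTrait::new(shape.span(), data.span())
}
```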
diff --git a/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md b/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md
index d69fddb21..1de5b296b 100644
--- a/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md
+++ b/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md
@@ -164,7 +164,7 @@ def generate_cairo_files(data, name):
os.makedirs('src/generated', exist_ok=True)
with open(os.path.join('src', 'generated', f"{name}.cairo"), "w") as f:
f.write(
- "use array::ArrayTrait;\n" +
+ "use core::array::ArrayTrait;\n" +
"use orion::operators::tensor::{FP16x16Tensor, TensorTrait, Tensor};\n" +
"use orion::numbers::{FixedTrait, FP16x16, FP16x16Impl};\n"
"\nfn {0}() -> Tensor ".format(name) + "{\n" +
@@ -200,7 +200,7 @@ mod lin_reg_func;
This will tell our compiler to include the separate modules listed above during the compilation of our code. We will be covering each module in detail in the following section, but let’s first review the generated folder files.
```rust
-use array::ArrayTrait;
+use core::array::ArrayTrait;
use orion::operators::tensor::{FP16x16Tensor, TensorTrait, Tensor};
use orion::numbers::{FixedTrait, FP16x16, FP16x16Impl};
@@ -343,7 +343,7 @@ Calculating the y-intercept is fairly simple, we just need to substitute the cal
Now that we have implemented all the necessary functions for the OLS method, we can finally test our linear regression model. We begin by creating a new separate test file named `test.cairo` and importing all the necessary Orion libraries, including our `X_values` and `y_values` found in the generated folder. We also import all the OLS functions from the `lin_reg_func.cairo` file, as we will rely on them to construct the regression model.
```rust
-use debug::PrintTrait;
+use core::debug::PrintTrait;
use verifiable_linear_regression::generated::X_values::X_values;
use verifiable_linear_regression::generated::Y_values::Y_values;
diff --git a/docs/academy/tutorials/verifiable-support-vector-machine.md b/docs/academy/tutorials/verifiable-support-vector-machine.md
index b1ce9583d..6986539f1 100644
--- a/docs/academy/tutorials/verifiable-support-vector-machine.md
+++ b/docs/academy/tutorials/verifiable-support-vector-machine.md
@@ -196,7 +196,7 @@ def generate_cairo_files(data, name):
with open(os.path.join('src', 'generated', f"{name}.cairo"), "w") as f:
f.write(
- "use array::ArrayTrait;\n" +
+ "use core::array::ArrayTrait;\n" +
"use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};\n" +
"use orion::numbers::{FixedTrait, FP16x16, FP16x16Impl};\n" +
"\n" + f"fn {name}() -> Tensor" + "{\n\n" +
@@ -237,7 +237,7 @@ mod helper;
This will tell our compiler to include the separate modules listed above during the compilation of our code. We will be covering each module in detail in the following section, but let’s first review the generated folder files.
```rust
-use array::ArrayTrait;
+use core::array::ArrayTrait;
use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor};
use orion::numbers::{FixedTrait, FP16x16, FP16x16Impl};
@@ -449,9 +449,9 @@ fn pred(x: @Tensor, w: @Tensor) -> Tensor {
Finally, our `train.cairo` file implements model training using the functions described earlier and is executed as part of our model tests.
```rust
-use debug::PrintTrait;
+use core::debug::PrintTrait;
use traits::TryInto;
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorAdd, FP16x16TensorMul, FP16x16TensorSub,
FP16x16TensorDiv
@@ -576,7 +576,7 @@ Now that we have implemented all the necessary functions for SVM, we can finally
```rust
use traits::TryInto;
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
Tensor, TensorTrait, FP16x16Tensor, FP16x16TensorAdd, FP16x16TensorMul, FP16x16TensorSub,
FP16x16TensorDiv
diff --git a/docs/framework/get-started.md b/docs/framework/get-started.md
index 972ecc2e4..ca8612509 100644
--- a/docs/framework/get-started.md
+++ b/docs/framework/get-started.md
@@ -3,7 +3,7 @@
In this section, we will guide you to start using Orion successfully. We will help you install Cairo 1.0 and add the Orion dependency to your project.
{% hint style="info" %}
-Orion supports **Cairo and Scarb v2.3.0**
+Orion supports **Cairo and Scarb v2.4.0**
{% endhint %}
## 📦 Installations
@@ -56,7 +56,7 @@ scarb build
You can now use `orion` in your files:
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::operators::nn::{NNTrait, I32NN};
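As a quick sanity check for the `core::`-prefixed imports, here is a minimal sketch of the kind of snippet the guide builds up to. The shape, the data values, and the choice of `relu` are assumptions for illustration only:

```rust
use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::operators::nn::{NNTrait, I32NN};
use orion::numbers::{i32, IntegerTrait};

fn relu_example() -> Tensor<i32> {
    // Build a 2x2 tensor with a mix of positive and negative entries.
    let mut shape = ArrayTrait::<usize>::new();
    shape.append(2);
    shape.append(2);

    let mut data = ArrayTrait::<i32>::new();
    data.append(IntegerTrait::new(1, false));
    data.append(IntegerTrait::new(2, false));
    data.append(IntegerTrait::new(1, true));
    data.append(IntegerTrait::new(2, true));

    let tensor = TensorTrait::<i32>::new(shape.span(), data.span());

    // ReLU maps the negative entries to zero.
    NNTrait::relu(@tensor)
}
```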
diff --git a/docs/framework/operators/neural-network/nn.hard_sigmoid.md b/docs/framework/operators/neural-network/nn.hard_sigmoid.md
index be4338109..eeff0e31b 100644
--- a/docs/framework/operators/neural-network/nn.hard_sigmoid.md
+++ b/docs/framework/operators/neural-network/nn.hard_sigmoid.md
@@ -27,7 +27,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/neural-network/nn.leaky_relu.md b/docs/framework/operators/neural-network/nn.leaky_relu.md
index 04d671ec3..2aca42881 100644
--- a/docs/framework/operators/neural-network/nn.leaky_relu.md
+++ b/docs/framework/operators/neural-network/nn.leaky_relu.md
@@ -22,7 +22,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/neural-network/nn.linear.md b/docs/framework/operators/neural-network/nn.linear.md
index 0e37fd268..60d9079c8 100644
--- a/docs/framework/operators/neural-network/nn.linear.md
+++ b/docs/framework/operators/neural-network/nn.linear.md
@@ -23,7 +23,7 @@ A `Tensor` representing the result of the linear transformation.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::operators::nn::{NNTrait, I32NN};
diff --git a/docs/framework/operators/neural-network/nn.logsoftmax.md b/docs/framework/operators/neural-network/nn.logsoftmax.md
index 1aba6b01b..ccd7f8652 100644
--- a/docs/framework/operators/neural-network/nn.logsoftmax.md
+++ b/docs/framework/operators/neural-network/nn.logsoftmax.md
@@ -26,7 +26,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/neural-network/nn.relu.md b/docs/framework/operators/neural-network/nn.relu.md
index c4f622066..aff9a322a 100644
--- a/docs/framework/operators/neural-network/nn.relu.md
+++ b/docs/framework/operators/neural-network/nn.relu.md
@@ -21,7 +21,7 @@ A `Tensor` with the same shape as the input tensor.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::operators::nn::{NNTrait, I32NN};
diff --git a/docs/framework/operators/neural-network/nn.sigmoid.md b/docs/framework/operators/neural-network/nn.sigmoid.md
index 28503f04a..4dde429eb 100644
--- a/docs/framework/operators/neural-network/nn.sigmoid.md
+++ b/docs/framework/operators/neural-network/nn.sigmoid.md
@@ -25,7 +25,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/neural-network/nn.softmax.md b/docs/framework/operators/neural-network/nn.softmax.md
index 6b1b0c982..4fb83e0b3 100644
--- a/docs/framework/operators/neural-network/nn.softmax.md
+++ b/docs/framework/operators/neural-network/nn.softmax.md
@@ -26,7 +26,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/neural-network/nn.softplus.md b/docs/framework/operators/neural-network/nn.softplus.md
index 32caeda7a..b9c1b7603 100644
--- a/docs/framework/operators/neural-network/nn.softplus.md
+++ b/docs/framework/operators/neural-network/nn.softplus.md
@@ -25,7 +25,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/neural-network/nn.softsign.md b/docs/framework/operators/neural-network/nn.softsign.md
index d80b89d7c..aee3071c4 100644
--- a/docs/framework/operators/neural-network/nn.softsign.md
+++ b/docs/framework/operators/neural-network/nn.softsign.md
@@ -25,7 +25,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/neural-network/nn.thresholded_relu.md b/docs/framework/operators/neural-network/nn.thresholded_relu.md
index 8c1206ce1..3d2ecc243 100644
--- a/docs/framework/operators/neural-network/nn.thresholded_relu.md
+++ b/docs/framework/operators/neural-network/nn.thresholded_relu.md
@@ -22,7 +22,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23};
use orion::operators::nn::{NNTrait, FP8x23NN};
diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md
index 0f0ed5513..1127918e5 100644
--- a/docs/framework/operators/tensor/README.md
+++ b/docs/framework/operators/tensor/README.md
@@ -137,7 +137,7 @@ Two tensors are “broadcastable” if the following rules hold:
Element-wise add.
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, U32TensorAdd};
fn element_wise_add_example() -> Tensor {
@@ -158,7 +158,7 @@ fn element_wise_add_example() -> Tensor {
Add two tensors of different shapes but compatible in broadcasting.
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, U32TensorAdd};
fn broadcasting_add_example() -> Tensor {
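The hunk above cuts off mid-example; purely for illustration, a hedged completion of the broadcasting add with the updated imports might look like the sketch below (the shapes and values are assumptions, not the exact figures from the README):

```rust
use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, U32TensorAdd};

fn broadcasting_add_example() -> Tensor<u32> {
    // Shape [2, 2] tensor.
    let mut shape_1 = ArrayTrait::<usize>::new();
    shape_1.append(2);
    shape_1.append(2);
    let mut data_1 = ArrayTrait::<u32>::new();
    data_1.append(0);
    data_1.append(1);
    data_1.append(2);
    data_1.append(3);
    let tensor_1 = TensorTrait::<u32>::new(shape_1.span(), data_1.span());

    // Shape [1, 2] tensor, broadcast across the first dimension.
    let mut shape_2 = ArrayTrait::<usize>::new();
    shape_2.append(1);
    shape_2.append(2);
    let mut data_2 = ArrayTrait::<u32>::new();
    data_2.append(10);
    data_2.append(100);
    let tensor_2 = TensorTrait::<u32>::new(shape_2.span(), data_2.span());

    // Element-wise add with broadcasting via the Add operator impl.
    tensor_1 + tensor_2
}
```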
diff --git a/docs/framework/operators/tensor/tensor.abs.md b/docs/framework/operators/tensor/tensor.abs.md
index 923c7cedc..0f3e3cc85 100644
--- a/docs/framework/operators/tensor/tensor.abs.md
+++ b/docs/framework/operators/tensor/tensor.abs.md
@@ -19,7 +19,7 @@ the absolute value of all elements in the input tensor.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::numbers::{i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.acos.md b/docs/framework/operators/tensor/tensor.acos.md
index 724947e68..b6db3203f 100644
--- a/docs/framework/operators/tensor/tensor.acos.md
+++ b/docs/framework/operators/tensor/tensor.acos.md
@@ -23,7 +23,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FP8x23, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.acosh.md b/docs/framework/operators/tensor/tensor.acosh.md
index 7dbf6ba09..813fd8d7d 100644
--- a/docs/framework/operators/tensor/tensor.acosh.md
+++ b/docs/framework/operators/tensor/tensor.acosh.md
@@ -24,7 +24,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.and.md b/docs/framework/operators/tensor/tensor.and.md
index 2c2b74f16..b26f9bdbb 100644
--- a/docs/framework/operators/tensor/tensor.and.md
+++ b/docs/framework/operators/tensor/tensor.and.md
@@ -25,7 +25,7 @@ A new `Tensor` with the same shape as the broadcasted inputs.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.argmax.md b/docs/framework/operators/tensor/tensor.argmax.md
index 511b05c91..cb3457a45 100644
--- a/docs/framework/operators/tensor/tensor.argmax.md
+++ b/docs/framework/operators/tensor/tensor.argmax.md
@@ -26,7 +26,7 @@ A new `Tensor` instance containing the indices of the maximum values along th
Case 1: argmax with default parameters
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -43,7 +43,7 @@ fn argmax_example() -> Tensor {
Case 2: argmax with keepdims set to false
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -62,7 +62,7 @@ fn argmax_example() -> Tensor {
Case 3: argmax with select_last_index set to true
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.argmin.md b/docs/framework/operators/tensor/tensor.argmin.md
index cfe15e89b..d5f0ae192 100644
--- a/docs/framework/operators/tensor/tensor.argmin.md
+++ b/docs/framework/operators/tensor/tensor.argmin.md
@@ -26,7 +26,7 @@ A new `Tensor` instance containing the indices of the minimum values along th
Case 1: argmin with default parameters
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -44,7 +44,7 @@ fn argmin_example() -> Tensor {
Case 2: argmin with keepdims set to false
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -63,7 +63,7 @@ fn argmin_example() -> Tensor {
Case 3: argmin with select_last_index set to true
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.array_feature_extractor.md b/docs/framework/operators/tensor/tensor.array_feature_extractor.md
index f06bdaf00..f5cec209e 100644
--- a/docs/framework/operators/tensor/tensor.array_feature_extractor.md
+++ b/docs/framework/operators/tensor/tensor.array_feature_extractor.md
@@ -22,7 +22,7 @@ A new `Tensor` of the same shape as the input tensor with selected elements b
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor, U32Tensor};
use orion::numbers::{i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.asin.md b/docs/framework/operators/tensor/tensor.asin.md
index 0c29f8735..592519485 100644
--- a/docs/framework/operators/tensor/tensor.asin.md
+++ b/docs/framework/operators/tensor/tensor.asin.md
@@ -23,7 +23,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.asinh.md b/docs/framework/operators/tensor/tensor.asinh.md
index c0193b8b8..eade59fde 100644
--- a/docs/framework/operators/tensor/tensor.asinh.md
+++ b/docs/framework/operators/tensor/tensor.asinh.md
@@ -24,7 +24,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.at.md b/docs/framework/operators/tensor/tensor.at.md
index 0cc9833d5..b0752f51d 100644
--- a/docs/framework/operators/tensor/tensor.at.md
+++ b/docs/framework/operators/tensor/tensor.at.md
@@ -22,7 +22,7 @@ The `T` value at the specified indices.
# Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.atan.md b/docs/framework/operators/tensor/tensor.atan.md
index f0b7e12ab..cc61d05f6 100644
--- a/docs/framework/operators/tensor/tensor.atan.md
+++ b/docs/framework/operators/tensor/tensor.atan.md
@@ -23,7 +23,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.binarizer.md b/docs/framework/operators/tensor/tensor.binarizer.md
index f97dcd446..6e1009dfa 100644
--- a/docs/framework/operators/tensor/tensor.binarizer.md
+++ b/docs/framework/operators/tensor/tensor.binarizer.md
@@ -20,7 +20,7 @@ Constrain input and output types to fixed point numbers.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.bitwise_and.md b/docs/framework/operators/tensor/tensor.bitwise_and.md
index 5288ab953..c12793d88 100644
--- a/docs/framework/operators/tensor/tensor.bitwise_and.md
+++ b/docs/framework/operators/tensor/tensor.bitwise_and.md
@@ -25,7 +25,7 @@ A new `Tensor` with the same shape as the broadcasted inputs.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.bitwise_or.md b/docs/framework/operators/tensor/tensor.bitwise_or.md
index ac3ac4f38..37a0b9c03 100644
--- a/docs/framework/operators/tensor/tensor.bitwise_or.md
+++ b/docs/framework/operators/tensor/tensor.bitwise_or.md
@@ -25,7 +25,7 @@ A new `Tensor` with the same shape as the broadcasted inputs.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.bitwise_xor.md b/docs/framework/operators/tensor/tensor.bitwise_xor.md
index 62f1dd0b1..5f3797156 100644
--- a/docs/framework/operators/tensor/tensor.bitwise_xor.md
+++ b/docs/framework/operators/tensor/tensor.bitwise_xor.md
@@ -25,7 +25,7 @@ A new `Tensor` with the same shape as the broadcasted inputs.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.ceil.md b/docs/framework/operators/tensor/tensor.ceil.md
index 33a0bf932..5954e45f8 100644
--- a/docs/framework/operators/tensor/tensor.ceil.md
+++ b/docs/framework/operators/tensor/tensor.ceil.md
@@ -23,7 +23,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FP8x23, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.clip.md b/docs/framework/operators/tensor/tensor.clip.md
index d5dab3fb5..aa54a81a4 100644
--- a/docs/framework/operators/tensor/tensor.clip.md
+++ b/docs/framework/operators/tensor/tensor.clip.md
@@ -19,7 +19,7 @@ Output `Tensor` with clipped input elements.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.concat.md b/docs/framework/operators/tensor/tensor.concat.md
index 0e134a288..70ca6b26f 100644
--- a/docs/framework/operators/tensor/tensor.concat.md
+++ b/docs/framework/operators/tensor/tensor.concat.md
@@ -23,7 +23,7 @@ A new `Tensor` concatenated tensor of the input tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.concat_from_sequence.md b/docs/framework/operators/tensor/tensor.concat_from_sequence.md
index 846322b90..04cacdd62 100644
--- a/docs/framework/operators/tensor/tensor.concat_from_sequence.md
+++ b/docs/framework/operators/tensor/tensor.concat_from_sequence.md
@@ -25,7 +25,7 @@ A new `Tensor` concatenated tensor from the input tensor sequence.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.constant_of_shape.md b/docs/framework/operators/tensor/tensor.constant_of_shape.md
index ca275e375..b06c2793a 100644
--- a/docs/framework/operators/tensor/tensor.constant_of_shape.md
+++ b/docs/framework/operators/tensor/tensor.constant_of_shape.md
@@ -20,7 +20,7 @@ A new `Tensor` instance.
Let's create a new u32 Tensor with constant 42.
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
TensorTrait, // we import the trait
diff --git a/docs/framework/operators/tensor/tensor.cos.md b/docs/framework/operators/tensor/tensor.cos.md
index f9384ec6d..64e06a397 100644
--- a/docs/framework/operators/tensor/tensor.cos.md
+++ b/docs/framework/operators/tensor/tensor.cos.md
@@ -23,7 +23,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FP8x23, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.cosh.md b/docs/framework/operators/tensor/tensor.cosh.md
index 3668e8ca0..f08548d4a 100644
--- a/docs/framework/operators/tensor/tensor.cosh.md
+++ b/docs/framework/operators/tensor/tensor.cosh.md
@@ -24,7 +24,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.cumsum.md b/docs/framework/operators/tensor/tensor.cumsum.md
index c93fa4d94..de34797e0 100644
--- a/docs/framework/operators/tensor/tensor.cumsum.md
+++ b/docs/framework/operators/tensor/tensor.cumsum.md
@@ -26,7 +26,7 @@ A new `Tensor` instance containing the cumulative sum of the input tensor's e
Case 1: cumsum with default parameters
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -43,7 +43,7 @@ fn cumsum_example() -> Tensor {
Case 2: cumsum with exclusive = true
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -60,7 +60,7 @@ fn cumsum_example() -> Tensor {
Case 3: cumsum with exclusive = true and reverse = true
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.dequantize_linear.md b/docs/framework/operators/tensor/tensor.dequantize_linear.md
index 38635349b..b7410f8c4 100644
--- a/docs/framework/operators/tensor/tensor.dequantize_linear.md
+++ b/docs/framework/operators/tensor/tensor.dequantize_linear.md
@@ -30,7 +30,7 @@ fp16x16wide tensor, not supported.
## Examples
```rust
- use array::{ArrayTrait, SpanTrait};
+ use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor};
use orion::numbers::{i8, i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.equal.md b/docs/framework/operators/tensor/tensor.equal.md
index 83bb52a5d..6e393c989 100644
--- a/docs/framework/operators/tensor/tensor.equal.md
+++ b/docs/framework/operators/tensor/tensor.equal.md
@@ -27,7 +27,7 @@ A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -49,7 +49,7 @@ fn eq_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.exp.md b/docs/framework/operators/tensor/tensor.exp.md
index 187d9becd..ecaa0a44a 100644
--- a/docs/framework/operators/tensor/tensor.exp.md
+++ b/docs/framework/operators/tensor/tensor.exp.md
@@ -24,7 +24,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FP8x23, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.flatten.md b/docs/framework/operators/tensor/tensor.flatten.md
index 0740346b4..6e3537a55 100644
--- a/docs/framework/operators/tensor/tensor.flatten.md
+++ b/docs/framework/operators/tensor/tensor.flatten.md
@@ -41,7 +41,7 @@ fn flatten_example() -> Tensor {
Case 2: flatten with axis 1
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -58,7 +58,7 @@ fn flatten_example() -> Tensor {
Case 3: flatten with axis 2
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.gather.md b/docs/framework/operators/tensor/tensor.gather.md
index 8eb4a81f4..218b01385 100644
--- a/docs/framework/operators/tensor/tensor.gather.md
+++ b/docs/framework/operators/tensor/tensor.gather.md
@@ -23,7 +23,7 @@ A new `Tensor` .
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.gather_elements.md b/docs/framework/operators/tensor/tensor.gather_elements.md
index ad8b6c3bb..9bda94eb6 100644
--- a/docs/framework/operators/tensor/tensor.gather_elements.md
+++ b/docs/framework/operators/tensor/tensor.gather_elements.md
@@ -23,7 +23,7 @@ A new `Tensor` .
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.greater.md b/docs/framework/operators/tensor/tensor.greater.md
index 677bc1220..fb186b8f7 100644
--- a/docs/framework/operators/tensor/tensor.greater.md
+++ b/docs/framework/operators/tensor/tensor.greater.md
@@ -27,7 +27,7 @@ A new `Tensor` of booleans (0 or 1) with the same shape as the broadcaste
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -49,7 +49,7 @@ fn greater_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.greater_equal.md b/docs/framework/operators/tensor/tensor.greater_equal.md
index 8a2d6637d..1fecfbdc4 100644
--- a/docs/framework/operators/tensor/tensor.greater_equal.md
+++ b/docs/framework/operators/tensor/tensor.greater_equal.md
@@ -27,7 +27,7 @@ A new `Tensor` of booleans (0 or 1) with the same shape as the broadcaste
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -49,7 +49,7 @@ fn greater_equal_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.identity.md b/docs/framework/operators/tensor/tensor.identity.md
index 902972c84..43a25c564 100644
--- a/docs/framework/operators/tensor/tensor.identity.md
+++ b/docs/framework/operators/tensor/tensor.identity.md
@@ -17,7 +17,7 @@ A new `Tensor` to copy input into.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
diff --git a/docs/framework/operators/tensor/tensor.is_inf.md b/docs/framework/operators/tensor/tensor.is_inf.md
index e77a5fb61..313b4d8b8 100644
--- a/docs/framework/operators/tensor/tensor.is_inf.md
+++ b/docs/framework/operators/tensor/tensor.is_inf.md
@@ -20,7 +20,7 @@ A new `Tensor` instance with entries set to true iff the input tensors cor
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, U32Tensor};
fn is_inf_example() -> Tensor {
diff --git a/docs/framework/operators/tensor/tensor.is_nan.md b/docs/framework/operators/tensor/tensor.is_nan.md
index 28e5881e0..955edc0f9 100644
--- a/docs/framework/operators/tensor/tensor.is_nan.md
+++ b/docs/framework/operators/tensor/tensor.is_nan.md
@@ -16,7 +16,7 @@ A new `Tensor` instance with entries set to true iff the input tensors cor
## Examples
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.less.md b/docs/framework/operators/tensor/tensor.less.md
index 0413fa7ea..d5d264d8a 100644
--- a/docs/framework/operators/tensor/tensor.less.md
+++ b/docs/framework/operators/tensor/tensor.less.md
@@ -27,7 +27,7 @@ A new `Tensor` of booleans (0 or 1) with the same shape as the broadcaste
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -49,7 +49,7 @@ fn less_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.less_equal.md b/docs/framework/operators/tensor/tensor.less_equal.md
index 23d3356fa..c440b39c6 100644
--- a/docs/framework/operators/tensor/tensor.less_equal.md
+++ b/docs/framework/operators/tensor/tensor.less_equal.md
@@ -27,7 +27,7 @@ A new `Tensor` of booleans (0 or 1) with the same shape as the broadcaste
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -49,7 +49,7 @@ fn less_equal_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.log.md b/docs/framework/operators/tensor/tensor.log.md
index d14245e47..7ed4e9e2a 100644
--- a/docs/framework/operators/tensor/tensor.log.md
+++ b/docs/framework/operators/tensor/tensor.log.md
@@ -24,7 +24,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FP8x23, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.matmul.md b/docs/framework/operators/tensor/tensor.matmul.md
index 21e828a76..378d661be 100644
--- a/docs/framework/operators/tensor/tensor.matmul.md
+++ b/docs/framework/operators/tensor/tensor.matmul.md
@@ -29,7 +29,7 @@ A new `Tensor` resulting from the matrix multiplication.
Case 1: Dot product of two vectors (1D \* 1D)
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -47,7 +47,7 @@ fn dot_product_example() -> Tensor {
Case 2: Matrix multiplication (2D \* 2D)
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -69,7 +69,7 @@ fn matrix_mul_example() -> Tensor {
Case 3: Matrix-Vector multiplication (2D x 1D)
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.max.md b/docs/framework/operators/tensor/tensor.max.md
index 966405862..27f1e4eb5 100644
--- a/docs/framework/operators/tensor/tensor.max.md
+++ b/docs/framework/operators/tensor/tensor.max.md
@@ -27,7 +27,7 @@ A new `Tensor` containing the element-wise maximum values
Case 1: Process tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -46,7 +46,7 @@ fn max_example() -> Tensor {
Case 2: Process tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.max_in_tensor.md b/docs/framework/operators/tensor/tensor.max_in_tensor.md
index 08ac0f82f..c2453f9b9 100644
--- a/docs/framework/operators/tensor/tensor.max_in_tensor.md
+++ b/docs/framework/operators/tensor/tensor.max_in_tensor.md
@@ -17,7 +17,7 @@ The maximum `T` value in the tensor.
Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.min.md b/docs/framework/operators/tensor/tensor.min.md
index 31157c2dc..92bc2d150 100644
--- a/docs/framework/operators/tensor/tensor.min.md
+++ b/docs/framework/operators/tensor/tensor.min.md
@@ -27,7 +27,7 @@ A new `Tensor` containing the element-wise minimum values
Case 1: Process tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -46,7 +46,7 @@ fn min_example() -> Tensor {
Case 2: Process tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.min_in_tensor.md b/docs/framework/operators/tensor/tensor.min_in_tensor.md
index 65ac74034..1f17f1062 100644
--- a/docs/framework/operators/tensor/tensor.min_in_tensor.md
+++ b/docs/framework/operators/tensor/tensor.min_in_tensor.md
@@ -17,7 +17,7 @@ The minimum `T` value in the tensor.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.neg.md b/docs/framework/operators/tensor/tensor.neg.md
index f5c9b3816..8b4a1a93e 100644
--- a/docs/framework/operators/tensor/tensor.neg.md
+++ b/docs/framework/operators/tensor/tensor.neg.md
@@ -19,7 +19,7 @@ the negation of all elements in the input tensor.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I32Tensor};
use orion::numbers::{i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.new.md b/docs/framework/operators/tensor/tensor.new.md
index 3d29c9da3..78b328aaf 100644
--- a/docs/framework/operators/tensor/tensor.new.md
+++ b/docs/framework/operators/tensor/tensor.new.md
@@ -24,7 +24,7 @@ A new `Tensor` instance.
Let's create new u32 Tensors.
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
TensorTrait, // we import the trait
diff --git a/docs/framework/operators/tensor/tensor.nonzero.md b/docs/framework/operators/tensor/tensor.nonzero.md
index c9d8a1942..355660fbd 100644
--- a/docs/framework/operators/tensor/tensor.nonzero.md
+++ b/docs/framework/operators/tensor/tensor.nonzero.md
@@ -17,7 +17,7 @@ A new `Tensor` indices of the elements that are non-zero (in row-major or
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.not.md b/docs/framework/operators/tensor/tensor.not.md
index 0add0277a..a08250171 100644
--- a/docs/framework/operators/tensor/tensor.not.md
+++ b/docs/framework/operators/tensor/tensor.not.md
@@ -19,7 +19,7 @@ the negation of all elements in the input tensor.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor};
use orion::numbers::{i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.onehot.md b/docs/framework/operators/tensor/tensor.onehot.md
index 09e88f02d..18cfaefc5 100644
--- a/docs/framework/operators/tensor/tensor.onehot.md
+++ b/docs/framework/operators/tensor/tensor.onehot.md
@@ -28,7 +28,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FP8x23, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.or.md b/docs/framework/operators/tensor/tensor.or.md
index ec92f7bab..ff210a043 100644
--- a/docs/framework/operators/tensor/tensor.or.md
+++ b/docs/framework/operators/tensor/tensor.or.md
@@ -27,7 +27,7 @@ A new `Tensor` of booleans (0 or 1) with the same shape as the broadcaste
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -48,7 +48,7 @@ fn or_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.pow.md b/docs/framework/operators/tensor/tensor.pow.md
index 0a0ae4ac7..be4bd8149 100644
--- a/docs/framework/operators/tensor/tensor.pow.md
+++ b/docs/framework/operators/tensor/tensor.pow.md
@@ -27,7 +27,7 @@ A new `Tensor` with the same shape as the broadcasted inputs.
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -48,7 +48,7 @@ fn pow_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.qlinear_add.md b/docs/framework/operators/tensor/tensor.qlinear_add.md
index 998ddd214..508282156 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_add.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_add.md
@@ -37,7 +37,7 @@ fp16x16wide tensor, not supported.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
use orion::numbers::{i8, FP16x16, FP16x16Impl, IntegerTrait, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.qlinear_concat.md b/docs/framework/operators/tensor/tensor.qlinear_concat.md
index 1eda056fe..6f2b44ff8 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_concat.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_concat.md
@@ -33,7 +33,7 @@ A new `Tensor` concatenated quantized tensor of the dequantized input tensor
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
use orion::numbers::{i8, FP16x16, FP16x16Impl, IntegerTrait, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.qlinear_leakyrelu.md b/docs/framework/operators/tensor/tensor.qlinear_leakyrelu.md
index 0dfa04498..92b94530f 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_leakyrelu.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_leakyrelu.md
@@ -34,7 +34,7 @@ bool tensor, not supported.
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
use orion::numbers::{i8, FP16x16, FP16x16Impl, IntegerTrait, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.qlinear_matmul.md b/docs/framework/operators/tensor/tensor.qlinear_matmul.md
index 4192918af..13ff99602 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_matmul.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_matmul.md
@@ -37,7 +37,7 @@ fp16x16wide tensor, not supported.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
use orion::numbers::{i8, FP16x16, FP16x16Impl, IntegerTrait, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.qlinear_mul.md b/docs/framework/operators/tensor/tensor.qlinear_mul.md
index d773b9b19..695128ce0 100644
--- a/docs/framework/operators/tensor/tensor.qlinear_mul.md
+++ b/docs/framework/operators/tensor/tensor.qlinear_mul.md
@@ -37,7 +37,7 @@ fp16x16wide tensor, not supported.
## Example
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, FP16x16Tensor};
use orion::numbers::{i8, FP16x16, FP16x16Impl, IntegerTrait, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.quantize_linear.md b/docs/framework/operators/tensor/tensor.quantize_linear.md
index 312cb94cb..7b99fef01 100644
--- a/docs/framework/operators/tensor/tensor.quantize_linear.md
+++ b/docs/framework/operators/tensor/tensor.quantize_linear.md
@@ -29,7 +29,7 @@ u32 tensor, not supported.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor};
use orion::numbers::{i8, i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.ravel_index.md b/docs/framework/operators/tensor/tensor.ravel_index.md
index 6d6ff9725..a88d1b142 100644
--- a/docs/framework/operators/tensor/tensor.ravel_index.md
+++ b/docs/framework/operators/tensor/tensor.ravel_index.md
@@ -22,7 +22,7 @@ The index corresponding to the given indices.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.reduce_l1.md b/docs/framework/operators/tensor/tensor.reduce_l1.md
index b4edd2a22..6fb448700 100644
--- a/docs/framework/operators/tensor/tensor.reduce_l1.md
+++ b/docs/framework/operators/tensor/tensor.reduce_l1.md
@@ -23,7 +23,7 @@ A new `Tensor` instance with the specified axis reduced by summing its elemen
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.reduce_mean.md b/docs/framework/operators/tensor/tensor.reduce_mean.md
index 633f8ab00..3d74c3a98 100644
--- a/docs/framework/operators/tensor/tensor.reduce_mean.md
+++ b/docs/framework/operators/tensor/tensor.reduce_mean.md
@@ -24,7 +24,7 @@ A new `Tensor` instance with the specified axes reduced by meaning its elemen
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.reduce_min.md b/docs/framework/operators/tensor/tensor.reduce_min.md
index eb416b7f7..d2d78ef2c 100644
--- a/docs/framework/operators/tensor/tensor.reduce_min.md
+++ b/docs/framework/operators/tensor/tensor.reduce_min.md
@@ -24,7 +24,7 @@ A new `Tensor` instance with the specified axes reduced by minimum of its ele
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.reduce_prod.md b/docs/framework/operators/tensor/tensor.reduce_prod.md
index 35117d602..2d79b4c4b 100644
--- a/docs/framework/operators/tensor/tensor.reduce_prod.md
+++ b/docs/framework/operators/tensor/tensor.reduce_prod.md
@@ -23,7 +23,7 @@ A new `Tensor` instance with the specified axis reduced by multiplying its el
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.reduce_sum.md b/docs/framework/operators/tensor/tensor.reduce_sum.md
index e596ba311..3aa77d2ce 100644
--- a/docs/framework/operators/tensor/tensor.reduce_sum.md
+++ b/docs/framework/operators/tensor/tensor.reduce_sum.md
@@ -23,7 +23,7 @@ A new `Tensor` instance with the specified axis reduced by summing its elemen
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.reshape.md b/docs/framework/operators/tensor/tensor.reshape.md
index 998c40c1a..b2c8f84eb 100644
--- a/docs/framework/operators/tensor/tensor.reshape.md
+++ b/docs/framework/operators/tensor/tensor.reshape.md
@@ -22,7 +22,7 @@ A new `Tensor` with the specified target shape and the same data.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.round.md b/docs/framework/operators/tensor/tensor.round.md
index 9a6c949eb..58e9f4f54 100644
--- a/docs/framework/operators/tensor/tensor.round.md
+++ b/docs/framework/operators/tensor/tensor.round.md
@@ -19,7 +19,7 @@ the round value of all elements in the input tensor.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
use orion::numbers::{FixedTrait, FP16x16};
diff --git a/docs/framework/operators/tensor/tensor.scatter.md b/docs/framework/operators/tensor/tensor.scatter.md
index 439e7ab0b..4951a5d25 100644
--- a/docs/framework/operators/tensor/tensor.scatter.md
+++ b/docs/framework/operators/tensor/tensor.scatter.md
@@ -25,7 +25,7 @@ A new `Tensor` .
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.sequence_at.md b/docs/framework/operators/tensor/tensor.sequence_at.md
index e0a9535f9..386c594a6 100644
--- a/docs/framework/operators/tensor/tensor.sequence_at.md
+++ b/docs/framework/operators/tensor/tensor.sequence_at.md
@@ -23,7 +23,7 @@ The tensor `Tensor` from the sequence at the specified position.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, I32Tensor};
use orion::numbers::{i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.sequence_construct.md b/docs/framework/operators/tensor/tensor.sequence_construct.md
index fea30780d..d5e627bd1 100644
--- a/docs/framework/operators/tensor/tensor.sequence_construct.md
+++ b/docs/framework/operators/tensor/tensor.sequence_construct.md
@@ -21,7 +21,7 @@ A tensor sequence `Array>` containing the input tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.sequence_empty.md b/docs/framework/operators/tensor/tensor.sequence_empty.md
index a46ddba5f..60ea380e5 100644
--- a/docs/framework/operators/tensor/tensor.sequence_empty.md
+++ b/docs/framework/operators/tensor/tensor.sequence_empty.md
@@ -17,7 +17,7 @@ An empty `Array>` instance.
Let's create a new empty sequence.
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
TensorTrait, // we import the trait
diff --git a/docs/framework/operators/tensor/tensor.sequence_erase.md b/docs/framework/operators/tensor/tensor.sequence_erase.md
index 4c5f94a39..6bce32316 100644
--- a/docs/framework/operators/tensor/tensor.sequence_erase.md
+++ b/docs/framework/operators/tensor/tensor.sequence_erase.md
@@ -23,7 +23,7 @@ The tensor sequence `Array>` with the erased tensor at the specified p
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, I32Tensor};
use orion::numbers::{i32, IntegerTrait};
diff --git a/docs/framework/operators/tensor/tensor.shrink.md b/docs/framework/operators/tensor/tensor.shrink.md
index 37c2e74aa..45bc26b1b 100644
--- a/docs/framework/operators/tensor/tensor.shrink.md
+++ b/docs/framework/operators/tensor/tensor.shrink.md
@@ -22,7 +22,7 @@ Constrain input and output types to fixed point numbers.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.sign.md b/docs/framework/operators/tensor/tensor.sign.md
index 8acb8c730..1829b9264 100644
--- a/docs/framework/operators/tensor/tensor.sign.md
+++ b/docs/framework/operators/tensor/tensor.sign.md
@@ -18,7 +18,7 @@ A new `Tensor` of the same shape as the input tensor with The sign of the inp
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
diff --git a/docs/framework/operators/tensor/tensor.sin.md b/docs/framework/operators/tensor/tensor.sin.md
index 3c7e0da17..e315ac8b3 100644
--- a/docs/framework/operators/tensor/tensor.sin.md
+++ b/docs/framework/operators/tensor/tensor.sin.md
@@ -23,7 +23,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FP8x23, FixedTrait};
diff --git a/docs/framework/operators/tensor/tensor.sinh.md b/docs/framework/operators/tensor/tensor.sinh.md
index c89f40ecb..a593594ed 100644
--- a/docs/framework/operators/tensor/tensor.sinh.md
+++ b/docs/framework/operators/tensor/tensor.sinh.md
@@ -24,7 +24,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.slice.md b/docs/framework/operators/tensor/tensor.slice.md
index 0f8782d6e..50a8ecaab 100644
--- a/docs/framework/operators/tensor/tensor.slice.md
+++ b/docs/framework/operators/tensor/tensor.slice.md
@@ -27,7 +27,7 @@ A new `Tensor` slice of the input tensor.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.sqrt.md b/docs/framework/operators/tensor/tensor.sqrt.md
index 918913f0b..dd45c9b83 100644
--- a/docs/framework/operators/tensor/tensor.sqrt.md
+++ b/docs/framework/operators/tensor/tensor.sqrt.md
@@ -23,7 +23,7 @@ Constrain input and output types to fixed point tensors.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.squeeze.md b/docs/framework/operators/tensor/tensor.squeeze.md
index 28076d678..d251cbb7d 100644
--- a/docs/framework/operators/tensor/tensor.squeeze.md
+++ b/docs/framework/operators/tensor/tensor.squeeze.md
@@ -18,7 +18,7 @@ A new `Tensor` Reshaped tensor with same data as input.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.stride.md b/docs/framework/operators/tensor/tensor.stride.md
index 75313770a..7085835fd 100644
--- a/docs/framework/operators/tensor/tensor.stride.md
+++ b/docs/framework/operators/tensor/tensor.stride.md
@@ -16,7 +16,7 @@ A span of usize representing the stride for each dimension of the tensor.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.tanh.md b/docs/framework/operators/tensor/tensor.tanh.md
index 9ffec7780..69090698e 100644
--- a/docs/framework/operators/tensor/tensor.tanh.md
+++ b/docs/framework/operators/tensor/tensor.tanh.md
@@ -24,7 +24,7 @@ Constrain input and output types to fixed point tensors.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
use orion::numbers::{FixedTrait, FP8x23};
diff --git a/docs/framework/operators/tensor/tensor.transpose.md b/docs/framework/operators/tensor/tensor.transpose.md
index 0dee6e2fb..adbdbf63a 100644
--- a/docs/framework/operators/tensor/tensor.transpose.md
+++ b/docs/framework/operators/tensor/tensor.transpose.md
@@ -22,7 +22,7 @@ A `Tensor` instance with the axes reordered according to the given permutatio
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.trilu.md b/docs/framework/operators/tensor/tensor.trilu.md
index afc504691..a3e3a3088 100644
--- a/docs/framework/operators/tensor/tensor.trilu.md
+++ b/docs/framework/operators/tensor/tensor.trilu.md
@@ -23,7 +23,7 @@ A `Tensor` instance with the uppper/lower triangular part of the tensor.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.unravel_index.md b/docs/framework/operators/tensor/tensor.unravel_index.md
index 6221a5bd7..388bc2614 100644
--- a/docs/framework/operators/tensor/tensor.unravel_index.md
+++ b/docs/framework/operators/tensor/tensor.unravel_index.md
@@ -22,7 +22,7 @@ The unraveled indices corresponding to the given index.
## Examples
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.unsqueeze.md b/docs/framework/operators/tensor/tensor.unsqueeze.md
index ba73743c0..22ef500ed 100644
--- a/docs/framework/operators/tensor/tensor.unsqueeze.md
+++ b/docs/framework/operators/tensor/tensor.unsqueeze.md
@@ -25,7 +25,7 @@ Reshaped `Tensor` with same data as input.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.where.md b/docs/framework/operators/tensor/tensor.where.md
index 187f12ecb..7151ce506 100644
--- a/docs/framework/operators/tensor/tensor.where.md
+++ b/docs/framework/operators/tensor/tensor.where.md
@@ -25,7 +25,7 @@ chosen from x or y depending on the condition.
## Example
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/framework/operators/tensor/tensor.xor.md b/docs/framework/operators/tensor/tensor.xor.md
index 53d89b24e..89e4c41ea 100644
--- a/docs/framework/operators/tensor/tensor.xor.md
+++ b/docs/framework/operators/tensor/tensor.xor.md
@@ -27,7 +27,7 @@ A new `Tensor` of booleans (0 or 1) with the same shape as the broadcaste
Case 1: Compare tensors with same shape
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
@@ -48,7 +48,7 @@ fn xor_example() -> Tensor {
Case 2: Compare tensors with different shapes
```rust
-use array::{ArrayTrait, SpanTrait};
+use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
diff --git a/docs/resources/tutorials/mnist-classification-with-feedforward-neural-network.md b/docs/resources/tutorials/mnist-classification-with-feedforward-neural-network.md
index a58436ff4..62c5603fe 100644
--- a/docs/resources/tutorials/mnist-classification-with-feedforward-neural-network.md
+++ b/docs/resources/tutorials/mnist-classification-with-feedforward-neural-network.md
@@ -390,7 +390,7 @@ os.makedirs('src/generated', exist_ok=True)
for tensor_name, tensor in tensors.items():
with open(os.path.join('src', 'generated', f"{tensor_name}.cairo"), "w") as f:
f.write(
- "use array::ArrayTrait;\n" +
+ "use core::array::ArrayTrait;\n" +
"use orion::operators::tensor::core::{TensorTrait, Tensor, ExtraParams};\n" +
"use orion::operators::tensor::implementations::impl_tensor_i32::Tensor_i32;\n" +
"use orion::numbers::fixed_point::core::FixedImpl;\n" +
@@ -432,7 +432,7 @@ We have just created a file called `lib.cairo`, which contains a module declarat
Here is a file we generated: `fc1_bias.cairo`
```rust
-use array::ArrayTrait;
+use core::array::ArrayTrait;
use orion::operators::tensor::core::{TensorTrait, Tensor, ExtraParams};
use orion::operators::tensor::implementations::impl_tensor_i32::Tensor_i32;
use orion::numbers::fixed_point::core::FixedImpl;
diff --git a/nodegen/helpers.py b/nodegen/helpers.py
index a5b84efb3..041f480bd 100644
--- a/nodegen/helpers.py
+++ b/nodegen/helpers.py
@@ -186,7 +186,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
trait_to_ref = {
Trait.TENSOR: [
- "array::{ArrayTrait, SpanTrait}",
+ "core::array::{ArrayTrait, SpanTrait}",
"orion::operators::tensor::{TensorTrait, Tensor}",
],
Trait.NN: [
diff --git a/src/numbers.cairo b/src/numbers.cairo
index ff9bdf7b4..9b5d3ef8d 100644
--- a/src/numbers.cairo
+++ b/src/numbers.cairo
@@ -281,7 +281,7 @@ impl FP8x23Number of NumberTrait {
fn bitwise_or(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
comp_fp8x23::bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
FP8x23Add::add(lhs, rhs)
}
@@ -508,7 +508,7 @@ impl FP8x23WNumber of NumberTrait {
fn bitwise_or(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
comp_fp8x23wide::bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
FP8x23WAdd::add(lhs, rhs)
}
@@ -735,7 +735,7 @@ impl FP16x16Number of NumberTrait {
fn bitwise_or(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
comp_fp16x16::bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
FP16x16Add::add(lhs, rhs)
}
@@ -962,7 +962,7 @@ impl FP16x16WNumber of NumberTrait {
fn bitwise_or(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
comp_fp16x16wide::bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
FP16x16WAdd::add(lhs, rhs)
}
@@ -975,7 +975,7 @@ impl FP16x16WNumber of NumberTrait {
use orion::numbers::fixed_point::implementations::fp64x64::core::{
FP64x64Impl, FP64x64, FP64x64Add, FP64x64Sub
};
-use orion::numbers::fixed_point::implementations::fp64x64::core as core_fp64x64;
+use orion::numbers::fixed_point::implementations::fp64x64::{core as core_fp64x64};
use orion::numbers::fixed_point::implementations::fp64x64::comp as comp_fp64x64;
use cubit::f128 as fp64x64;
@@ -1084,7 +1084,7 @@ impl FP64x64Number of NumberTrait {
FP64x64Impl::ZERO()
}
fn is_zero(self: FP64x64) -> bool {
- fp64x64::core::eq(@self, @FP64x64Impl::ZERO())
+ fp64x64::ops::eq(@self, @FP64x64Impl::ZERO())
}
fn half() -> FP64x64 {
@@ -1104,11 +1104,11 @@ impl FP64x64Number of NumberTrait {
}
fn abs(self: FP64x64) -> FP64x64 {
- fp64x64::core::abs(self)
+ fp64x64::ops::abs(self)
}
fn neg(self: FP64x64) -> FP64x64 {
- fp64x64::core::neg(self)
+ fp64x64::ops::neg(self)
}
fn min_value() -> FP64x64 {
@@ -1190,7 +1190,7 @@ impl FP64x64Number of NumberTrait {
fn bitwise_or(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
comp_fp64x64::bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
FP64x64Add::add(lhs, rhs)
}
@@ -1312,7 +1312,7 @@ impl FP32x32Number of NumberTrait {
FP32x32Impl::ZERO()
}
fn is_zero(self: FP32x32) -> bool {
- fp32x32::core::eq(@self, @FP32x32Impl::ZERO())
+ fp32x32::ops::eq(@self, @FP32x32Impl::ZERO())
}
fn half() -> FP32x32 {
@@ -1332,11 +1332,11 @@ impl FP32x32Number of NumberTrait {
}
fn abs(self: FP32x32) -> FP32x32 {
- fp32x32::core::abs(self)
+ fp32x32::ops::abs(self)
}
fn neg(self: FP32x32) -> FP32x32 {
- fp32x32::core::neg(self)
+ fp32x32::ops::neg(self)
}
fn min_value() -> FP32x32 {
@@ -1418,7 +1418,7 @@ impl FP32x32Number of NumberTrait {
fn bitwise_or(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
comp_fp32x32::bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
FP32x32Add::add(lhs, rhs)
}
@@ -1659,7 +1659,7 @@ impl I8Number of NumberTrait {
fn bitwise_or(lhs: i8, rhs: i8) -> i8 {
i8_core::i8_bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: i8, rhs: i8) -> i8 {
i8Add::add(lhs, rhs)
}
@@ -1900,7 +1900,7 @@ impl i16Number of NumberTrait {
fn bitwise_or(lhs: i16, rhs: i16) -> i16 {
i16_core::i16_bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: i16, rhs: i16) -> i16 {
i16Add::add(lhs, rhs)
}
@@ -2141,7 +2141,7 @@ impl i32Number of NumberTrait {
fn bitwise_or(lhs: i32, rhs: i32) -> i32 {
i32_core::i32_bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: i32, rhs: i32) -> i32 {
i32Add::add(lhs, rhs)
}
@@ -2382,7 +2382,7 @@ impl i64Number of NumberTrait {
fn bitwise_or(lhs: i64, rhs: i64) -> i64 {
i64_core::i64_bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: i64, rhs: i64) -> i64 {
i64Add::add(lhs, rhs)
}
@@ -2624,7 +2624,7 @@ impl i128Number of NumberTrait {
fn bitwise_or(lhs: i128, rhs: i128) -> i128 {
i128_core::i128_bitwise_or(lhs, rhs)
}
-
+
fn add(lhs: i128, rhs: i128) -> i128 {
i128Add::add(lhs, rhs)
}
@@ -2870,7 +2870,7 @@ impl u32Number of NumberTrait {
fn bitwise_or(lhs: u32, rhs: u32) -> u32 {
lhs | rhs
}
-
+
fn add(lhs: u32, rhs: u32) -> u32 {
lhs + rhs
}
diff --git a/src/numbers/complex_number/complex64.cairo b/src/numbers/complex_number/complex64.cairo
index 026fcd58b..5b588fb9d 100644
--- a/src/numbers/complex_number/complex64.cairo
+++ b/src/numbers/complex_number/complex64.cairo
@@ -1,4 +1,4 @@
-use debug::PrintTrait;
+use core::debug::PrintTrait;
use orion::numbers::complex_number::complex_trait::ComplexTrait;
use orion::numbers::{FP64x64, FP64x64Impl, FP32x32, FP32x32Impl, FixedTrait};
diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo
index 02a4f9db8..a4a99bb82 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo
@@ -1,12 +1,12 @@
-use debug::PrintTrait;
+use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{TryInto, Into};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{TryInto, Into};
use orion::numbers::signed_integer::{i32::i32, i8::i8};
use orion::numbers::fixed_point::core::FixedTrait;
-use orion::numbers::fixed_point::implementations::fp16x16::math::{core, trig, hyp};
+use orion::numbers::fixed_point::implementations::fp16x16::math::{core as core_math, trig, hyp};
use orion::numbers::fixed_point::utils;
/// A struct representing a fixed point number.
@@ -50,12 +50,12 @@ impl FP16x16Impl of FixedTrait {
}
fn from_felt(val: felt252) -> FP16x16 {
- let mag = integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
+ let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
return FixedTrait::new(mag, utils::felt_sign(val));
}
fn abs(self: FP16x16) -> FP16x16 {
- return core::abs(self);
+ return core_math::abs(self);
}
fn acos(self: FP16x16) -> FP16x16 {
@@ -95,7 +95,7 @@ impl FP16x16Impl of FixedTrait {
}
fn ceil(self: FP16x16) -> FP16x16 {
- return core::ceil(self);
+ return core_math::ceil(self);
}
fn cos(self: FP16x16) -> FP16x16 {
@@ -111,46 +111,46 @@ impl FP16x16Impl of FixedTrait {
}
fn floor(self: FP16x16) -> FP16x16 {
- return core::floor(self);
+ return core_math::floor(self);
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP16x16) -> FP16x16 {
- return core::exp(self);
+ return core_math::exp(self);
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP16x16) -> FP16x16 {
- return core::exp2(self);
+ return core_math::exp2(self);
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP16x16) -> FP16x16 {
- return core::ln(self);
+ return core_math::ln(self);
}
// Calculates the binary logarithm of x: log2(x)
// self must be greather than zero
fn log2(self: FP16x16) -> FP16x16 {
- return core::log2(self);
+ return core_math::log2(self);
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP16x16) -> FP16x16 {
- return core::log10(self);
+ return core_math::log10(self);
}
// Calclates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP16x16, b: FP16x16) -> FP16x16 {
- return core::pow(self, b);
+ return core_math::pow(self, b);
}
fn round(self: FP16x16) -> FP16x16 {
- return core::round(self);
+ return core_math::round(self);
}
fn sin(self: FP16x16) -> FP16x16 {
@@ -168,7 +168,7 @@ impl FP16x16Impl of FixedTrait {
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP16x16) -> FP16x16 {
- return core::sqrt(self);
+ return core_math::sqrt(self);
}
fn tan(self: FP16x16) -> FP16x16 {
@@ -184,7 +184,7 @@ impl FP16x16Impl of FixedTrait {
}
fn sign(self: FP16x16) -> FP16x16 {
- return core::sign(self);
+ return core_math::sign(self);
}
fn NaN() -> FP16x16 {
@@ -212,11 +212,11 @@ impl FP16x16Impl of FixedTrait {
}
fn is_pos_inf(self: FP16x16) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: FP16x16) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
@@ -312,18 +312,18 @@ impl FP16x16TryIntoU8 of TryInto {
impl FP16x16PartialEq of PartialEq<FP16x16> {
#[inline(always)]
fn eq(lhs: @FP16x16, rhs: @FP16x16) -> bool {
- return core::eq(lhs, rhs);
+ return core_math::eq(lhs, rhs);
}
#[inline(always)]
fn ne(lhs: @FP16x16, rhs: @FP16x16) -> bool {
- return core::ne(lhs, rhs);
+ return core_math::ne(lhs, rhs);
}
}
impl FP16x16Add of Add<FP16x16> {
fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core::add(lhs, rhs);
+ return core_math::add(lhs, rhs);
}
}
@@ -336,7 +336,7 @@ impl FP16x16AddEq of AddEq {
impl FP16x16Sub of Sub<FP16x16> {
fn sub(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core::sub(lhs, rhs);
+ return core_math::sub(lhs, rhs);
}
}
@@ -349,7 +349,7 @@ impl FP16x16SubEq of SubEq {
impl FP16x16Mul of Mul<FP16x16> {
fn mul(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core::mul(lhs, rhs);
+ return core_math::mul(lhs, rhs);
}
}
@@ -362,7 +362,7 @@ impl FP16x16MulEq of MulEq {
impl FP16x16Div of Div<FP16x16> {
fn div(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core::div(lhs, rhs);
+ return core_math::div(lhs, rhs);
}
}
@@ -376,36 +376,36 @@ impl FP16x16DivEq of DivEq {
impl FP16x16PartialOrd of PartialOrd<FP16x16> {
#[inline(always)]
fn ge(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core::ge(lhs, rhs);
+ return core_math::ge(lhs, rhs);
}
#[inline(always)]
fn gt(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core::gt(lhs, rhs);
+ return core_math::gt(lhs, rhs);
}
#[inline(always)]
fn le(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core::le(lhs, rhs);
+ return core_math::le(lhs, rhs);
}
#[inline(always)]
fn lt(lhs: FP16x16, rhs: FP16x16) -> bool {
- return core::lt(lhs, rhs);
+ return core_math::lt(lhs, rhs);
}
}
impl FP16x16Neg of Neg<FP16x16> {
#[inline(always)]
fn neg(a: FP16x16) -> FP16x16 {
- return core::neg(a);
+ return core_math::neg(a);
}
}
impl FP16x16Rem of Rem<FP16x16> {
#[inline(always)]
fn rem(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
- return core::rem(lhs, rhs);
+ return core_math::rem(lhs, rhs);
}
}
diff --git a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo
index c398b3911..03e0f49fb 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo
@@ -1,5 +1,5 @@
-use debug::PrintTrait;
-use traits::Into;
+use core::debug::PrintTrait;
+use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
HALF, ONE, TWO, FP16x16, FP16x16Impl, FP16x16Sub, FP16x16Div, FixedTrait, FP16x16Print
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo
index 458a6e9b2..ddf153f18 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/comp.cairo
@@ -122,11 +122,11 @@ mod tests {
fn test_bitwise_xor() {
let a = FixedTrait::new(225280, false); // 3.4375
let b = FixedTrait::new(4160843776, true); // -2046.5625
- let c = FixedTrait::new(4160880640, true);
+ let c = FixedTrait::new(4160880640, true);
assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
}
-
+
fn test_bitwise_or() {
let a = FixedTrait::new(225280, false); // 3.4375
let b = FixedTrait::new(4160843776, true); // -2046.5625
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
index 1f35354aa..6cb9bebfe 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo
@@ -1,8 +1,9 @@
use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{Into, TryInto};
-use integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{Into, TryInto};
+use core::integer;
+use core::integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul};
use orion::numbers::fixed_point::implementations::fp16x16::core::{
HALF, ONE, MAX, FP16x16, FP16x16Impl, FP16x16Add, FP16x16AddEq, FP16x16Sub, FP16x16Mul,
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
index adee2421f..78d0cdac2 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/hyp.cairo
@@ -47,8 +47,8 @@ fn atanh(a: FP16x16) -> FP16x16 {
#[cfg(test)]
mod tests {
- use option::OptionTrait;
- use traits::Into;
+ use core::option::OptionTrait;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16::helpers::assert_precise;
diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo
index 52394686f..8b0d9b47f 100644
--- a/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16/math/trig.cairo
@@ -1,6 +1,6 @@
-use debug::PrintTrait;
-use integer::{u32_safe_divmod, u32_as_non_zero};
-use option::OptionTrait;
+use core::debug::PrintTrait;
+use core::integer::{u32_safe_divmod, u32_as_non_zero};
+use core::option::OptionTrait;
use orion::numbers::fixed_point::implementations::fp16x16::math::lut;
use orion::numbers::fixed_point::implementations::fp16x16::core::{
@@ -206,7 +206,7 @@ fn _sin_loop(a: FP16x16, i: u32, acc: FP16x16) -> FP16x16 {
#[cfg(test)]
mod tests {
- use traits::Into;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16::helpers::{
assert_precise, assert_relative
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo
index 7be8ab278..c6703b7d3 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo
@@ -1,12 +1,12 @@
-use debug::PrintTrait;
+use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{TryInto, Into};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{TryInto, Into};
use orion::numbers::signed_integer::{i32::i32, i8::i8};
use orion::numbers::{fixed_point::core::FixedTrait, FP16x16};
-use orion::numbers::fixed_point::implementations::fp16x16wide::math::{core, trig, hyp};
+use orion::numbers::fixed_point::implementations::fp16x16wide::math::{core as core_math, trig, hyp};
use orion::numbers::fixed_point::utils;
/// A struct representing a fixed point number.
@@ -50,12 +50,12 @@ impl FP16x16WImpl of FixedTrait {
}
fn from_felt(val: felt252) -> FP16x16W {
- let mag = integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
+ let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
return FixedTrait::new(mag, utils::felt_sign(val));
}
fn abs(self: FP16x16W) -> FP16x16W {
- return core::abs(self);
+ return core_math::abs(self);
}
fn acos(self: FP16x16W) -> FP16x16W {
@@ -95,7 +95,7 @@ impl FP16x16WImpl of FixedTrait {
}
fn ceil(self: FP16x16W) -> FP16x16W {
- return core::ceil(self);
+ return core_math::ceil(self);
}
fn cos(self: FP16x16W) -> FP16x16W {
@@ -111,46 +111,46 @@ impl FP16x16WImpl of FixedTrait {
}
fn floor(self: FP16x16W) -> FP16x16W {
- return core::floor(self);
+ return core_math::floor(self);
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP16x16W) -> FP16x16W {
- return core::exp(self);
+ return core_math::exp(self);
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP16x16W) -> FP16x16W {
- return core::exp2(self);
+ return core_math::exp2(self);
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP16x16W) -> FP16x16W {
- return core::ln(self);
+ return core_math::ln(self);
}
// Calculates the binary logarithm of x: log2(x)
// self must be greather than zero
fn log2(self: FP16x16W) -> FP16x16W {
- return core::log2(self);
+ return core_math::log2(self);
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP16x16W) -> FP16x16W {
- return core::log10(self);
+ return core_math::log10(self);
}
// Calclates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP16x16W, b: FP16x16W) -> FP16x16W {
- return core::pow(self, b);
+ return core_math::pow(self, b);
}
fn round(self: FP16x16W) -> FP16x16W {
- return core::round(self);
+ return core_math::round(self);
}
fn sin(self: FP16x16W) -> FP16x16W {
@@ -168,7 +168,7 @@ impl FP16x16WImpl of FixedTrait {
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP16x16W) -> FP16x16W {
- return core::sqrt(self);
+ return core_math::sqrt(self);
}
fn tan(self: FP16x16W) -> FP16x16W {
@@ -184,7 +184,7 @@ impl FP16x16WImpl of FixedTrait {
}
fn sign(self: FP16x16W) -> FP16x16W {
- return core::sign(self);
+ return core_math::sign(self);
}
fn NaN() -> FP16x16W {
@@ -212,11 +212,11 @@ impl FP16x16WImpl of FixedTrait {
}
fn is_pos_inf(self: FP16x16W) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: FP16x16W) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
@@ -327,18 +327,18 @@ impl FP16x16WTryIntoU8 of TryInto {
impl FP16x16WPartialEq of PartialEq<FP16x16W> {
#[inline(always)]
fn eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
- return core::eq(lhs, rhs);
+ return core_math::eq(lhs, rhs);
}
#[inline(always)]
fn ne(lhs: @FP16x16W, rhs: @FP16x16W) -> bool {
- return core::ne(lhs, rhs);
+ return core_math::ne(lhs, rhs);
}
}
impl FP16x16WAdd of Add<FP16x16W> {
fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core::add(lhs, rhs);
+ return core_math::add(lhs, rhs);
}
}
@@ -351,7 +351,7 @@ impl FP16x16WAddEq of AddEq {
impl FP16x16WSub of Sub<FP16x16W> {
fn sub(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core::sub(lhs, rhs);
+ return core_math::sub(lhs, rhs);
}
}
@@ -364,7 +364,7 @@ impl FP16x16WSubEq of SubEq {
impl FP16x16WMul of Mul<FP16x16W> {
fn mul(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core::mul(lhs, rhs);
+ return core_math::mul(lhs, rhs);
}
}
@@ -377,7 +377,7 @@ impl FP16x16WMulEq of MulEq {
impl FP16x16WDiv of Div<FP16x16W> {
fn div(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core::div(lhs, rhs);
+ return core_math::div(lhs, rhs);
}
}
@@ -391,36 +391,36 @@ impl FP16x16WDivEq of DivEq {
impl FP16x16WPartialOrd of PartialOrd<FP16x16W> {
#[inline(always)]
fn ge(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core::ge(lhs, rhs);
+ return core_math::ge(lhs, rhs);
}
#[inline(always)]
fn gt(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core::gt(lhs, rhs);
+ return core_math::gt(lhs, rhs);
}
#[inline(always)]
fn le(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core::le(lhs, rhs);
+ return core_math::le(lhs, rhs);
}
#[inline(always)]
fn lt(lhs: FP16x16W, rhs: FP16x16W) -> bool {
- return core::lt(lhs, rhs);
+ return core_math::lt(lhs, rhs);
}
}
impl FP16x16WNeg of Neg<FP16x16W> {
#[inline(always)]
fn neg(a: FP16x16W) -> FP16x16W {
- return core::neg(a);
+ return core_math::neg(a);
}
}
impl FP16x16WRem of Rem<FP16x16W> {
#[inline(always)]
fn rem(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
- return core::rem(lhs, rhs);
+ return core_math::rem(lhs, rhs);
}
}
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo
index a36f52be1..d6f50b1b5 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo
@@ -1,5 +1,5 @@
-use debug::PrintTrait;
-use traits::Into;
+use core::debug::PrintTrait;
+use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
HALF, ONE, TWO, FP16x16W, FP16x16WImpl, FP16x16WSub, FP16x16WDiv, FixedTrait, FP16x16WPrint
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo
index dac5b929e..50f93edea 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/comp.cairo
@@ -122,11 +122,11 @@ mod tests {
fn test_bitwise_xor() {
let a = FixedTrait::new(225280, false); // 3.4375
let b = FixedTrait::new(4160843776, true); // -2046.5625
- let c = FixedTrait::new(4160880640, true);
+ let c = FixedTrait::new(4160880640, true);
assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
}
-
+
fn test_bitwise_or() {
let a = FixedTrait::new(225280, false); // 3.4375
let b = FixedTrait::new(4160843776, true); // -2046.5625
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo
index 21f8b2cc8..62288a0f1 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo
@@ -1,8 +1,9 @@
use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{Into, TryInto};
-use integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{Into, TryInto};
+use core::integer;
+use core::integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul};
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
HALF, ONE, MAX, FP16x16W, FP16x16WImpl, FP16x16WAdd, FP16x16WAddEq, FP16x16WSub, FP16x16WMul,
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo
index 8ab228449..527b6046d 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/hyp.cairo
@@ -47,8 +47,8 @@ fn atanh(a: FP16x16W) -> FP16x16W {
#[cfg(test)]
mod tests {
- use option::OptionTrait;
- use traits::Into;
+ use core::option::OptionTrait;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::assert_precise;
diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo
index 0cfebea64..3c22fd97f 100644
--- a/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo
+++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/trig.cairo
@@ -1,6 +1,6 @@
-use debug::PrintTrait;
-use integer::{u64_safe_divmod, u64_as_non_zero};
-use option::OptionTrait;
+use core::debug::PrintTrait;
+use core::integer::{u64_safe_divmod, u64_as_non_zero};
+use core::option::OptionTrait;
use orion::numbers::fixed_point::implementations::fp16x16wide::math::lut;
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
@@ -206,7 +206,7 @@ fn _sin_loop(a: FP16x16W, i: u64, acc: FP16x16W) -> FP16x16W {
#[cfg(test)]
mod tests {
- use traits::Into;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp16x16wide::helpers::{
assert_precise, assert_relative
diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo
index fca39b6fb..869a8c519 100644
--- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo
@@ -1,8 +1,8 @@
-use debug::PrintTrait;
+use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{TryInto, Into};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{TryInto, Into};
use cubit::f64 as fp32x32;
use cubit::f64::Fixed as FP32x32;
@@ -41,12 +41,12 @@ impl FP32x32Impl of FixedTrait {
}
fn from_felt(val: felt252) -> FP32x32 {
- let mag = integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
+ let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
return FixedTrait::new(mag, utils::felt_sign(val));
}
fn abs(self: FP32x32) -> FP32x32 {
- return fp32x32::core::abs(self);
+ return fp32x32::ops::abs(self);
}
fn acos(self: FP32x32) -> FP32x32 {
@@ -86,7 +86,7 @@ impl FP32x32Impl of FixedTrait {
}
fn ceil(self: FP32x32) -> FP32x32 {
- return fp32x32::core::ceil(self);
+ return fp32x32::ops::ceil(self);
}
fn cos(self: FP32x32) -> FP32x32 {
@@ -102,46 +102,46 @@ impl FP32x32Impl of FixedTrait {
}
fn floor(self: FP32x32) -> FP32x32 {
- return fp32x32::core::floor(self);
+ return fp32x32::ops::floor(self);
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP32x32) -> FP32x32 {
- return fp32x32::core::exp(self);
+ return fp32x32::ops::exp(self);
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP32x32) -> FP32x32 {
- return fp32x32::core::exp2(self);
+ return fp32x32::ops::exp2(self);
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP32x32) -> FP32x32 {
- return fp32x32::core::ln(self);
+ return fp32x32::ops::ln(self);
}
// Calculates the binary logarithm of x: log2(x)
// self must be greather than zero
fn log2(self: FP32x32) -> FP32x32 {
- return fp32x32::core::log2(self);
+ return fp32x32::ops::log2(self);
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP32x32) -> FP32x32 {
- return fp32x32::core::log10(self);
+ return fp32x32::ops::log10(self);
}
// Calclates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP32x32, b: FP32x32) -> FP32x32 {
- return fp32x32::core::pow(self, b);
+ return fp32x32::ops::pow(self, b);
}
fn round(self: FP32x32) -> FP32x32 {
- return fp32x32::core::round(self);
+ return fp32x32::ops::round(self);
}
fn sin(self: FP32x32) -> FP32x32 {
@@ -159,7 +159,7 @@ impl FP32x32Impl of FixedTrait {
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP32x32) -> FP32x32 {
- return fp32x32::core::sqrt(self);
+ return fp32x32::ops::sqrt(self);
}
fn tan(self: FP32x32) -> FP32x32 {
@@ -203,11 +203,11 @@ impl FP32x32Impl of FixedTrait {
}
fn is_pos_inf(self: FP32x32) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: FP32x32) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
@@ -285,100 +285,100 @@ impl FP32x32TryIntoI8 of TryInto {
// impl FP32x32PartialEq of PartialEq<FP32x32> {
// #[inline(always)]
// fn eq(lhs: @FP32x32, rhs: @FP32x32) -> bool {
-// return fp32x32::core::eq(lhs, rhs);
+// return fp32x32::ops::eq(lhs, rhs);
// }
// #[inline(always)]
// fn ne(lhs: @FP32x32, rhs: @FP32x32) -> bool {
-// return fp32x32::core::ne(lhs, rhs);
+// return fp32x32::ops::ne(lhs, rhs);
// }
// }
impl FP32x32Add of Add<FP32x32> {
fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::core::add(lhs, rhs);
+ return fp32x32::ops::add(lhs, rhs);
}
}
impl FP32x32AddEq of AddEq<FP32x32> {
#[inline(always)]
fn add_eq(ref self: FP32x32, other: FP32x32) {
- self = fp32x32::core::add(self, other);
+ self = fp32x32::ops::add(self, other);
}
}
impl FP32x32Sub of Sub<FP32x32> {
fn sub(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::core::sub(lhs, rhs);
+ return fp32x32::ops::sub(lhs, rhs);
}
}
impl FP32x32SubEq of SubEq<FP32x32> {
#[inline(always)]
fn sub_eq(ref self: FP32x32, other: FP32x32) {
- self = fp32x32::core::sub(self, other);
+ self = fp32x32::ops::sub(self, other);
}
}
impl FP32x32Mul of Mul<FP32x32> {
fn mul(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::core::mul(lhs, rhs);
+ return fp32x32::ops::mul(lhs, rhs);
}
}
impl FP32x32MulEq of MulEq<FP32x32> {
#[inline(always)]
fn mul_eq(ref self: FP32x32, other: FP32x32) {
- self = fp32x32::core::mul(self, other);
+ self = fp32x32::ops::mul(self, other);
}
}
impl FP32x32Div of Div<FP32x32> {
fn div(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::core::div(lhs, rhs);
+ return fp32x32::ops::div(lhs, rhs);
}
}
impl FP32x32DivEq of DivEq<FP32x32> {
#[inline(always)]
fn div_eq(ref self: FP32x32, other: FP32x32) {
- self = fp32x32::core::div(self, other);
+ self = fp32x32::ops::div(self, other);
}
}
impl FP32x32PartialOrd of PartialOrd<FP32x32> {
#[inline(always)]
fn ge(lhs: FP32x32, rhs: FP32x32) -> bool {
- return fp32x32::core::ge(lhs, rhs);
+ return fp32x32::ops::ge(lhs, rhs);
}
#[inline(always)]
fn gt(lhs: FP32x32, rhs: FP32x32) -> bool {
- return fp32x32::core::gt(lhs, rhs);
+ return fp32x32::ops::gt(lhs, rhs);
}
#[inline(always)]
fn le(lhs: FP32x32, rhs: FP32x32) -> bool {
- return fp32x32::core::le(lhs, rhs);
+ return fp32x32::ops::le(lhs, rhs);
}
#[inline(always)]
fn lt(lhs: FP32x32, rhs: FP32x32) -> bool {
- return fp32x32::core::lt(lhs, rhs);
+ return fp32x32::ops::lt(lhs, rhs);
}
}
impl FP32x32Neg of Neg<FP32x32> {
#[inline(always)]
fn neg(a: FP32x32) -> FP32x32 {
- return fp32x32::core::neg(a);
+ return fp32x32::ops::neg(a);
}
}
impl FP32x32Rem of Rem<FP32x32> {
#[inline(always)]
fn rem(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
- return fp32x32::core::rem(lhs, rhs);
+ return fp32x32::ops::rem(lhs, rhs);
}
}
diff --git a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo
index bb674e03c..6cfba5423 100644
--- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo
@@ -1,13 +1,13 @@
-use debug::PrintTrait;
+use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{TryInto, Into};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{TryInto, Into};
use cubit::f128 as fp64x64;
use cubit::f128::types::Fixed as FP64x64;
use cubit::f128::ONE_u128 as ONE;
-use cubit::f128::core::MAX_u128 as MAX;
+use cubit::f128::ops::MAX_u128 as MAX;
use orion::numbers::fixed_point::core::{FixedTrait};
use orion::numbers::fixed_point::utils;
@@ -41,12 +41,12 @@ impl FP64x64Impl of FixedTrait {
}
fn from_felt(val: felt252) -> FP64x64 {
- let mag = integer::u128_try_from_felt252(utils::felt_abs(val)).unwrap();
+ let mag = core::integer::u128_try_from_felt252(utils::felt_abs(val)).unwrap();
return FixedTrait::new(mag, utils::felt_sign(val));
}
fn abs(self: FP64x64) -> FP64x64 {
- return fp64x64::core::abs(self);
+ return fp64x64::ops::abs(self);
}
fn acos(self: FP64x64) -> FP64x64 {
@@ -86,7 +86,7 @@ impl FP64x64Impl of FixedTrait {
}
fn ceil(self: FP64x64) -> FP64x64 {
- return fp64x64::core::ceil(self);
+ return fp64x64::ops::ceil(self);
}
fn cos(self: FP64x64) -> FP64x64 {
@@ -102,46 +102,46 @@ impl FP64x64Impl of FixedTrait {
}
fn floor(self: FP64x64) -> FP64x64 {
- return fp64x64::core::floor(self);
+ return fp64x64::ops::floor(self);
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP64x64) -> FP64x64 {
- return fp64x64::core::exp(self);
+ return fp64x64::ops::exp(self);
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP64x64) -> FP64x64 {
- return fp64x64::core::exp2(self);
+ return fp64x64::ops::exp2(self);
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP64x64) -> FP64x64 {
- return fp64x64::core::ln(self);
+ return fp64x64::ops::ln(self);
}
// Calculates the binary logarithm of x: log2(x)
// self must be greather than zero
fn log2(self: FP64x64) -> FP64x64 {
- return fp64x64::core::log2(self);
+ return fp64x64::ops::log2(self);
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP64x64) -> FP64x64 {
- return fp64x64::core::log10(self);
+ return fp64x64::ops::log10(self);
}
// Calclates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP64x64, b: FP64x64) -> FP64x64 {
- return fp64x64::core::pow(self, b);
+ return fp64x64::ops::pow(self, b);
}
fn round(self: FP64x64) -> FP64x64 {
- return fp64x64::core::round(self);
+ return fp64x64::ops::round(self);
}
fn sin(self: FP64x64) -> FP64x64 {
@@ -159,7 +159,7 @@ impl FP64x64Impl of FixedTrait {
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP64x64) -> FP64x64 {
- return fp64x64::core::sqrt(self);
+ return fp64x64::ops::sqrt(self);
}
fn tan(self: FP64x64) -> FP64x64 {
@@ -203,11 +203,11 @@ impl FP64x64Impl of FixedTrait {
}
fn is_pos_inf(self: FP64x64) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: FP64x64) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
@@ -285,100 +285,100 @@ impl FP64x64TryIntoI8 of TryInto {
// impl FP64x64PartialEq of PartialEq<FP64x64> {
// #[inline(always)]
// fn eq(lhs: @FP64x64, rhs: @FP64x64) -> bool {
-// return fp64x64::core::eq(lhs, rhs);
+// return fp64x64::ops::eq(lhs, rhs);
// }
// #[inline(always)]
// fn ne(lhs: @FP64x64, rhs: @FP64x64) -> bool {
-// return fp64x64::core::ne(lhs, rhs);
+// return fp64x64::ops::ne(lhs, rhs);
// }
// }
impl FP64x64Add of Add<FP64x64> {
fn add(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
- return fp64x64::core::add(lhs, rhs);
+ return fp64x64::ops::add(lhs, rhs);
}
}
impl FP64x64AddEq of AddEq<FP64x64> {
#[inline(always)]
fn add_eq(ref self: FP64x64, other: FP64x64) {
- self = fp64x64::core::add(self, other);
+ self = fp64x64::ops::add(self, other);
}
}
impl FP64x64Sub of Sub<FP64x64> {
fn sub(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
- return fp64x64::core::sub(lhs, rhs);
+ return fp64x64::ops::sub(lhs, rhs);
}
}
impl FP64x64SubEq of SubEq<FP64x64> {
#[inline(always)]
fn sub_eq(ref self: FP64x64, other: FP64x64) {
- self = fp64x64::core::sub(self, other);
+ self = fp64x64::ops::sub(self, other);
}
}
impl FP64x64Mul of Mul<FP64x64> {
fn mul(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
- return fp64x64::core::mul(lhs, rhs);
+ return fp64x64::ops::mul(lhs, rhs);
}
}
impl FP64x64MulEq of MulEq<FP64x64> {
#[inline(always)]
fn mul_eq(ref self: FP64x64, other: FP64x64) {
- self = fp64x64::core::mul(self, other);
+ self = fp64x64::ops::mul(self, other);
}
}
impl FP64x64Div of Div<FP64x64> {
fn div(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
- return fp64x64::core::div(lhs, rhs);
+ return fp64x64::ops::div(lhs, rhs);
}
}
impl FP64x64DivEq of DivEq<FP64x64> {
#[inline(always)]
fn div_eq(ref self: FP64x64, other: FP64x64) {
- self = fp64x64::core::div(self, other);
+ self = fp64x64::ops::div(self, other);
}
}
impl FP64x64PartialOrd of PartialOrd<FP64x64> {
#[inline(always)]
fn ge(lhs: FP64x64, rhs: FP64x64) -> bool {
- return fp64x64::core::ge(lhs, rhs);
+ return fp64x64::ops::ge(lhs, rhs);
}
#[inline(always)]
fn gt(lhs: FP64x64, rhs: FP64x64) -> bool {
- return fp64x64::core::gt(lhs, rhs);
+ return fp64x64::ops::gt(lhs, rhs);
}
#[inline(always)]
fn le(lhs: FP64x64, rhs: FP64x64) -> bool {
- return fp64x64::core::le(lhs, rhs);
+ return fp64x64::ops::le(lhs, rhs);
}
#[inline(always)]
fn lt(lhs: FP64x64, rhs: FP64x64) -> bool {
- return fp64x64::core::lt(lhs, rhs);
+ return fp64x64::ops::lt(lhs, rhs);
}
}
impl FP64x64Neg of Neg<FP64x64> {
#[inline(always)]
fn neg(a: FP64x64) -> FP64x64 {
- return fp64x64::core::neg(a);
+ return fp64x64::ops::neg(a);
}
}
impl FP64x64Rem of Rem<FP64x64> {
#[inline(always)]
fn rem(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
- return fp64x64::core::rem(lhs, rhs);
+ return fp64x64::ops::rem(lhs, rhs);
}
}
diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo
index 9faffd18e..dcbfefa96 100644
--- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo
@@ -1,12 +1,12 @@
-use debug::PrintTrait;
+use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{TryInto, Into};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{TryInto, Into};
use orion::numbers::signed_integer::{i32::i32, i8::i8};
use orion::numbers::fixed_point::core::{FixedTrait};
-use orion::numbers::fixed_point::implementations::fp8x23::math::{core, trig, hyp};
+use orion::numbers::fixed_point::implementations::fp8x23::math::{core as core_math, trig, hyp};
use orion::numbers::fixed_point::utils;
/// A struct representing a fixed point number.
@@ -50,12 +50,12 @@ impl FP8x23Impl of FixedTrait {
}
fn from_felt(val: felt252) -> FP8x23 {
- let mag = integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
+ let mag = core::integer::u32_try_from_felt252(utils::felt_abs(val)).unwrap();
return FixedTrait::new(mag, utils::felt_sign(val));
}
fn abs(self: FP8x23) -> FP8x23 {
- return core::abs(self);
+ return core_math::abs(self);
}
fn acos(self: FP8x23) -> FP8x23 {
@@ -95,7 +95,7 @@ impl FP8x23Impl of FixedTrait {
}
fn ceil(self: FP8x23) -> FP8x23 {
- return core::ceil(self);
+ return core_math::ceil(self);
}
fn cos(self: FP8x23) -> FP8x23 {
@@ -111,46 +111,46 @@ impl FP8x23Impl of FixedTrait {
}
fn floor(self: FP8x23) -> FP8x23 {
- return core::floor(self);
+ return core_math::floor(self);
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP8x23) -> FP8x23 {
- return core::exp(self);
+ return core_math::exp(self);
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP8x23) -> FP8x23 {
- return core::exp2(self);
+ return core_math::exp2(self);
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP8x23) -> FP8x23 {
- return core::ln(self);
+ return core_math::ln(self);
}
// Calculates the binary logarithm of x: log2(x)
// self must be greather than zero
fn log2(self: FP8x23) -> FP8x23 {
- return core::log2(self);
+ return core_math::log2(self);
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP8x23) -> FP8x23 {
- return core::log10(self);
+ return core_math::log10(self);
}
// Calclates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP8x23, b: FP8x23) -> FP8x23 {
- return core::pow(self, b);
+ return core_math::pow(self, b);
}
fn round(self: FP8x23) -> FP8x23 {
- return core::round(self);
+ return core_math::round(self);
}
fn sin(self: FP8x23) -> FP8x23 {
@@ -168,7 +168,7 @@ impl FP8x23Impl of FixedTrait {
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP8x23) -> FP8x23 {
- return core::sqrt(self);
+ return core_math::sqrt(self);
}
fn tan(self: FP8x23) -> FP8x23 {
@@ -184,7 +184,7 @@ impl FP8x23Impl of FixedTrait {
}
fn sign(self: FP8x23) -> FP8x23 {
- return core::sign(self);
+ return core_math::sign(self);
}
fn NaN() -> FP8x23 {
@@ -212,11 +212,11 @@ impl FP8x23Impl of FixedTrait {
}
fn is_pos_inf(self: FP8x23) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: FP8x23) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
@@ -311,18 +311,18 @@ impl FP8x23TryIntoI8 of TryInto {
impl FP8x23PartialEq of PartialEq<FP8x23> {
#[inline(always)]
fn eq(lhs: @FP8x23, rhs: @FP8x23) -> bool {
- return core::eq(lhs, rhs);
+ return core_math::eq(lhs, rhs);
}
#[inline(always)]
fn ne(lhs: @FP8x23, rhs: @FP8x23) -> bool {
- return core::ne(lhs, rhs);
+ return core_math::ne(lhs, rhs);
}
}
impl FP8x23Add of Add<FP8x23> {
fn add(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
- return core::add(lhs, rhs);
+ return core_math::add(lhs, rhs);
}
}
@@ -335,7 +335,7 @@ impl FP8x23AddEq of AddEq {
impl FP8x23Sub of Sub<FP8x23> {
fn sub(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
- return core::sub(lhs, rhs);
+ return core_math::sub(lhs, rhs);
}
}
@@ -348,7 +348,7 @@ impl FP8x23SubEq of SubEq {
impl FP8x23Mul of Mul<FP8x23> {
fn mul(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
- return core::mul(lhs, rhs);
+ return core_math::mul(lhs, rhs);
}
}
@@ -361,7 +361,7 @@ impl FP8x23MulEq of MulEq {
impl FP8x23Div of Div<FP8x23> {
fn div(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
- return core::div(lhs, rhs);
+ return core_math::div(lhs, rhs);
}
}
@@ -375,36 +375,36 @@ impl FP8x23DivEq of DivEq {
impl FP8x23PartialOrd of PartialOrd<FP8x23> {
#[inline(always)]
fn ge(lhs: FP8x23, rhs: FP8x23) -> bool {
- return core::ge(lhs, rhs);
+ return core_math::ge(lhs, rhs);
}
#[inline(always)]
fn gt(lhs: FP8x23, rhs: FP8x23) -> bool {
- return core::gt(lhs, rhs);
+ return core_math::gt(lhs, rhs);
}
#[inline(always)]
fn le(lhs: FP8x23, rhs: FP8x23) -> bool {
- return core::le(lhs, rhs);
+ return core_math::le(lhs, rhs);
}
#[inline(always)]
fn lt(lhs: FP8x23, rhs: FP8x23) -> bool {
- return core::lt(lhs, rhs);
+ return core_math::lt(lhs, rhs);
}
}
impl FP8x23Neg of Neg<FP8x23> {
#[inline(always)]
fn neg(a: FP8x23) -> FP8x23 {
- return core::neg(a);
+ return core_math::neg(a);
}
}
impl FP8x23Rem of Rem<FP8x23> {
#[inline(always)]
fn rem(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
- return core::rem(lhs, rhs);
+ return core_math::rem(lhs, rhs);
}
}
diff --git a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo
index e63b1fe6a..f019ef08a 100644
--- a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo
@@ -1,5 +1,5 @@
-use debug::PrintTrait;
-use traits::Into;
+use core::debug::PrintTrait;
+use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp8x23::core::{
HALF, ONE, TWO, FP8x23, FP8x23Sub, FP8x23Div, FixedTrait, FP8x23Print
diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo
index b39868957..366f385e8 100644
--- a/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23/math/comp.cairo
@@ -123,7 +123,7 @@ mod tests {
assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
}
-
+
fn test_bitwise_or() {
let a = FixedTrait::new(28835840, false); // 3.4375
let b = FixedTrait::new(1639448576, true); // -60.5625
diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo
index 4ac727357..9e3d2cdce 100644
--- a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo
@@ -1,8 +1,9 @@
use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{Into, TryInto};
-use integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{Into, TryInto};
+use core::integer;
+use core::integer::{u32_safe_divmod, u32_as_non_zero, u32_wide_mul};
use orion::numbers::fixed_point::implementations::fp8x23::core::{
HALF, ONE, MAX, FP8x23, FP8x23Add, FP8x23Impl, FP8x23AddEq, FP8x23Sub, FP8x23Mul, FP8x23MulEq,
diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo
index aa07ded7d..e2059d848 100644
--- a/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23/math/hyp.cairo
@@ -47,8 +47,8 @@ fn atanh(a: FP8x23) -> FP8x23 {
#[cfg(test)]
mod tests {
- use option::OptionTrait;
- use traits::Into;
+ use core::option::OptionTrait;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp8x23::helpers::assert_precise;
diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo
index 16cb40189..11aec54ad 100644
--- a/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23/math/trig.cairo
@@ -1,6 +1,6 @@
-use debug::PrintTrait;
-use integer::{u32_safe_divmod, u32_as_non_zero};
-use option::OptionTrait;
+use core::debug::PrintTrait;
+use core::integer::{u32_safe_divmod, u32_as_non_zero};
+use core::option::OptionTrait;
use orion::numbers::fixed_point::implementations::fp8x23::math::lut;
use orion::numbers::fixed_point::implementations::fp8x23::core::{
@@ -205,7 +205,7 @@ fn _sin_loop(a: FP8x23, i: u32, acc: FP8x23) -> FP8x23 {
#[cfg(test)]
mod tests {
- use traits::Into;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp8x23::helpers::{
assert_precise, assert_relative
diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo
index 428570029..d33ea4524 100644
--- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo
@@ -1,12 +1,12 @@
-use debug::PrintTrait;
+use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{TryInto, Into};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{TryInto, Into};
use orion::numbers::signed_integer::{i32::i32, i8::i8};
use orion::numbers::{fixed_point::core::{FixedTrait}, FP8x23};
-use orion::numbers::fixed_point::implementations::fp8x23wide::math::{core, trig, hyp};
+use orion::numbers::fixed_point::implementations::fp8x23wide::math::{core as core_math, trig, hyp};
use orion::numbers::fixed_point::utils;
/// A struct representing a fixed point number.
@@ -50,12 +50,12 @@ impl FP8x23WImpl of FixedTrait {
}
fn from_felt(val: felt252) -> FP8x23W {
- let mag = integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
+ let mag = core::integer::u64_try_from_felt252(utils::felt_abs(val)).unwrap();
return FixedTrait::new(mag, utils::felt_sign(val));
}
fn abs(self: FP8x23W) -> FP8x23W {
- return core::abs(self);
+ return core_math::abs(self);
}
fn acos(self: FP8x23W) -> FP8x23W {
@@ -95,7 +95,7 @@ impl FP8x23WImpl of FixedTrait {
}
fn ceil(self: FP8x23W) -> FP8x23W {
- return core::ceil(self);
+ return core_math::ceil(self);
}
fn cos(self: FP8x23W) -> FP8x23W {
@@ -111,46 +111,46 @@ impl FP8x23WImpl of FixedTrait {
}
fn floor(self: FP8x23W) -> FP8x23W {
- return core::floor(self);
+ return core_math::floor(self);
}
// Calculates the natural exponent of x: e^x
fn exp(self: FP8x23W) -> FP8x23W {
- return core::exp(self);
+ return core_math::exp(self);
}
// Calculates the binary exponent of x: 2^x
fn exp2(self: FP8x23W) -> FP8x23W {
- return core::exp2(self);
+ return core_math::exp2(self);
}
// Calculates the natural logarithm of x: ln(x)
// self must be greater than zero
fn ln(self: FP8x23W) -> FP8x23W {
- return core::ln(self);
+ return core_math::ln(self);
}
// Calculates the binary logarithm of x: log2(x)
// self must be greather than zero
fn log2(self: FP8x23W) -> FP8x23W {
- return core::log2(self);
+ return core_math::log2(self);
}
// Calculates the base 10 log of x: log10(x)
// self must be greater than zero
fn log10(self: FP8x23W) -> FP8x23W {
- return core::log10(self);
+ return core_math::log10(self);
}
// Calclates the value of x^y and checks for overflow before returning
// self is a fixed point value
// b is a fixed point value
fn pow(self: FP8x23W, b: FP8x23W) -> FP8x23W {
- return core::pow(self, b);
+ return core_math::pow(self, b);
}
fn round(self: FP8x23W) -> FP8x23W {
- return core::round(self);
+ return core_math::round(self);
}
fn sin(self: FP8x23W) -> FP8x23W {
@@ -168,7 +168,7 @@ impl FP8x23WImpl of FixedTrait {
// Calculates the square root of a fixed point value
// x must be positive
fn sqrt(self: FP8x23W) -> FP8x23W {
- return core::sqrt(self);
+ return core_math::sqrt(self);
}
fn tan(self: FP8x23W) -> FP8x23W {
@@ -184,7 +184,7 @@ impl FP8x23WImpl of FixedTrait {
}
fn sign(self: FP8x23W) -> FP8x23W {
- return core::sign(self);
+ return core_math::sign(self);
}
fn NaN() -> FP8x23W {
@@ -212,11 +212,11 @@ impl FP8x23WImpl of FixedTrait<FP8x23W, u64> {
}
fn is_pos_inf(self: FP8x23W) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: FP8x23W) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
@@ -316,18 +316,18 @@ impl FP8x23WTryIntoI8 of TryInto<FP8x23W, i8> {
impl FP8x23WPartialEq of PartialEq<FP8x23W> {
#[inline(always)]
fn eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool {
- return core::eq(lhs, rhs);
+ return core_math::eq(lhs, rhs);
}
#[inline(always)]
fn ne(lhs: @FP8x23W, rhs: @FP8x23W) -> bool {
- return core::ne(lhs, rhs);
+ return core_math::ne(lhs, rhs);
}
}
impl FP8x23WAdd of Add<FP8x23W> {
fn add(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
- return core::add(lhs, rhs);
+ return core_math::add(lhs, rhs);
}
}
@@ -340,7 +340,7 @@ impl FP8x23WAddEq of AddEq<FP8x23W> {
impl FP8x23WSub of Sub<FP8x23W> {
fn sub(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
- return core::sub(lhs, rhs);
+ return core_math::sub(lhs, rhs);
}
}
@@ -353,7 +353,7 @@ impl FP8x23WSubEq of SubEq<FP8x23W> {
impl FP8x23WMul of Mul<FP8x23W> {
fn mul(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
- return core::mul(lhs, rhs);
+ return core_math::mul(lhs, rhs);
}
}
@@ -366,7 +366,7 @@ impl FP8x23WMulEq of MulEq<FP8x23W> {
impl FP8x23WDiv of Div<FP8x23W> {
fn div(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
- return core::div(lhs, rhs);
+ return core_math::div(lhs, rhs);
}
}
@@ -380,36 +380,36 @@ impl FP8x23WDivEq of DivEq<FP8x23W> {
impl FP8x23WPartialOrd of PartialOrd<FP8x23W> {
#[inline(always)]
fn ge(lhs: FP8x23W, rhs: FP8x23W) -> bool {
- return core::ge(lhs, rhs);
+ return core_math::ge(lhs, rhs);
}
#[inline(always)]
fn gt(lhs: FP8x23W, rhs: FP8x23W) -> bool {
- return core::gt(lhs, rhs);
+ return core_math::gt(lhs, rhs);
}
#[inline(always)]
fn le(lhs: FP8x23W, rhs: FP8x23W) -> bool {
- return core::le(lhs, rhs);
+ return core_math::le(lhs, rhs);
}
#[inline(always)]
fn lt(lhs: FP8x23W, rhs: FP8x23W) -> bool {
- return core::lt(lhs, rhs);
+ return core_math::lt(lhs, rhs);
}
}
impl FP8x23WNeg of Neg<FP8x23W> {
#[inline(always)]
fn neg(a: FP8x23W) -> FP8x23W {
- return core::neg(a);
+ return core_math::neg(a);
}
}
impl FP8x23WRem of Rem<FP8x23W> {
#[inline(always)]
fn rem(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
- return core::rem(lhs, rhs);
+ return core_math::rem(lhs, rhs);
}
}
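The `core as core_math` alias above is what the rest of this file's changes lean on: once corelib paths are written as `core::...`, a local math module that is itself named `core` has to be imported under a different name so the two never collide. A minimal sketch of the pattern, assuming only items this file already uses (`round_abs` is an illustrative helper, not part of the patch):

```rust
use orion::numbers::fixed_point::implementations::fp8x23wide::core::FP8x23W;
use orion::numbers::fixed_point::implementations::fp8x23wide::math::core as core_math;

// Illustrative only: `core_math::` paths hit the local fixed-point math module,
// while plain `core::` paths (e.g. core::integer) still reach the Cairo corelib.
fn round_abs(a: FP8x23W) -> FP8x23W {
    core_math::round(core_math::abs(a))
}
```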
diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo
index 2a08af58a..292dc44d5 100644
--- a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo
@@ -1,5 +1,5 @@
-use debug::PrintTrait;
-use traits::Into;
+use core::debug::PrintTrait;
+use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
HALF, ONE, TWO, FP8x23W, FP8x23WSub, FP8x23WDiv, FixedTrait, FP8x23WPrint
diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo
index 8bbb7436c..b2dad2e6d 100644
--- a/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/comp.cairo
@@ -125,7 +125,7 @@ mod tests {
assert(bitwise_xor(a, b) == c, 'bitwise_xor(a,b)')
}
-
+
fn test_bitwise_or() {
let a = FixedTrait::new(28835840, false); // 3.4375
let b = FixedTrait::new(1639448576, true); // -60.5625
diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo
index bb3b318ee..9bf6a3db1 100644
--- a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo
@@ -1,8 +1,9 @@
use core::debug::PrintTrait;
-use option::OptionTrait;
-use result::{ResultTrait, ResultTraitImpl};
-use traits::{Into, TryInto};
-use integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul};
+use core::option::OptionTrait;
+use core::result::{ResultTrait, ResultTraitImpl};
+use core::traits::{Into, TryInto};
+use core::integer;
+use core::integer::{u64_safe_divmod, u64_as_non_zero, u64_wide_mul};
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
HALF, ONE, MAX, FP8x23W, FP8x23WAdd, FP8x23WImpl, FP8x23WAddEq, FP8x23WSub, FP8x23WMul,
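For context on the widened imports (`u64_wide_mul`, `u64_safe_divmod`): FP8x23W keeps its magnitude in a `u64`, so intermediate products are computed in `u128` and then rescaled by 2^23. A rough sketch of that idea, using a plain division rather than the file's `u64_safe_divmod` helpers; `mul_mag` and its signature are illustrative, not the function defined in this file:

```rust
use core::integer::u64_wide_mul;
use core::option::OptionTrait;
use core::traits::TryInto;

// 2^23, the FP8x23W scaling factor, kept as u128 for the widened arithmetic.
const ONE_WIDE: u128 = 8388608;

// Multiply two FP8x23W magnitudes: widen to u128 so the product cannot overflow,
// then divide by the scaling factor to return to the 8x23 fixed-point scale.
// Panics (via unwrap) if the rescaled result no longer fits in a u64.
fn mul_mag(a_mag: u64, b_mag: u64) -> u64 {
    let wide: u128 = u64_wide_mul(a_mag, b_mag);
    (wide / ONE_WIDE).try_into().unwrap()
}
```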
diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo
index be95eedba..848f711a2 100644
--- a/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/hyp.cairo
@@ -47,8 +47,8 @@ fn atanh(a: FP8x23W) -> FP8x23W {
#[cfg(test)]
mod tests {
- use option::OptionTrait;
- use traits::Into;
+ use core::option::OptionTrait;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::assert_precise;
diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo
index 5a16d18fa..f2074215c 100644
--- a/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo
+++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/trig.cairo
@@ -1,6 +1,6 @@
-use debug::PrintTrait;
-use integer::{u64_safe_divmod, u64_as_non_zero};
-use option::OptionTrait;
+use core::debug::PrintTrait;
+use core::integer::{u64_safe_divmod, u64_as_non_zero};
+use core::option::OptionTrait;
use orion::numbers::fixed_point::implementations::fp8x23wide::math::lut;
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
@@ -205,7 +205,7 @@ fn _sin_loop(a: FP8x23W, i: u64, acc: FP8x23W) -> FP8x23W {
#[cfg(test)]
mod tests {
- use traits::Into;
+ use core::traits::Into;
use orion::numbers::fixed_point::implementations::fp8x23wide::helpers::{
assert_precise, assert_relative
diff --git a/src/numbers/fixed_point/utils.cairo b/src/numbers/fixed_point/utils.cairo
index 7e57cc68d..c15b28690 100644
--- a/src/numbers/fixed_point/utils.cairo
+++ b/src/numbers/fixed_point/utils.cairo
@@ -1,3 +1,5 @@
+use core::integer;
+
const HALF_PRIME: felt252 =
1809251394333065606848661391547535052811553607665798349986546028067936010240;
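The added `use core::integer;` presumably exists so that bare `integer::...` calls in utils.cairo keep resolving to the corelib under the fully qualified import style used throughout this patch. A minimal sketch of that pattern, reusing a corelib converter that already appears in fp8x23wide/core.cairo above (`felt_to_u64` is an illustrative name, not a function from utils.cairo):

```rust
use core::integer;
use core::option::OptionTrait;

// With the explicit module import above, the bare path `integer::u64_try_from_felt252`
// resolves to the corelib. Panics if `val` does not fit in a u64.
fn felt_to_u64(val: felt252) -> u64 {
    integer::u64_try_from_felt252(val).unwrap()
}
```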
diff --git a/src/numbers/signed_integer/i128.cairo b/src/numbers/signed_integer/i128.cairo
index f75748b77..7d4355367 100644
--- a/src/numbers/signed_integer/i128.cairo
+++ b/src/numbers/signed_integer/i128.cairo
@@ -1,4 +1,4 @@
-use traits::Into;
+use core::traits::Into;
use orion::numbers::signed_integer::integer_trait::IntegerTrait;
@@ -63,11 +63,11 @@ impl i128Impl of IntegerTrait<i128, u128> {
}
fn is_pos_inf(self: i128) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: i128) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
diff --git a/src/numbers/signed_integer/i16.cairo b/src/numbers/signed_integer/i16.cairo
index f668d7b2a..50c68529e 100644
--- a/src/numbers/signed_integer/i16.cairo
+++ b/src/numbers/signed_integer/i16.cairo
@@ -1,4 +1,4 @@
-use traits::Into;
+use core::traits::Into;
use orion::numbers::signed_integer::integer_trait::IntegerTrait;
@@ -63,11 +63,11 @@ impl i16Impl of IntegerTrait<i16, u16> {
}
fn is_pos_inf(self: i16) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: i16) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
diff --git a/src/numbers/signed_integer/i32.cairo b/src/numbers/signed_integer/i32.cairo
index 1c1fbc121..e5821967f 100644
--- a/src/numbers/signed_integer/i32.cairo
+++ b/src/numbers/signed_integer/i32.cairo
@@ -1,6 +1,6 @@
use core::option::OptionTrait;
use core::traits::TryInto;
-use traits::Into;
+use core::traits::Into;
use orion::numbers::signed_integer::integer_trait::IntegerTrait;
use orion::numbers::signed_integer::i8::i8;
@@ -66,11 +66,11 @@ impl i32Impl of IntegerTrait<i32, u32> {
}
fn is_pos_inf(self: i32) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: i32) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
diff --git a/src/numbers/signed_integer/i64.cairo b/src/numbers/signed_integer/i64.cairo
index e7d6bda94..d0ce98ea1 100644
--- a/src/numbers/signed_integer/i64.cairo
+++ b/src/numbers/signed_integer/i64.cairo
@@ -1,4 +1,4 @@
-use traits::Into;
+use core::traits::Into;
use orion::numbers::signed_integer::integer_trait::IntegerTrait;
@@ -63,11 +63,11 @@ impl i64Impl of IntegerTrait<i64, u64> {
}
fn is_pos_inf(self: i64) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: i64) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
diff --git a/src/numbers/signed_integer/i8.cairo b/src/numbers/signed_integer/i8.cairo
index babc90cea..016469b05 100644
--- a/src/numbers/signed_integer/i8.cairo
+++ b/src/numbers/signed_integer/i8.cairo
@@ -1,4 +1,4 @@
-use traits::Into;
+use core::traits::Into;
use orion::numbers::signed_integer::integer_trait::IntegerTrait;
use orion::numbers::signed_integer::i32::i32;
@@ -69,11 +69,11 @@ impl i8Impl of IntegerTrait<i8, u8> {
}
fn is_pos_inf(self: i8) -> bool {
- self.is_inf() && !self.sign
+ self.is_inf() && !self.sign
}
fn is_neg_inf(self: i8) -> bool {
- self.is_inf() && self.sign
+ self.is_inf() && self.sign
}
}
diff --git a/src/operators/ml.cairo b/src/operators/ml.cairo
index 826f51f51..4b560e336 100644
--- a/src/operators/ml.cairo
+++ b/src/operators/ml.cairo
@@ -1,26 +1,5 @@
-mod tree_regressor;
-mod tree_classifier;
-mod xgboost_regressor;
mod tree_ensemble;
-use orion::operators::ml::tree_regressor::core::{TreeRegressorTrait, TreeRegressor};
-use orion::operators::ml::tree_regressor::implementations::tree_regressor_fp16x16::FP16x16TreeRegressor;
-use orion::operators::ml::tree_regressor::implementations::tree_regressor_fp8x23::FP8x23TreeRegressor;
-use orion::operators::ml::tree_regressor::implementations::tree_regressor_fp32x32::FP32x32TreeRegressor;
-use orion::operators::ml::tree_regressor::implementations::tree_regressor_fp64x64::FP64x64TreeRegressor;
-
-use orion::operators::ml::tree_classifier::core::{TreeClassifierTrait, TreeClassifier};
-use orion::operators::ml::tree_classifier::implementations::tree_classifier_fp16x16::FP16x16TreeClassifier;
-use orion::operators::ml::tree_classifier::implementations::tree_classifier_fp8x23::FP8x23TreeClassifier;
-use orion::operators::ml::tree_classifier::implementations::tree_classifier_fp32x32::FP32x32TreeClassifier;
-use orion::operators::ml::tree_classifier::implementations::tree_classifier_fp64x64::FP64x64TreeClassifier;
-
-use orion::operators::ml::xgboost_regressor::core::{XGBoostRegressorTrait};
-use orion::operators::ml::xgboost_regressor::implementations::xgboost_regressor_fp16x16::FP16x16XGBoostRegressor;
-use orion::operators::ml::xgboost_regressor::implementations::xgboost_regressor_fp8x23::FP8x23XGBoostRegressor;
-use orion::operators::ml::xgboost_regressor::implementations::xgboost_regressor_fp32x32::FP32x32XGBoostRegressor;
-use orion::operators::ml::xgboost_regressor::implementations::xgboost_regressor_fp64x64::FP64x64XGBoostRegressor;
-
use orion::operators::ml::tree_ensemble::core::{
TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES
};
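With the standalone tree_regressor, tree_classifier, and xgboost_regressor modules deleted, the tree-ensemble types re-exported above become the remaining entry point under `orion::operators::ml`. A sketch of the resulting import surface, limited to names that appear in the hunk above (how they are then used is project-specific):

```rust
// Only the tree-ensemble exports remain reachable; the legacy per-model imports
// removed in this hunk no longer exist.
use orion::operators::ml::tree_ensemble::core::{
    TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES
};
```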
diff --git a/src/operators/ml/tree_classifier.cairo b/src/operators/ml/tree_classifier.cairo
deleted file mode 100644
index 469d634e4..000000000
--- a/src/operators/ml/tree_classifier.cairo
+++ /dev/null
@@ -1,2 +0,0 @@
-mod core;
-mod implementations;
diff --git a/src/operators/ml/tree_classifier/core.cairo b/src/operators/ml/tree_classifier/core.cairo
deleted file mode 100644
index a42e6dd94..000000000
--- a/src/operators/ml/tree_classifier/core.cairo
+++ /dev/null
@@ -1,163 +0,0 @@
-use orion::numbers::{FixedTrait};
-
-#[derive(Copy, Drop)]
-struct TreeClassifier<T> {
- left: Option<Box<TreeClassifier<T>>>,
- right: Option<Box<TreeClassifier<T>>>,
- split_feature: usize,
- split_value: T,
- prediction: T,
- class_distribution: Span<
- T
- >, // assuming class labels of type usize (span index), and probability as T.
-}
-
-/// Trait
-///
-/// predict - Given a set of features, predicts the target value using the constructed decision tree.
-/// predict_proba - Predicts class probabilities based on feature data.
-trait TreeClassifierTrait<T> {
- /// # TreeClassifierTrait::predict
- ///
- /// ```rust
- /// fn predict(ref self: TreeClassifier<T>, features: Span<T>) -> T;
- /// ```
- ///
- /// Predicts the target value for a set of features using the provided decision tree.
- ///
- /// ## Args
- ///
- /// * `self`: A reference to the decision tree used for making the prediction.
- /// * `features`: A span representing the features for which the prediction is to be made.
- ///
- /// ## Returns
- ///
- /// The predicted target value.
- ///
- /// ## Type Constraints
- ///
- /// Constrain input and output types to fixed point.
- ///
- /// ## Examples
- ///
- /// ```rust
- /// use orion::operators::ml::{FP16x16TreeClassifier, TreeClassifierTrait, TreeClassifier};
- /// use orion::numbers::{FP16x16, FixedTrait};
- ///
- /// fn tree_classifier_example(tree: TreeClassifier<FP16x16>) {
- ///
- /// tree.predict(
- /// array![FixedTrait::new_unscaled(1, false), FixedTrait::new_unscaled(2, false),].span()
- /// );
- ///
- /// }
- /// ```
- ///
- fn predict(ref self: TreeClassifier<T>, features: Span<T>) -> T;
- /// # TreeClassifierTrait::predict_proba
- ///
- /// ```rust
- /// fn predict_proba(ref self: TreeClassifier<T>, features: Span<T>) -> Span<T>;
- /// ```
- ///
- /// Given a set of features, this method traverses the decision tree
- /// represented by `self` and returns the class distribution (probabilities)
- /// found in the leaf node that matches the provided features. The traversal
- /// stops once a leaf node is reached in the decision tree.
- ///
- /// ## Args
- ///
- /// * `self`: A reference to the decision tree used for making the prediction.
- /// * `features`: A span representing the features for which the prediction is to be made.
- ///
- /// ## Returns
- ///
- /// Returns a `Span<T>` representing the class distribution at the leaf node.
- ///
- /// ## Type Constraints
- ///
- /// Constrain input and output types to fixed points.
- ///
- /// ## Examples
- ///
- /// ```rust
- /// use orion::operators::ml::{FP16x16TreeClassifier, TreeClassifierTrait, TreeClassifier};
- /// use orion::numbers::{FP16x16, FixedTrait};
- ///
- /// fn tree_classifier_example(tree: TreeClassifier<FP16x16>) {
- ///
- /// tree.predict_proba(
- /// array![FixedTrait::new_unscaled(1, false), FixedTrait::new_unscaled(2, false),].span()
- /// );
- ///
- /// }
- /// ```
- ///
- fn predict_proba(ref self: TreeClassifier<T>, features: Span<T>) -> Span<T>;
-}
-
-fn predict<
- T,
- MAG,
- impl FFixedTrait: FixedTrait<T, MAG>,
- impl TPartialOrd: PartialOrd<T>,
- impl FCopy: Copy<T>,
- impl FDrop: Drop<T>,
->(
- ref self: TreeClassifier<T>, features: Span<T>
-) -> T {
- let mut current_node: TreeClassifier<T> = self;
-
- loop {
- match current_node.left {
- Option::Some(left) => {
- match current_node.right {
- Option::Some(right) => {
- if *features.at(current_node.split_feature) < current_node.split_value {
- current_node = left.unbox();
- } else {
- current_node = right.unbox();
- }
- },
- Option::None(_) => { break; }
- }
- },
- Option::None(_) => { break; }
- };
- };
-
- current_node.prediction
-}
-
-fn predict_proba<
- T,
- MAG,
- impl FFixedTrait: FixedTrait<T, MAG>,
- impl TPartialOrd: PartialOrd<T>,
- impl FCopy: Copy<T>,
- impl FDrop: Drop<T>,
->(
- ref self: TreeClassifier<T>, features: Span<T>
-) -> Span<T> {
- let mut current_node: TreeClassifier<T> = self;
-
- loop {
- match current_node.left {
- Option::Some(left) => {
- match current_node.right {
- Option::Some(right) => {
- if *features.at(current_node.split_feature) < current_node.split_value {
- current_node = left.unbox();
- } else {
- current_node = right.unbox();
- }
- },
- Option::None(_) => { break; }
- }
- },
- Option::None(_) => { break; }
- };
- };
-
- current_node.class_distribution
-}
diff --git a/src/operators/ml/tree_classifier/implementations.cairo b/src/operators/ml/tree_classifier/implementations.cairo
deleted file mode 100644
index 2421c7809..000000000
--- a/src/operators/ml/tree_classifier/implementations.cairo
+++ /dev/null
@@ -1,4 +0,0 @@
-mod tree_classifier_fp8x23;
-mod tree_classifier_fp16x16;
-mod tree_classifier_fp32x32;
-mod tree_classifier_fp64x64;
diff --git a/src/operators/ml/tree_classifier/implementations/tree_classifier_fp16x16.cairo b/src/operators/ml/tree_classifier/implementations/tree_classifier_fp16x16.cairo
deleted file mode 100644
index 1789c8a64..000000000
--- a/src/operators/ml/tree_classifier/implementations/tree_classifier_fp16x16.cairo
+++ /dev/null
@@ -1,13 +0,0 @@
-use orion::operators::ml::tree_classifier::core::{TreeClassifier, TreeClassifierTrait};
-use orion::operators::ml::tree_classifier::core;
-use orion::numbers::FP16x16;
-
-impl FP16x16TreeClassifier of TreeClassifierTrait<FP16x16> {
- fn predict(ref self: TreeClassifier<FP16x16>, features: Span<FP16x16>) -> FP16x16 {
- core::predict(ref self, features)
- }
-
- fn predict_proba(ref self: TreeClassifier<FP16x16>, features: Span<FP16x16>) -> Span<FP16x16> {
- core::predict_proba(ref self, features)
- }
-}
diff --git a/src/operators/ml/tree_classifier/implementations/tree_classifier_fp32x32.cairo b/src/operators/ml/tree_classifier/implementations/tree_classifier_fp32x32.cairo
deleted file mode 100644
index 442fb100a..000000000
--- a/src/operators/ml/tree_classifier/implementations/tree_classifier_fp32x32.cairo
+++ /dev/null
@@ -1,13 +0,0 @@
-use orion::operators::ml::tree_classifier::core::{TreeClassifier, TreeClassifierTrait};
-use orion::operators::ml::tree_classifier::core;
-use orion::numbers::{FP32x32, FP32x32Impl};
-
-impl FP32x32TreeClassifier of TreeClassifierTrait