diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
index f0d96e5e..31e2a89b 100644
--- a/.JuliaFormatter.toml
+++ b/.JuliaFormatter.toml
@@ -1,5 +1,5 @@
 style = "sciml"
-format_markdown = true
+format_markdown = false
 whitespace_in_kwargs = false
 margin = 92
 indent = 4
diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml
index 0b0fbfc2..bbcfc4ba 100644
--- a/.github/workflows/Downgrade.yml
+++ b/.github/workflows/Downgrade.yml
@@ -33,8 +33,8 @@ jobs:
           version: ${{ matrix.version }}
       - uses: julia-actions/julia-downgrade-compat@v1
         with:
-          skip: Pkg,TOML
-      - uses: julia-actions/cache@v1
+          skip: Pkg, TOML, Test, Random, LinearAlgebra, Statistics
+      - uses: julia-actions/cache@v2
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
       - uses: julia-actions/julia-buildpkg@v1
diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml
deleted file mode 100644
index f49313b6..00000000
--- a/.github/workflows/TagBot.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: TagBot
-on:
-  issue_comment:
-    types:
-      - created
-  workflow_dispatch:
-jobs:
-  TagBot:
-    if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot'
-    runs-on: ubuntu-latest
-    steps:
-      - uses: JuliaRegistries/TagBot@v1
-        with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          ssh: ${{ secrets.DOCUMENTER_KEY }}
diff --git a/Project.toml b/Project.toml
index 86eb9627..f939bba1 100644
--- a/Project.toml
+++ b/Project.toml
@@ -25,15 +25,15 @@ RCLIBSVMExt = "LIBSVM"
 RCMLJLinearModelsExt = "MLJLinearModels"

 [compat]
-Adapt = "3.3.3, 4"
+Adapt = "4.1.1"
 Aqua = "0.8"
 CellularAutomata = "0.0.2"
-DifferentialEquations = "7"
+DifferentialEquations = "7.15.0"
 Distances = "0.10"
 LIBSVM = "0.8"
 LinearAlgebra = "1.10"
 MLJLinearModels = "0.9.2, 0.10"
-NNlib = "0.8.4, 0.9"
+NNlib = "0.9.26"
 PartialFunctions = "1.2"
 Random = "1.10"
 Reexport = "1.2.2"
diff --git a/README.md b/README.md
index 20f7cbf2..94be56bf 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@

+
 [![Join the chat at https://julialang.zulipchat.com #sciml-bridged](https://img.shields.io/static/v1?label=Zulip&message=chat&color=9558b2&labelColor=389826)](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged)
@@ -11,9 +12,11 @@
 [![Build status](https://badge.buildkite.com/db8f91b89a10ad79bbd1d9fdb1340e6f6602a1c0ed9496d4d0.svg)](https://buildkite.com/julialang/reservoircomputing-dot-jl)
 [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor%27s%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
 [![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle)
+
 # ReservoirComputing.jl
+
 ReservoirComputing.jl provides an efficient, modular and easy to use implementation of Reservoir Computing models such as Echo State Networks (ESNs). For information on using this package please refer to the [stable documentation](https://docs.sciml.ai/ReservoirComputing/stable/). Use the [in-development documentation](https://docs.sciml.ai/ReservoirComputing/dev/) to take a look at not yet released features.

 ## Quick Example
diff --git a/docs/Project.toml b/docs/Project.toml
index dc7bca9e..d372c202 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -1,5 +1,4 @@
 [deps]
-CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 CellularAutomata = "878138dc-5b27-11ea-1a71-cb95d38d6b29"
 DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
 Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
@@ -12,12 +11,11 @@ ReservoirComputing = "7c2d2b1e-3dd4-11ea-355a-8f6a8116e294"
 StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"

 [compat]
-CUDA = "3, 4, 5"
 CellularAutomata = "0.0.2"
-DifferentialEquations = "7"
+DifferentialEquations = "7.15.0"
 Documenter = "1"
 OrdinaryDiffEq = "6"
 Plots = "1"
 PredefinedDynamicalSystems = "1"
-ReservoirComputing = "0.10"
-StatsBase = "0.33, 0.34"
+ReservoirComputing = "0.10.5"
+StatsBase = "0.34.4"
diff --git a/docs/make.jl b/docs/make.jl
index 07314e42..99e06512 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -1,7 +1,7 @@
 using Documenter, ReservoirComputing

-cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true)
-cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true)
+cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml"; force=true)
+cp("./docs/Project.toml", "./docs/src/assets/Project.toml"; force=true)

 ENV["PLOTS_TEST"] = "true"
 ENV["GKSwstype"] = "100"
diff --git a/docs/src/api/esn_variations.md b/docs/src/api/esn_variations.md
index c8df19e7..c319f8af 100644
--- a/docs/src/api/esn_variations.md
+++ b/docs/src/api/esn_variations.md
@@ -11,4 +11,4 @@
 ```@docs
 HybridESN
 KnowledgeModel
-```
\ No newline at end of file
+```
diff --git a/docs/src/api/inits.md b/docs/src/api/inits.md
index 83942c01..dd423518 100644
--- a/docs/src/api/inits.md
+++ b/docs/src/api/inits.md
@@ -1,4 +1,5 @@
 # Echo State Networks Initializers
+
 ## Input layers

 ```@docs
@@ -17,4 +18,4 @@
 cycle_jumps
 simple_cycle
 pseudo_svd
-```
\ No newline at end of file
+```
diff --git a/docs/src/api/states.md b/docs/src/api/states.md
index 3a29d591..49b31bfd 100644
--- a/docs/src/api/states.md
+++ b/docs/src/api/states.md
@@ -22,4 +22,4 @@
 ```@docs
 ReservoirComputing.create_states
-```
\ No newline at end of file
+```
diff --git a/docs/src/esn_tutorials/deep_esn.md b/docs/src/esn_tutorials/deep_esn.md
index 412edadf..26ab0f00 100644
--- a/docs/src/esn_tutorials/deep_esn.md
+++ b/docs/src/esn_tutorials/deep_esn.md
@@ -2,7 +2,7 @@

 Deep Echo State Network architectures started to gain some traction recently. In this guide, we illustrate how it is possible to use ReservoirComputing.jl to build a deep ESN.

-The network implemented in this library is taken from [^1]. It works by stacking reservoirs on top of each other, feeding the output from one into the next. The states are obtained by merging all the inner states of the stacked reservoirs. For a more in-depth explanation, refer to the paper linked above.
+The network implemented in this library is taken from [^1]. It works by stacking reservoirs on top of each other, feeding the output from one into the next.
+The states are obtained by merging all the inner states of the stacked reservoirs. For a more in-depth explanation, refer to the paper linked above.

 ## Lorenz Example
diff --git a/docs/src/general/states_variation.md b/docs/src/general/states_variation.md
index a1b0dab5..e5767a24 100644
--- a/docs/src/general/states_variation.md
+++ b/docs/src/general/states_variation.md
@@ -6,11 +6,11 @@ In ReservoirComputing models, it's possible to perform alterations on the reserv

 ### Extending States

-Extending the states involves appending the corresponding input values to the reservoir states. If \(\textbf{x}(t)\) represents the reservoir state at time \(t\) corresponding to the input \(\textbf{u}(t)\), the extended state is represented as \([\textbf{x}(t); \textbf{u}(t)]\), where \([;]\) denotes vertical concatenation. This procedure is commonly used in Echo State Networks and is described in [Jaeger's Scholarpedia](http://www.scholarpedia.org/article/Echo_state_network). You can extend the states in every ReservoirComputing.jl model by using the `states_type` keyword argument and calling the `ExtendedStates()` method. No additional arguments are needed.
+Extending the states involves appending the corresponding input values to the reservoir states. If $\textbf{x}(t)$ represents the reservoir state at time $t$ corresponding to the input $\textbf{u}(t)$, the extended state is represented as $[\textbf{x}(t); \textbf{u}(t)]$, where $[;]$ denotes vertical concatenation. This procedure is commonly used in Echo State Networks. You can extend the states in every ReservoirComputing.jl model by using the `states_type` keyword argument and calling the `ExtendedStates()` method. No additional arguments are needed.

 ### Padding States

-Padding the states involves appending a constant value, such as 1.0, to each state. In the notation introduced earlier, padded states can be represented as \([\textbf{x}(t); 1.0]\). This approach is detailed in the [seminal guide](https://mantas.info/get-publication/?f=Practical_ESN.pdf) to Echo State Networks by Mantas Lukoševičius. To pad the states, you can use the `states_type` keyword argument and call the `PaddedStates(padding)` method, where `padding` represents the value to be concatenated to the states. By default, the padding value is set to 1.0, so most of the time, calling `PaddedStates()` will suffice.
+Padding the states involves appending a constant value, such as 1.0, to each state. In the notation introduced earlier, padded states can be represented as $[\textbf{x}(t); 1.0]$. This approach is detailed in "A Practical Guide to Applying Echo State Networks" by Mantas Lukoševičius. To pad the states, you can use the `states_type` keyword argument and call the `PaddedStates(padding)` method, where `padding` represents the value to be concatenated to the states. By default, the padding value is set to 1.0, so most of the time, calling `PaddedStates()` will suffice.

 Additionally, you can pad the extended states by using the `PaddedExtendedStates(padding)` method, which also has a default padding value of 1.0.
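
As a usage note on the `states_type` options documented in the states_variation.md hunk above, here is a minimal, illustrative Julia sketch. The toy data and the `ESN(train_data, in_size, res_size; ...)` constructor form are assumptions based on the v0.10 documentation, not part of this diff; the zero-argument state modifiers are the ones described above.

```julia
using ReservoirComputing

# Hypothetical toy data: 3 input series over 200 time steps.
train_data = rand(Float32, 3, 200)

# Extended states: the input u(t) is appended to the reservoir state x(t).
esn_ext = ESN(train_data, 3, 100; states_type=ExtendedStates())

# Padded states: a constant (1.0 by default) is appended to each state.
esn_pad = ESN(train_data, 3, 100; states_type=PaddedStates())

# Padded extended states: both the input and the padding constant are appended.
esn_padext = ESN(train_data, 3, 100; states_type=PaddedExtendedStates())
```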
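Similarly, for the stacked-reservoir architecture described in the deep_esn.md hunk, a rough sketch might look like the following. The `DeepESN` constructor arguments and the `train`/`Generative` calls are assumptions mirroring the standard ESN workflow and should be checked against the docstrings; the data is hypothetical.

```julia
using ReservoirComputing

# Hypothetical data for one-step-ahead prediction: 3 series, 500 steps.
data = rand(Float32, 3, 500)
input_data = data[:, 1:(end - 1)]
target_data = data[:, 2:end]

# Deep ESN: reservoirs are stacked, and their inner states are merged.
desn = DeepESN(input_data, 3, 100)

# Fit the readout, then predict 50 steps generatively.
output_layer = train(desn, target_data)
prediction = desn(Generative(50), output_layer)
```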