From 4c3925b20cace670eb879ab2e322392f5cf10f74 Mon Sep 17 00:00:00 2001 From: MartinuzziFrancesco Date: Sun, 4 Feb 2024 18:34:43 +0100 Subject: [PATCH] small changes to inits, start of streamline --- README.md | 3 +- docs/src/esn_tutorials/hybrid.md | 3 +- docs/src/esn_tutorials/lorenz_basic.md | 4 +- src/ReservoirComputing.jl | 8 +- src/esn/deepesn.jl | 92 ++++++----- src/esn/esn.jl | 31 ++-- src/esn/esn_input_layers.jl | 203 ++++++++++--------------- src/esn/esn_predict.jl | 11 +- src/esn/esn_reservoir_drivers.jl | 24 +-- src/esn/esn_reservoirs.jl | 91 ++++++----- src/esn/hybridesn.jl | 45 +++--- test/esn/test_drivers.jl | 83 ++++------ test/esn/test_inits.jl | 90 +++++++++++ test/esn/test_input_layers.jl | 55 ------- test/esn/test_reservoirs.jl | 41 ----- test/runtests.jl | 7 +- test/test_states.jl | 76 ++++----- test/utils.jl | 5 - 18 files changed, 394 insertions(+), 478 deletions(-) create mode 100644 test/esn/test_inits.jl delete mode 100644 test/esn/test_input_layers.jl delete mode 100644 test/esn/test_reservoirs.jl delete mode 100644 test/utils.jl diff --git a/README.md b/README.md index 2b123d66..b4667be0 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ Now that we have the data we can initialize the ESN with the chosen parameters. input_size = 3 res_size = 300 esn = ESN(input_data, input_size, res_size; - reservoir = rand_sparse(;radius = 1.2, sparsity = 6 / res_size), + reservoir = rand_sparse(; radius = 1.2, sparsity = 6 / res_size), input_layer = weighted_init, nla_type = NLAT2()) ``` @@ -104,6 +104,7 @@ If you use this library in your work, please cite: url = {http://jmlr.org/papers/v23/22-0611.html} } ``` + ## Acknowledgements This project was possible thanks to initial funding through the [Google summer of code](https://summerofcode.withgoogle.com/) 2020 program. Francesco M. further acknowledges [ScaDS.AI](https://scads.ai/) and [RSC4Earth](https://rsc4earth.de/) for supporting the current progress on the library. 
diff --git a/docs/src/esn_tutorials/hybrid.md b/docs/src/esn_tutorials/hybrid.md index 5682e9db..25797089 100644 --- a/docs/src/esn_tutorials/hybrid.md +++ b/docs/src/esn_tutorials/hybrid.md @@ -57,8 +57,7 @@ km = KnowledgeModel(prior_model_data_generator, u0, tspan_train, train_len) in_size = 3 res_size = 300 -hesn = HybridESN( - km, +hesn = HybridESN(km, input_data, in_size, res_size; diff --git a/docs/src/esn_tutorials/lorenz_basic.md b/docs/src/esn_tutorials/lorenz_basic.md index 364c2c4b..1a66f834 100644 --- a/docs/src/esn_tutorials/lorenz_basic.md +++ b/docs/src/esn_tutorials/lorenz_basic.md @@ -53,8 +53,8 @@ input_scaling = 0.1 #build ESN struct esn = ESN(input_data, in_size, res_size; - reservoir = rand_sparse(;radius = res_radius, sparsity = res_sparsity), - input_layer = weighted_init(;scaling = input_scaling), + reservoir = rand_sparse(; radius = res_radius, sparsity = res_sparsity), + input_layer = weighted_init(; scaling = input_scaling), reservoir_driver = RNN(), nla_type = NLADefault(), states_type = StandardStates()) diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl index f7a2046a..4afb7be1 100644 --- a/src/ReservoirComputing.jl +++ b/src/ReservoirComputing.jl @@ -19,8 +19,8 @@ export NLADefault, NLAT1, NLAT2, NLAT3 export StandardStates, ExtendedStates, PaddedStates, PaddedExtendedStates export StandardRidge, LinearModel export AbstractLayer, create_layer -export scaled_rand, weighted_init, sparse_layer, informed_layer, bernoulli_sample_layer, irrational_sample_layer -export rand_sparse, delay_line, delay_line_backward_reservoir, cycle_jumps_reservoir, simple_cycle_reservoir, pseudo_svd_reservoir +export scaled_rand, weighted_init, sparse_init, informed_init, minimal_init +export rand_sparse, delay_line, delay_line_backward, cycle_jumps, simple_cycle, pseudo_svd export RNN, MRNN, GRU, GRUParams, FullyGated, Minimal export ESN, train export HybridESN, KnowledgeModel @@ -75,7 +75,9 @@ function Predictive(prediction_data) end #fallbacks for initializers -for initializer in (:rand_sparse, :delay_line, :delay_line_backward_reservoir, :cycle_jumps_reservoir, :simple_cycle_reservoir, :pseudo_svd_reservoir, :scaled_rand, :weighted_init, :sparse_layer, :informed_layer, :bernoulli_sample_layer, :irrational_sample_layer) +for initializer in (:rand_sparse, :delay_line, :delay_line_backward, :cycle_jumps, + :simple_cycle, :pseudo_svd, + :scaled_rand, :weighted_init, :sparse_init, :informed_init, :minimal_init) NType = ifelse(initializer === :rand_sparse, Real, Number) @eval function ($initializer)(dims::Integer...; kwargs...) return $initializer(_default_rng(), Float32, dims...; kwargs...) 
diff --git a/src/esn/deepesn.jl b/src/esn/deepesn.jl index 4ab05f39..3402d2b5 100644 --- a/src/esn/deepesn.jl +++ b/src/esn/deepesn.jl @@ -1,7 +1,6 @@ -struct DeepESN{I, S, V, N, T, O, M, B, ST, W, IS} <: AbstractEchoStateNetwork +struct DeepESN{I, S, N, T, O, M, B, ST, W, IS} <: AbstractEchoStateNetwork res_size::I train_data::S - variation::V nla_type::N input_matrix::T reservoir_driver::O @@ -12,22 +11,19 @@ struct DeepESN{I, S, V, N, T, O, M, B, ST, W, IS} <: AbstractEchoStateNetwork states::IS end -function DeepESN( - train_data, - in_size::Int, - res_size::AbstractArray; - input_layer = scaled_rand, - reservoir = rand_sparse, - bias = zeros64, - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates(), - washout = 0, - rng = _default_rng(), - T=Float64, - matrix_type = typeof(train_data) -) - +function DeepESN(train_data, + in_size::Int, + res_size::AbstractArray; + input_layer = scaled_rand, + reservoir = rand_sparse, + bias = zeros64, + reservoir_driver = RNN(), + nla_type = NLADefault(), + states_type = StandardStates(), + washout = 0, + rng = _default_rng(), + T = Float64, + matrix_type = typeof(train_data)) if states_type isa AbstractPaddedStates in_size = size(train_data, 1) + 1 train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))), @@ -42,43 +38,43 @@ function DeepESN( input_matrix, bias_vector) train_data = train_data[:, (washout + 1):end] - ESN(sum(res_size), train_data, variation, nla_type, input_matrix, + DeepESN(sum(res_size), train_data, variation, nla_type, input_matrix, inner_res_driver, reservoir_matrix, bias_vector, states_type, washout, states) end function obtain_layers(in_size, - input_layer, - reservoir::Vector, - bias; - matrix_type = Matrix{Float64}) -esn_depth = length(reservoir) -input_res_sizes = [get_ressize(reservoir[i]) for i in 1:esn_depth] -in_sizes = zeros(Int, esn_depth) -in_sizes[2:end] = input_res_sizes[1:(end - 1)] -in_sizes[1] = in_size + input_layer, + reservoir::Vector, + bias; + matrix_type = Matrix{Float64}) + esn_depth = length(reservoir) + input_res_sizes = [get_ressize(reservoir[i]) for i in 1:esn_depth] + in_sizes = zeros(Int, esn_depth) + in_sizes[2:end] = input_res_sizes[1:(end - 1)] + in_sizes[1] = in_size + + if input_layer isa Array + input_matrix = [create_layer(input_layer[j], input_res_sizes[j], in_sizes[j], + matrix_type = matrix_type) for j in 1:esn_depth] + else + _input_layer = fill(input_layer, esn_depth) + input_matrix = [create_layer(_input_layer[k], input_res_sizes[k], in_sizes[k], + matrix_type = matrix_type) for k in 1:esn_depth] + end -if input_layer isa Array - input_matrix = [create_layer(input_layer[j], input_res_sizes[j], in_sizes[j], - matrix_type = matrix_type) for j in 1:esn_depth] -else - _input_layer = fill(input_layer, esn_depth) - input_matrix = [create_layer(_input_layer[k], input_res_sizes[k], in_sizes[k], + res_sizes = [get_ressize(input_matrix[j]) for j in 1:esn_depth] + reservoir_matrix = [create_reservoir(reservoir[k], res_sizes[k], matrix_type = matrix_type) for k in 1:esn_depth] -end -res_sizes = [get_ressize(input_matrix[j]) for j in 1:esn_depth] -reservoir_matrix = [create_reservoir(reservoir[k], res_sizes[k], - matrix_type = matrix_type) for k in 1:esn_depth] + if bias isa Array + bias_vector = [create_layer(bias[j], res_sizes[j], 1, matrix_type = matrix_type) + for j in 1:esn_depth] + else + _bias = fill(bias, esn_depth) + bias_vector = [create_layer(_bias[k], res_sizes[k], 1, matrix_type = matrix_type) + for k in 1:esn_depth] + end -if bias isa Array 
- bias_vector = [create_layer(bias[j], res_sizes[j], 1, matrix_type = matrix_type) - for j in 1:esn_depth] -else - _bias = fill(bias, esn_depth) - bias_vector = [create_layer(_bias[k], res_sizes[k], 1, matrix_type = matrix_type) - for k in 1:esn_depth] + return input_matrix, reservoir_matrix, bias_vector, res_sizes end - -return input_matrix, reservoir_matrix, bias_vector, res_sizes -end \ No newline at end of file diff --git a/src/esn/esn.jl b/src/esn/esn.jl index 2beb552f..260e7aff 100644 --- a/src/esn/esn.jl +++ b/src/esn/esn.jl @@ -41,22 +41,19 @@ train_data = rand(10, 100) # 10 features, 100 time steps esn = ESN(train_data, reservoir=RandSparseReservoir(200), washout=10) ``` """ -function ESN( - train_data, - in_size::Int, - res_size::Int; - input_layer = scaled_rand, - reservoir = rand_sparse, - bias = zeros64, - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates(), - washout = 0, - rng = _default_rng(), - T = Float32, - matrix_type = typeof(train_data) -) - +function ESN(train_data, + in_size::Int, + res_size::Int; + input_layer = scaled_rand, + reservoir = rand_sparse, + bias = zeros64, + reservoir_driver = RNN(), + nla_type = NLADefault(), + states_type = StandardStates(), + washout = 0, + rng = _default_rng(), + T = Float32, + matrix_type = typeof(train_data)) if states_type isa AbstractPaddedStates in_size = size(train_data, 1) + 1 train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))), @@ -64,7 +61,7 @@ function ESN( end reservoir_matrix = reservoir(rng, T, res_size, res_size) - input_matrix = input_layer(rng, T, in_size, res_size) + input_matrix = input_layer(rng, T, res_size, in_size) bias_vector = bias(rng, res_size) inner_res_driver = reservoir_driver_params(reservoir_driver, res_size, in_size) states = create_states(inner_res_driver, train_data, washout, reservoir_matrix, diff --git a/src/esn/esn_input_layers.jl b/src/esn/esn_input_layers.jl index 61cfc742..10ea2330 100644 --- a/src/esn/esn_input_layers.jl +++ b/src/esn/esn_input_layers.jl @@ -17,15 +17,12 @@ A matrix of type with dimensions specified by `dims`. Each element of the matrix rng = Random.default_rng() matrix = scaled_rand(rng, Float64, (100, 50); scaling=0.2) """ -function scaled_rand( - rng::AbstractRNG, - ::Type{T}, - dims::Integer...; - scaling=T(0.1) -) where {T <: Number} - +function scaled_rand(rng::AbstractRNG, + ::Type{T}, + dims::Integer...; + scaling = T(0.1)) where {T <: Number} res_size, in_size = dims - layer_matrix = rand(rng, Uniform(-scaling, scaling), res_size, in_size) + layer_matrix = T.(rand(rng, Uniform(-scaling, scaling), res_size, in_size)) return layer_matrix end @@ -53,24 +50,27 @@ input_layer = weighted_init(rng, Float64, (3, 300); scaling=0.2) "Reservoir observers: Model-free inference of unmeasured variables in chaotic systems." Chaos: An Interdisciplinary Journal of Nonlinear Science 27.4 (2017): 041102. 
""" -function weighted_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; scaling=T(0.1)) where {T <: Number} - - in_size, approx_res_size = dims +function weighted_init(rng::AbstractRNG, + ::Type{T}, + dims::Integer...; + scaling = T(0.1)) where {T <: Number} + approx_res_size, in_size = dims res_size = Int(floor(approx_res_size / in_size) * in_size) layer_matrix = zeros(T, res_size, in_size) q = floor(Int, res_size / in_size) for i in 1:in_size - layer_matrix[((i - 1) * q + 1):((i) * q), i] = rand(rng, Uniform(-scaling, scaling), q) + layer_matrix[((i - 1) * q + 1):((i) * q), i] = rand(rng, + Uniform(-scaling, scaling), + q) end return layer_matrix end - - +# TODO: @MartinuzziFrancesco remove when pr gets into WeightInitializers """ - sparse_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; scaling=T(0.1), sparsity=T(0.1)) where {T <: Number} + sparse_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; scaling=T(0.1), sparsity=T(0.1)) where {T <: Number} Create and return a sparse layer matrix for use in neural network models. The matrix will be of size specified by `dims`, with the specified `sparsity` and `scaling`. @@ -89,24 +89,22 @@ A sparse layer matrix. # Example ```julia rng = Random.default_rng() -input_layer = sparse_layer(rng, Float64, (3, 300); scaling=0.2, sparsity=0.1) +input_layer = sparse_init(rng, Float64, (3, 300); scaling=0.2, sparsity=0.1) ``` """ -function sparse_layer(rng::AbstractRNG,::Type{T}, dims::Integer...; - scaling=T(0.1), sparsity=T(0.1)) where {T <: Number} - - in_size, res_size = dims - layer_matrix = Matrix(sprand(rng, res_size, in_size, sparsity)) - layer_matrix = 2.0 .* (layer_matrix .- 0.5) - replace!(layer_matrix, -1.0 => 0.0) +function sparse_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; + scaling = T(0.1), sparsity = T(0.1)) where {T <: Number} + res_size, in_size = dims + layer_matrix = Matrix(sprand(rng, T, res_size, in_size, sparsity)) + layer_matrix = T.(2.0) .* (layer_matrix .- T.(0.5)) + replace!(layer_matrix, T(-1.0) => T(0.0)) layer_matrix = scaling .* layer_matrix return layer_matrix end - """ - informed_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; scaling=T(0.1), model_in_size, gamma=T(0.5)) where {T <: Number} + informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; scaling=T(0.1), model_in_size, gamma=T(0.5)) where {T <: Number} Create a layer of a neural network. @@ -126,12 +124,11 @@ Create a layer of a neural network. 
rng = Random.default_rng() dims = (100, 200) model_in_size = 50 -input_matrix = informed_layer(rng, Float64, dims; model_in_size=model_in_size) +input_matrix = informed_init(rng, Float64, dims; model_in_size=model_in_size) ``` """ -function informed_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; - scaling=T(0.1), model_in_size, gamma=T(0.5)) where {T <: Number} - +function informed_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; + scaling = T(0.1), model_in_size, gamma = T(0.5)) where {T <: Number} res_size, in_size = dims state_size = in_size - model_in_size @@ -148,7 +145,7 @@ function informed_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; idxs = findall(Bool[zero_connections .== input_matrix[i, :] for i in 1:size(input_matrix, 1)]) random_row_idx = idxs[rand(rng, 1:end)] - random_clm_idx = range(1, state_size, step=1)[rand(rng, 1:end)] + random_clm_idx = range(1, state_size, step = 1)[rand(rng, 1:end)] input_matrix[random_row_idx, random_clm_idx] = rand(rng, Uniform(-scaling, scaling)) end @@ -156,82 +153,90 @@ function informed_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; idxs = findall(Bool[zero_connections .== input_matrix[i, :] for i in 1:size(input_matrix, 1)]) random_row_idx = idxs[rand(rng, 1:end)] - random_clm_idx = range(state_size + 1, in_size, step=1)[rand(rng, 1:end)] + random_clm_idx = range(state_size + 1, in_size, step = 1)[rand(rng, 1:end)] input_matrix[random_row_idx, random_clm_idx] = rand(rng, Uniform(-scaling, scaling)) end return input_matrix end - - -function BernoulliSample(; p = 0.5) - return p -end - -function create_minimum_input(p = sampling, res_size, in_size, weight, rng::AbstractRNG) - - input_matrix = zeros(res_size, in_size) - for i in 1:res_size - for j in 1:in_size - rand(rng, Bernoulli(p)) ? (input_matrix[i, j] = weight) : (input_matrix[i, j] = -weight) - end - end - return input_matrix -end - - """ - bernoulli_sample_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = 0.1, - sampling = BernoulliSample(0.5) + irrational_sample_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; + weight = 0.1, + sampling = IrrationalSample(; irrational = pi, start = 1) ) where {T <: Number} -Create a layer matrix using the Bernoulli sampling method. +Create a layer matrix using the provided random number generator and sampling parameters. # Arguments -- `rng::AbstractRNG`: The random number generator. +- `rng::AbstractRNG`: The random number generator used to generate random numbers. - `dims::Integer...`: The dimensions of the layer matrix. -- `weight::Number = 0.1`: The weight value. -- `sampling::BernoulliSample = BernoulliSample(0.5)`: The Bernoulli sampling object. +- `weight`: The weight used to fill the layer matrix. Default is 0.1. +- `sampling`: The sampling parameters used to generate the input matrix. Default is IrrationalSample(irrational = pi, start = 1). # Returns -The generated layer matrix. +The layer matrix generated using the provided random number generator and sampling parameters. 
# Example ```julia +using Random rng = Random.default_rng() -dims = (100, 200) -weight = 0.1 -sampling = BernoulliSample(0.5) -layer_matrix = bernoulli_sample_layer(rng, Float64, dims; weight=weight, sampling=sampling) +dims = (3, 2) +weight = 0.5 +layer_matrix = irrational_sample_init(rng, Float64, dims; weight = weight, sampling = IrrationalSample(irrational = sqrt(2), start = 1)) ``` - """ -function bernoulli_sample_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = 0.1, - sampling = BernoulliSample(0.5) - )where {T <: Number} - +function minimal_init(rng::AbstractRNG, ::Type{T}, dims::Integer...; + sampling_type::Symbol = :bernoulli, + weight::Number = T(0.1), + irrational::Real = pi, + start::Int = 1, + p::Number = T(0.5)) where {T <: Number} res_size, in_size = dims - sampling = sampling - weight = weight - layer_matrix = create_minimum_input(sampling, res_size, in_size, weight, rng) + if sampling_type == :bernoulli + layer_matrix = _create_bernoulli(p, res_size, in_size, weight, rng, T) + elseif sampling_type == :irrational + layer_matrix = _create_irrational(irrational, + start, + res_size, + in_size, + weight, + rng, + T) + else + error("Sampling type not allowed. Please use one of :bernoulli or :irrational") + end return layer_matrix end - - -function IrrationalSample(; irrational = pi, start = 1) - return irrational, start +function _create_bernoulli(p::T, + res_size::Int, + in_size::Int, + weight::T, + rng::AbstractRNG, + ::Type{T}) where {T <: Number} + input_matrix = zeros(T, res_size, in_size) + for i in 1:res_size + for j in 1:in_size + rand(rng, Bernoulli(p)) ? (input_matrix[i, j] = weight) : + (input_matrix[i, j] = -weight) + end + end + return input_matrix end -function create_minimum_input(irrational, start = sampling, res_size, in_size, weight, rng::AbstractRNG) +function _create_irrational(irrational::Irrational, + start::Int, + res_size::Int, + in_size::Int, + weight::T, + rng::AbstractRNG, + ::Type{T}) where {T <: Number} setprecision(BigFloat, Int(ceil(log2(10) * (res_size * in_size + start + 1)))) ir_string = string(BigFloat(irrational)) |> collect deleteat!(ir_string, findall(x -> x == '.', ir_string)) ir_array = zeros(length(ir_string)) - input_matrix = zeros(res_size, in_size) + input_matrix = zeros(T, res_size, in_size) for i in 1:length(ir_string) ir_array[i] = parse(Int, ir_string[i]) @@ -239,50 +244,10 @@ function create_minimum_input(irrational, start = sampling, res_size, in_size, w for i in 1:res_size for j in 1:in_size - random_number = rand(rng) - input_matrix[i, j] = random_number < 0.5 ? -weight : weight + random_number = rand(rng, T) + input_matrix[i, j] = random_number < 0.5 ? -weight : weight end end - return input_matrix + return T.(input_matrix) end - -""" - irrational_sample_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = 0.1, - sampling = IrrationalSample(; irrational = pi, start = 1) - ) where {T <: Number} - -Create a layer matrix using the provided random number generator and sampling parameters. - -# Arguments -- `rng::AbstractRNG`: The random number generator used to generate random numbers. -- `dims::Integer...`: The dimensions of the layer matrix. -- `weight`: The weight used to fill the layer matrix. Default is 0.1. -- `sampling`: The sampling parameters used to generate the input matrix. Default is IrrationalSample(irrational = pi, start = 1). - -# Returns -The layer matrix generated using the provided random number generator and sampling parameters. 
- -# Example -```julia -using Random -rng = Random.default_rng() -dims = (3, 2) -weight = 0.5 -layer_matrix = irrational_sample_layer(rng, Float64, dims; weight = weight, sampling = IrrationalSample(irrational = sqrt(2), start = 1)) -``` -""" - -function irrational_sample_layer(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = 0.1, - sampling = IrrationalSample(; irrational = pi, start = 1) - )where {T <: Number} - - res_size, in_size = dims - sampling = sampling - weight = weight - layer_matrix = create_minimum_input(sampling, res_size, in_size, weight, rng) - - return layer_matrix -end \ No newline at end of file diff --git a/src/esn/esn_predict.jl b/src/esn/esn_predict.jl index 1b4cd462..05deb98c 100644 --- a/src/esn/esn_predict.jl +++ b/src/esn/esn_predict.jl @@ -69,11 +69,18 @@ function next_state_prediction!(esn::ESN, x, x_new, out, out_pad, i, tmp_array, end #TODO fixme @MatrinuzziFra -function next_state_prediction!(hesn::HybridESN, x, x_new, out, out_pad, i, tmp_array, model_prediction_data) +function next_state_prediction!(hesn::HybridESN, + x, + x_new, + out, + out_pad, + i, + tmp_array, + model_prediction_data) out_tmp = vcat(out, model_prediction_data[:, i]) out_pad = pad_state!(hesn.states_type, out_pad, out_tmp) x = next_state!(x, hesn.reservoir_driver, x[1:(hesn.res_size)], out_pad, - hesn.reservoir_matrix, hesn.input_matrix, hesn.bias_vector, tmp_array) + hesn.reservoir_matrix, hesn.input_matrix, hesn.bias_vector, tmp_array) x_tmp = vcat(x, model_prediction_data[:, i]) x_new = hesn.states_type(hesn.nla_type, x_tmp, out_pad) return x, x_new diff --git a/src/esn/esn_reservoir_drivers.jl b/src/esn/esn_reservoir_drivers.jl index 6ab198d7..98e7ab78 100644 --- a/src/esn/esn_reservoir_drivers.jl +++ b/src/esn/esn_reservoir_drivers.jl @@ -285,9 +285,9 @@ A GRUParams object containing the parameters needed for the GRU-based reservoir function GRU( ; activation_function = [NNlib.sigmoid, NNlib.sigmoid, tanh], - inner_layer = fill(DenseLayer(), 2), - reservoir = fill(RandSparseReservoir(0), 2), - bias = fill(DenseLayer(), 2), + inner_layer = fill(scaled_rand, 2), + reservoir = fill(rand_sparse, 2), + bias = fill(scaled_rand, 2), variant = FullyGated()) return GRU(activation_function, inner_layer, reservoir, bias, variant) end @@ -312,22 +312,22 @@ end #dispatch on the different gru variations function create_gru_layers(gru, variant::FullyGated, res_size, in_size) - Wz_in = create_layer(gru.inner_layer[1], res_size, in_size) - Wz = create_reservoir(gru.reservoir[1], res_size) - bz = create_layer(gru.bias[1], res_size, 1) + Wz_in = gru.inner_layer[1](res_size, in_size) + Wz = gru.reservoir[1](res_size, res_size) + bz = gru.bias[1](res_size, 1) - Wr_in = create_layer(gru.inner_layer[2], res_size, in_size) - Wr = create_reservoir(gru.reservoir[2], res_size) - br = create_layer(gru.bias[2], res_size, 1) + Wr_in = gru.inner_layer[2](res_size, in_size) + Wr = gru.reservoir[2](res_size, res_size) + br = gru.bias[2](res_size, 1) return GRUParams(gru.activation_function, variant, Wz_in, Wz, bz, Wr_in, Wr, br) end #check this one, not sure function create_gru_layers(gru, variant::Minimal, res_size, in_size) - Wz_in = create_layer(gru.inner_layer, res_size, in_size) - Wz = create_reservoir(gru.reservoir, res_size) - bz = create_layer(gru.bias, res_size, 1) + Wz_in = gru.inner_layer(res_size, in_size) + Wz = gru.reservoir(res_size, res_size) + bz = gru.bias(res_size, 1) Wr_in = nothing Wr = nothing diff --git a/src/esn/esn_reservoirs.jl b/src/esn/esn_reservoirs.jl index fccb3f24..5d9dd615 
100644 --- a/src/esn/esn_reservoirs.jl +++ b/src/esn/esn_reservoirs.jl @@ -60,7 +60,7 @@ function delay_line(rng::AbstractRNG, dims::Integer...; weight = T(0.1)) where {T <: Number} reservoir_matrix = zeros(T, dims...) - @assert length(dims) == 2 && dims[1] == dims[2] "The dimensions must define a square matrix (e.g., (100, 100))" + @assert length(dims) == 2&&dims[1] == dims[2] "The dimensions must define a square matrix (e.g., (100, 100))" for i in 1:(dims[1] - 1) reservoir_matrix[i + 1, i] = weight @@ -70,7 +70,7 @@ function delay_line(rng::AbstractRNG, end """ - delay_line_backward_reservoir(rng::AbstractRNG, ::Type{T}, dims::Integer...; + delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight = T(0.1), fb_weight = T(0.2)) where {T <: Number} Create a delay line backward reservoir with the specified by `dims` and weights. Creates a matrix with backward connections @@ -92,12 +92,13 @@ Reservoir matrix with the dimensions specified by `dims` and weights. [^Rodan2010]: Rodan, Ali, and Peter Tino. "Minimum complexity echo state network." IEEE transactions on neural networks 22.1 (2010): 131-144. """ -function delay_line_backward_reservoir(rng::AbstractRNG, +function delay_line_backward(rng::AbstractRNG, ::Type{T}, dims::Integer...; - weight = T(0.1), + weight = T(0.1), fb_weight = T(0.2)) where {T <: Number} - reservoir_matrix = zeros(res_size, res_size) + res_size = first(dims) + reservoir_matrix = zeros(T, dims...) for i in 1:(res_size - 1) reservoir_matrix[i + 1, i] = weight @@ -107,9 +108,8 @@ function delay_line_backward_reservoir(rng::AbstractRNG, return reservoir_matrix end - """ - cycle_jumps_reservoir(rng::AbstractRNG, ::Type{T}, dims::Integer...; + cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; cycle_weight = T(0.1), jump_weight = T(0.1), jump_size = 3) where {T <: Number} Create a cycle jumps reservoir with the specified dimensions, cycle weight, jump weight, and jump size. @@ -129,25 +129,25 @@ Reservoir matrix with the specified dimensions, cycle weight, jump weight, and j [^Rodan2012]: Rodan, Ali, and Peter Tiňo. "Simple deterministically constructed cycle reservoirs with regular jumps." Neural computation 24.7 (2012): 1822-1852. """ -function cycle_jumps_reservoir(rng::AbstractRNG, +function cycle_jumps(rng::AbstractRNG, ::Type{T}, dims::Integer...; - cycle_weight = T(0.1), - jump_weight = T(0.1), - jump_size = T(3)) where {T <: Number} - + cycle_weight::Number = T(0.1), + jump_weight::Number = T(0.1), + jump_size::Int = 3) where {T <: Number} + res_size = first(dims) reservoir_matrix = zeros(T, dims...) - for i in 1:(dims[1] - 1) + for i in 1:(res_size - 1) reservoir_matrix[i + 1, i] = cycle_weight end - reservoir_matrix[1, dims[1]] = cycle_weight + reservoir_matrix[1, res_size] = cycle_weight - for i in 1:jump_size:(dims[1] - jump_size) - tmp = (i + jump_size) % dims[1] + for i in 1:jump_size:(res_size - jump_size) + tmp = (i + jump_size) % res_size if tmp == 0 - tmp = dims[1] + tmp = res_size end reservoir_matrix[i, tmp] = jump_weight reservoir_matrix[tmp, i] = jump_weight @@ -156,9 +156,8 @@ function cycle_jumps_reservoir(rng::AbstractRNG, return reservoir_matrix end - """ - simple_cycle_reservoir(rng::AbstractRNG, ::Type{T}, dims::Integer...; + simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight = T(0.1)) where {T <: Number} Create a simple cycle reservoir with the specified dimensions and weight. @@ -176,7 +175,7 @@ Reservoir matrix with the dimensions specified by `dims` and weights. 
[^Rodan2010]: Rodan, Ali, and Peter Tino. "Minimum complexity echo state network." IEEE transactions on neural networks 22.1 (2010): 131-144. """ -function simple_cycle_reservoir(rng::AbstractRNG, +function simple_cycle(rng::AbstractRNG, ::Type{T}, dims::Integer...; weight = T(0.1)) where {T <: Number} @@ -190,11 +189,8 @@ function simple_cycle_reservoir(rng::AbstractRNG, return reservoir_matrix end - - - """ - pseudo_svd_reservoir(rng::AbstractRNG, ::Type{T}, dims::Integer...; + pseudo_svd(rng::AbstractRNG, ::Type{T}, dims::Integer...; max_value, sparsity, sorted = true, reverse_sort = false) where {T <: Number} Returns an initializer to build a sparse reservoir matrix with the given `sparsity` by using a pseudo-SVD approach as described in [^yang]. @@ -216,36 +212,45 @@ This reservoir initialization method, based on a pseudo-SVD approach, is inspire [^yang]: Yang, Cuili, et al. "_Design of polynomial echo state networks for time series prediction._" Neurocomputing 290 (2018): 148-160. """ -function pseudo_svd_reservoir(rng::AbstractRNG, +function pseudo_svd(rng::AbstractRNG, ::Type{T}, - dims::Integer...; - max_value, sparsity; - sorted = true, - reverse_sort = false) where {T <: Number} - - reservoir_matrix = create_diag( dims[1], max_value, sorted = sorted, reverse_sort = reverse_sort) - tmp_sparsity = get_sparsity(reservoir_matrix, dims[1]) + dims::Integer...; + max_value::Number = T(1.0), + sparsity::Number = 0.1, + sorted::Bool = true, + reverse_sort::Bool = false) where {T <: Number} + reservoir_matrix = create_diag(dims[1], + max_value, + T; + sorted = sorted, + reverse_sort = reverse_sort) + tmp_sparsity = get_sparsity(reservoir_matrix, dims[1]) while tmp_sparsity <= sparsity - reservoir_matrix *= create_qmatrix(dims[1], rand(1:dims[1]), rand(1:dims[1]), rand() * 2 - 1) + reservoir_matrix *= create_qmatrix(dims[1], + rand(1:dims[1]), + rand(1:dims[1]), + rand(T) * T(2) - T(1), + T) tmp_sparsity = get_sparsity(reservoir_matrix, dims[1]) end return reservoir_matrix end -function create_diag(dim, max_value; sorted = true, reverse_sort = false) - diagonal_matrix = zeros(dim, dim) +function create_diag(dim::Number, max_value::Number, ::Type{T}; + sorted::Bool = true, reverse_sort::Bool = false) where {T <: Number} + diagonal_matrix = zeros(T, dim, dim) if sorted == true if reverse_sort == true - diagonal_values = sort(rand(dim) .* max_value, rev = true) + diagonal_values = sort(rand(T, dim) .* max_value, rev = true) diagonal_values[1] = max_value else - diagonal_values = sort(rand(dim) .* max_value) + diagonal_values = sort(rand(T, dim) .* max_value) diagonal_values[end] = max_value end else - diagonal_values = rand(dim) .* max_value + diagonal_values = rand(T, dim) .* max_value end for i in 1:dim @@ -255,8 +260,12 @@ function create_diag(dim, max_value; sorted = true, reverse_sort = false) return diagonal_matrix end -function create_qmatrix(dim, coord_i, coord_j, theta) - qmatrix = zeros(dim, dim) +function create_qmatrix(dim::Number, + coord_i::Number, + coord_j::Number, + theta::Number, + ::Type{T}) where {T <: Number} + qmatrix = zeros(T, dim, dim) for i in 1:dim qmatrix[i, i] = 1.0 @@ -273,8 +282,6 @@ function get_sparsity(M, dim) return size(M[M .!= 0], 1) / (dim * dim - size(M[M .!= 0], 1)) #nonzero/zero elements end - - # from WeightInitializers.jl, TODO @MartinuzziFrancesco consider importing package function _default_rng() @static if VERSION >= v"1.7" diff --git a/src/esn/hybridesn.jl b/src/esn/hybridesn.jl index f29028fc..37b40d0c 100644 --- a/src/esn/hybridesn.jl +++ 
b/src/esn/hybridesn.jl @@ -52,23 +52,20 @@ function KnowledgeModel(prior_model, u0, tspan, datasize) return KnowledgeModel(prior_model, u0, tspan, dt, datasize, model_data) end -function HybridESN( - model, - train_data, - in_size::Int, - res_size::Int; - input_layer = scaled_rand, - reservoir = rand_sparse, - bias = zeros64, - reservoir_driver = RNN(), - nla_type = NLADefault(), - states_type = StandardStates(), - washout = 0, - rng = _default_rng(), - T = Float32, - matrix_type = typeof(train_data) -) - +function HybridESN(model, + train_data, + in_size::Int, + res_size::Int; + input_layer = scaled_rand, + reservoir = rand_sparse, + bias = zeros64, + reservoir_driver = RNN(), + nla_type = NLADefault(), + states_type = StandardStates(), + washout = 0, + rng = _default_rng(), + T = Float32, + matrix_type = typeof(train_data)) train_data = vcat(train_data, model.model_data[:, 1:(end - 1)]) if states_type isa AbstractPaddedStates @@ -94,10 +91,9 @@ function HybridESN( end function (hesn::HybridESN)(prediction::AbstractPrediction, - output_layer::AbstractOutputLayer; - last_state = hesn.states[:, [end]], - kwargs...) - + output_layer::AbstractOutputLayer; + last_state = hesn.states[:, [end]], + kwargs...) km = hesn.model pred_len = prediction.prediction_len @@ -114,11 +110,10 @@ function (hesn::HybridESN)(prediction::AbstractPrediction, end function train(hesn::HybridESN, - target_data, - training_method = StandardRidge(0.0)) - + target_data, + training_method = StandardRidge(0.0)) states = vcat(hesn.states, hesn.model.model_data[:, 2:end]) states_new = hesn.states_type(hesn.nla_type, states, hesn.train_data[:, 1:end]) return _train(states_new, target_data, training_method) -end \ No newline at end of file +end diff --git a/test/esn/test_drivers.jl b/test/esn/test_drivers.jl index db1c7b01..6dd7f7a2 100644 --- a/test/esn/test_drivers.jl +++ b/test/esn/test_drivers.jl @@ -1,65 +1,40 @@ using ReservoirComputing, Random, Statistics, NNlib -const res_size = 20 +const res_size = 50 const ts = 0.0:0.1:50.0 const data = sin.(ts) const train_len = 400 const input_data = reduce(hcat, data[1:(train_len - 1)]) const target_data = reduce(hcat, data[2:train_len]) const predict_len = 100 -const test = reduce(hcat, data[(train_len + 1):(train_len + predict_len)]) +const test_data = reduce(hcat, data[(train_len + 1):(train_len + predict_len)]) const training_method = StandardRidge(10e-6) - Random.seed!(77) -esn = ESN(input_data; - reservoir = RandSparseReservoir(res_size, 1.2, 0.1), - reservoir_driver = GRU(variant = FullyGated(), - reservoir = [ - RandSparseReservoir(res_size, 1.0, 0.5), - RandSparseReservoir(res_size, 1.2, 0.1), - ])) - -output_layer = train(esn, target_data, training_method) -output = esn(Predictive(target_data), output_layer, initial_conditions = target_data[1]) -@test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.11 - -esn = ESN(input_data; - reservoir = RandSparseReservoir(res_size, 1.2, 0.1), - reservoir_driver = GRU(variant = Minimal(), - reservoir = RandSparseReservoir(res_size, 1.0, 0.5), - inner_layer = DenseLayer(), - bias = DenseLayer())) - -output_layer = train(esn, target_data, training_method) -output = esn(Predictive(target_data), output_layer, initial_conditions = target_data[1]) -@test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.11 - -#multiple rnn -esn = ESN(input_data; - reservoir = RandSparseReservoir(res_size, 1.2, 0.1), - reservoir_driver = MRNN(activation_function = (tanh, sigmoid), - scaling_factor = (0.8, 0.1))) -output_layer 
= train(esn, target_data, training_method) -output = esn(Predictive(target_data), output_layer, initial_conditions = target_data[1]) -@test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.11 - -#deep esn -esn = ESN(input_data; - reservoir = [ - RandSparseReservoir(res_size, 1.2, 0.1), - RandSparseReservoir(res_size, 1.2, 0.1), - ]) -output_layer = train(esn, target_data, training_method) -output = esn(Predictive(target_data), output_layer, initial_conditions = target_data[1]) -@test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.11 -esn = ESN(input_data; - reservoir = [ - RandSparseReservoir(res_size, 1.2, 0.1), - RandSparseReservoir(res_size, 1.2, 0.1), - ], - input_layer = [DenseLayer(), DenseLayer()], - bias = [NullLayer(), NullLayer()]) -output_layer = train(esn, target_data, training_method) -output = esn(Predictive(target_data), output_layer, initial_conditions = target_data[1]) -@test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.11 +function test_esn(input_data, target_data, training_method, esn_config) + esn = ESN(input_data, 1, res_size; esn_config...) + output_layer = train(esn, target_data, training_method) + + output = esn(Predictive(target_data), output_layer, initial_conditions = target_data[1]) + @test mean(abs.(target_data .- output)) ./ mean(abs.(target_data)) < 0.15 +end + +esn_configs = [ + Dict(:reservoir => rand_sparse(; radius = 1.2), + :reservoir_driver => GRU(variant = FullyGated(), + reservoir = [ + rand_sparse(; radius = 1.0, sparsity = 0.5), + rand_sparse(; radius = 1.2, sparsity = 0.1), + ])), + Dict(:reservoir => rand_sparse(; radius = 1.2), + :reservoir_driver => GRU(variant = Minimal(), + reservoir = rand_sparse(; radius = 1.0, sparsity = 0.5), + inner_layer = scaled_rand)), + Dict(:reservoir => rand_sparse(; radius = 1.2), + :reservoir_driver => MRNN(activation_function = (tanh, sigmoid), + scaling_factor = (0.8, 0.1))), +] + +for config in esn_configs + test_esn(input_data, target_data, training_method, config) +end diff --git a/test/esn/test_inits.jl b/test/esn/test_inits.jl new file mode 100644 index 00000000..789f71e1 --- /dev/null +++ b/test/esn/test_inits.jl @@ -0,0 +1,90 @@ +using ReservoirComputing +using LinearAlgebra +using Random + +const res_size = 30 +const in_size = 3 +const radius = 1.0 +const sparsity = 0.1 +const weight = 0.2 +const jump_size = 3 +const rng = Random.default_rng() + +function check_radius(matrix, target_radius; tolerance = 1e-5) + eigenvalues = eigvals(matrix) + spectral_radius = maximum(abs.(eigenvalues)) + return isapprox(spectral_radius, target_radius, atol = tolerance) +end + +ft = [Float16, Float32, Float64] +reservoir_inits = [ + rand_sparse, + delay_line, + delay_line_backward, + cycle_jumps, + simple_cycle, + pseudo_svd, +] +input_inits = [ + scaled_rand, + weighted_init, + sparse_init, + minimal_init, + minimal_init(; sampling_type = :irrational), +] + +@testset "Reservoir Initializers" begin + @testset "Sizes and types: $init $T" for init in reservoir_inits, T in ft + #sizes + @test size(init(res_size, res_size)) == (res_size, res_size) + @test size(init(rng, res_size, res_size)) == (res_size, res_size) + #types + @test eltype(init(T, res_size, res_size)) == T + @test eltype(init(rng, T, res_size, res_size)) == T + #closure + cl = init(rng) + @test eltype(cl(T, res_size, res_size)) == T + end + + @testset "Check spectral radius" begin + sp = rand_sparse(res_size, res_size) + @test check_radius(sp, radius) + end + + @testset "Minimum complexity: $init" for init 
in [ + delay_line, + delay_line_backward, + cycle_jumps, + simple_cycle, + ] + dl = init(res_size, res_size) + if init === delay_line_backward + @test unique(dl) == Float32.([0.0, 0.1, 0.2]) + else + @test unique(dl) == Float32.([0.0, 0.1]) + end + end +end + +# TODO: @MartinuzziFrancesco Missing tests for informed_init +@testset "Input Initializers" begin + @testset "Sizes and types: $init $T" for init in input_inits, T in ft + #sizes + @test size(init(res_size, in_size)) == (res_size, in_size) + @test size(init(rng, res_size, in_size)) == (res_size, in_size) + #types + @test eltype(init(T, res_size, in_size)) == T + @test eltype(init(rng, T, res_size, in_size)) == T + #closure + cl = init(rng) + @test eltype(cl(T, res_size, in_size)) == T + end + + @testset "Minimum complexity: $init" for init in [ + minimal_init, + minimal_init(; sampling_type = :irrational), + ] + dl = init(res_size, in_size) + @test sort(unique(dl)) == Float32.([-0.1, 0.1]) + end +end diff --git a/test/esn/test_input_layers.jl b/test/esn/test_input_layers.jl deleted file mode 100644 index adc655da..00000000 --- a/test/esn/test_input_layers.jl +++ /dev/null @@ -1,55 +0,0 @@ -using ReservoirComputing - -const res_size = 10 -const in_size = 3 -const scaling = 0.1 -const weight = 0.2 - -#testing WeightedLayer implicit and esplicit constructors -input_constructor = WeightedLayer(scaling) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (Int(floor(res_size / in_size) * in_size), in_size) -@test maximum(input_matrix) <= scaling - -input_constructor = WeightedLayer(scaling = scaling) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (Int(floor(res_size / in_size) * in_size), in_size) -@test maximum(input_matrix) <= scaling - -#testing DenseLayer implicit and esplicit constructors -input_constructor = DenseLayer(scaling) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (res_size, in_size) -@test maximum(input_matrix) <= scaling - -input_constructor = DenseLayer(scaling = scaling) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (res_size, in_size) -@test maximum(input_matrix) <= scaling - -#testing SparseLayer implicit and esplicit constructors -input_constructor = SparseLayer(scaling) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (res_size, in_size) -@test maximum(input_matrix) <= scaling - -input_constructor = SparseLayer(scaling = scaling) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (res_size, in_size) -@test maximum(input_matrix) <= scaling - -#testing MinimumLayer implicit and esplicit constructors -input_constructor = MinimumLayer(weight) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (res_size, in_size) -@test maximum(input_matrix) == weight - -input_constructor = MinimumLayer(weight = weight) -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (res_size, in_size) -@test maximum(input_matrix) == weight - -#testing NullLayer constructor -input_constructor = NullLayer() -input_matrix = create_layer(input_constructor, res_size, in_size) -@test size(input_matrix) == (res_size, in_size) diff --git a/test/esn/test_reservoirs.jl b/test/esn/test_reservoirs.jl deleted file mode 100644 index debd9be0..00000000 --- a/test/esn/test_reservoirs.jl +++ 
/dev/null @@ -1,41 +0,0 @@ -using ReservoirComputing -using LinearAlgebra -using Random -include("../utils.jl") - -const res_size = 20 -const radius = 1.0 -const sparsity = 0.1 -const weight = 0.2 -const jump_size = 3 -const rng = Random.default_rng() - -dtypes = [Float16, Float32, Float64] -reservoir_inits = [rand_sparse, delay_line] - -@testset "Sizes and types" begin - for init in reservoir_inits - for dt in dtypes - #sizes - @test size(init(res_size, res_size)) == (res_size, res_size) - @test size(init(rng, res_size, res_size)) == (res_size, res_size) - #types - @test eltype(init(dt, res_size, res_size)) == dt - @test eltype(init(rng, dt, res_size, res_size)) == dt - #closure - cl = init(rng) - @test cl(dt, res_size, res_size) isa AbstractArray{dt} - end - end -end - -@testset "rand_sparse" begin - sp = rand_sparse(res_size, res_size) - @test check_radius(sp, radius) -end - -@testset "delay_line" begin - dl = delay_line(res_size, res_size) - @test unique(dl) == Float32.([0.0, 0.1]) -end - diff --git a/test/runtests.jl b/test/runtests.jl index 2d114e99..4dfad5c4 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -11,11 +11,8 @@ using Test end @testset "Echo State Networks" begin - @safetestset "ESN Input Layers" begin - include("esn/test_input_layers.jl") - end - @safetestset "ESN Reservoirs" begin - include("esn/test_reservoirs.jl") + @safetestset "Test initializers" begin + include("esn/test_inits.jl") end @safetestset "ESN States" begin include("esn/test_states.jl") diff --git a/test/test_states.jl b/test/test_states.jl index c8808bbf..1215d47b 100644 --- a/test/test_states.jl +++ b/test/test_states.jl @@ -1,51 +1,37 @@ using ReservoirComputing -#padding test_array = [1, 2, 3, 4, 5, 6, 7, 8, 9] -standard_array = zeros(length(test_array), 1) extension = [0, 0, 0] -padded_array = zeros(length(test_array) + 1, 1) -extended_array = zeros(length(test_array) + length(extension), 1) -padded_extended_array = zeros(length(test_array) + length(extension) + 1, 1) padding = 10.0 -#testing non linear algos -nla_array = ReservoirComputing.nla(NLADefault(), test_array) -@test nla_array == test_array - -nla_array = ReservoirComputing.nla(NLAT1(), test_array) -@test nla_array == [1, 2, 9, 4, 25, 6, 49, 8, 81] - -nla_array = ReservoirComputing.nla(NLAT2(), test_array) -@test nla_array == [1, 2, 2, 4, 12, 6, 30, 8, 9] - -nla_array = ReservoirComputing.nla(NLAT3(), test_array) -@test nla_array == [1, 2, 8, 4, 24, 6, 48, 8, 9] - -#testing padding and extension -states_type = StandardStates() -standard_array = states_type(NLADefault(), test_array, extension) -@test standard_array == test_array - -states_type = PaddedStates(padding = padding) -padded_array = states_type(NLADefault(), test_array, extension) -@test padded_array == reshape(vcat(padding, test_array), length(test_array) + 1, 1) - -states_type = PaddedStates(padding) -padded_array = states_type(NLADefault(), test_array, extension) -@test padded_array == reshape(vcat(padding, test_array), length(test_array) + 1, 1) - -states_type = PaddedExtendedStates(padding = padding) -padded_extended_array = states_type(NLADefault(), test_array, extension) -@test padded_extended_array == reshape(vcat(padding, extension, test_array), - length(test_array) + length(extension) + 1, 1) - -states_type = PaddedExtendedStates(padding) -padded_extended_array = states_type(NLADefault(), test_array, extension) -@test padded_extended_array == reshape(vcat(padding, extension, test_array), - length(test_array) + length(extension) + 1, 1) - -states_type = 
ExtendedStates() -extended_array = states_type(NLADefault(), test_array, extension) -@test extended_array == vcat(extension, test_array) -#reshape(vcat(extension, test_array), length(test_array)+length(extension), 1) +nlas = [(NLADefault(), test_array), + (NLAT1(), [1, 2, 9, 4, 25, 6, 49, 8, 81]), + (NLAT2(), [1, 2, 2, 4, 12, 6, 30, 8, 9]), + (NLAT3(), [1, 2, 8, 4, 24, 6, 48, 8, 9])] + +pes = [(StandardStates(), test_array), + (PaddedStates(padding = padding), + reshape(vcat(padding, test_array), length(test_array) + 1, 1)), + (PaddedExtendedStates(padding = padding), + reshape(vcat(padding, extension, test_array), + length(test_array) + length(extension) + 1, + 1)), + (ExtendedStates(), vcat(extension, test_array))] + +function test_nla(algo, expected_output) + nla_array = ReservoirComputing.nla(algo, test_array) + @test nla_array == expected_output +end + +function test_states_type(state_type, expected_output) + states_output = state_type(NLADefault(), test_array, extension) + @test states_output == expected_output +end + +@testset "Nonlinear Algorithms Testing" for (algo, expected_output) in nlas + test_nla(algo, expected_output) +end + +@testset "States Testing" for (state_type, expected_output) in pes + test_states_type(state_type, expected_output) +end diff --git a/test/utils.jl b/test/utils.jl deleted file mode 100644 index 9ef6f360..00000000 --- a/test/utils.jl +++ /dev/null @@ -1,5 +0,0 @@ -function check_radius(matrix, target_radius; tolerance=1e-5) - eigenvalues = eigvals(matrix) - spectral_radius = maximum(abs.(eigenvalues)) - return isapprox(spectral_radius, target_radius, atol=tolerance) -end \ No newline at end of file
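
---

For reviewers, a minimal usage sketch of the renamed initializer API introduced by this patch, assembled from the fallback methods in `src/ReservoirComputing.jl`, the README hunk, and `test/esn/test_inits.jl`. This is not part of the patch: the `train_data` array, its shape, and the keyword values below are illustrative placeholders, and exact defaults follow whatever the merged initializers define.

```julia
using ReservoirComputing, Random

res_size, in_size = 300, 3

# WeightInitializers-style call signatures provided by the fallbacks:
W   = rand_sparse(res_size, res_size)                         # default rng, Float32 eltype
W64 = rand_sparse(Random.default_rng(), Float64, res_size, res_size)
Win = minimal_init(res_size, in_size; sampling_type = :irrational)

# Keyword-only calls return a partially applied initializer (a closure),
# which is what the ESN constructor expects for `reservoir` and `input_layer`:
train_data = rand(Float32, in_size, 500)    # illustrative data: 3 features x 500 steps
esn = ESN(train_data, in_size, res_size;
    reservoir = rand_sparse(; radius = 1.2, sparsity = 6 / res_size),
    input_layer = weighted_init,
    nla_type = NLAT2())
```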