diff --git a/Project.toml b/Project.toml
index 3953f3b8..20f449aa 100644
--- a/Project.toml
+++ b/Project.toml
@@ -6,13 +6,10 @@ version = "0.10.5"
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
 CellularAutomata = "878138dc-5b27-11ea-1a71-cb95d38d6b29"
-Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
-PartialFunctions = "570af359-4316-4cb7-8c74-252c00c2016b"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
-Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
 WeightInitializers = "d49dbf32-c5c2-4618-8acc-27bb2598ef2d"
 
@@ -29,12 +26,10 @@ Adapt = "4.1.1"
 Aqua = "0.8"
 CellularAutomata = "0.0.2"
 DifferentialEquations = "7.15.0"
-Distances = "0.10"
 LIBSVM = "0.8"
 LinearAlgebra = "1.10"
 MLJLinearModels = "0.9.2, 0.10"
 NNlib = "0.9.26"
-PartialFunctions = "1.2"
 Random = "1.10"
 Reexport = "1.2.2"
 SafeTestsets = "0.1"
@@ -51,7 +46,9 @@ LIBSVM = "b1bec4e5-fd48-53fe-b0cb-9723c09d164b"
 MLJLinearModels = "6ee0df7b-362f-4a72-a706-9e79364fb692"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
+Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Aqua", "Test", "SafeTestsets", "Random", "DifferentialEquations", "MLJLinearModels", "LIBSVM"]
+test = ["Aqua", "Test", "SafeTestsets", "Random", "DifferentialEquations",
+    "MLJLinearModels", "LIBSVM", "Statistics"]
diff --git a/src/ReservoirComputing.jl b/src/ReservoirComputing.jl
index 3c89ad62..2c507d60 100644
--- a/src/ReservoirComputing.jl
+++ b/src/ReservoirComputing.jl
@@ -1,14 +1,11 @@
 module ReservoirComputing
 
-using Adapt
-using CellularAutomata
-using Distances
-using LinearAlgebra
-using NNlib
-using PartialFunctions
-using Random
+using Adapt: adapt
+using CellularAutomata: CellularAutomaton
+using LinearAlgebra: eigvals, mul!, I
+using NNlib: fast_act, sigmoid
+using Random: Random, AbstractRNG
 using Reexport: Reexport, @reexport
-using Statistics
 using StatsBase: sample
 using WeightInitializers: DeviceAgnostic, PartialFunction, Utils
 @reexport using WeightInitializers
diff --git a/src/esn/deepesn.jl b/src/esn/deepesn.jl
index 4b66975e..d9018128 100644
--- a/src/esn/deepesn.jl
+++ b/src/esn/deepesn.jl
@@ -77,7 +77,7 @@ function DeepESN(train_data,
         matrix_type=typeof(train_data))
     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
-        train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))),
+        train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
             train_data)
     end
 
diff --git a/src/esn/esn.jl b/src/esn/esn.jl
index 6e88e023..07824cc4 100644
--- a/src/esn/esn.jl
+++ b/src/esn/esn.jl
@@ -66,7 +66,7 @@ function ESN(train_data,
         matrix_type=typeof(train_data))
     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
-        train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))),
+        train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
             train_data)
     end
 
diff --git a/src/esn/esn_predict.jl b/src/esn/esn_predict.jl
index ba6c80da..46e30e95 100644
--- a/src/esn/esn_predict.jl
+++ b/src/esn/esn_predict.jl
@@ -93,16 +93,16 @@ end
 
 function allocate_outpad(hesn::HybridESN, states_type, out)
     pad_length = length(out) + size(hesn.model.model_data[:, 1], 1)
-    out_tmp = Adapt.adapt(typeof(out), zeros(pad_length))
+    out_tmp = adapt(typeof(out), zeros(pad_length))
     return allocate_singlepadding(states_type, out_tmp)
 end
 
 function allocate_singlepadding(::AbstractPaddedStates, out)
-    Adapt.adapt(typeof(out), zeros(size(out, 1) + 1))
+    adapt(typeof(out), zeros(size(out, 1) + 1))
 end
 function allocate_singlepadding(::StandardStates, out)
-    Adapt.adapt(typeof(out), zeros(size(out, 1)))
+    adapt(typeof(out), zeros(size(out, 1)))
 end
 function allocate_singlepadding(::ExtendedStates, out)
-    Adapt.adapt(typeof(out), zeros(size(out, 1)))
+    adapt(typeof(out), zeros(size(out, 1)))
 end
diff --git a/src/esn/esn_reservoir_drivers.jl b/src/esn/esn_reservoir_drivers.jl
index edbed914..46e4cda8 100644
--- a/src/esn/esn_reservoir_drivers.jl
+++ b/src/esn/esn_reservoir_drivers.jl
@@ -30,9 +30,9 @@ function create_states(reservoir_driver::AbstractReservoirDriver,
     train_len = size(train_data, 2) - washout
     res_size = size(reservoir_matrix, 1)
 
-    states = Adapt.adapt(typeof(train_data), zeros(res_size, train_len))
+    states = adapt(typeof(train_data), zeros(res_size, train_len))
     tmp_array = allocate_tmp(reservoir_driver, typeof(train_data), res_size)
-    _state = Adapt.adapt(typeof(train_data), zeros(res_size, 1))
+    _state = adapt(typeof(train_data), zeros(res_size, 1))
 
     for i in 1:washout
         yv = @view train_data[:, i]
@@ -59,9 +59,9 @@ function create_states(reservoir_driver::AbstractReservoirDriver,
     train_len = size(train_data, 2) - washout
     res_size = sum([size(reservoir_matrix[i], 1) for i in 1:length(reservoir_matrix)])
 
-    states = Adapt.adapt(typeof(train_data), zeros(res_size, train_len))
+    states = adapt(typeof(train_data), zeros(res_size, train_len))
     tmp_array = allocate_tmp(reservoir_driver, typeof(train_data), res_size)
-    _state = Adapt.adapt(typeof(train_data), zeros(res_size))
+    _state = adapt(typeof(train_data), zeros(res_size))
 
     for i in 1:washout
         for j in 1:length(reservoir_matrix)
@@ -108,7 +108,7 @@ echo state networks (`ESN`).
 
   - `leaky_coefficient`: The leaky coefficient used in the RNN. Defaults to 1.0.
 """
-function RNN(; activation_function=NNlib.fast_act(tanh), leaky_coefficient=1.0)
+function RNN(; activation_function=fast_act(tanh), leaky_coefficient=1.0)
     RNN(activation_function, leaky_coefficient)
 end
 
@@ -142,7 +142,7 @@ function next_state!(out, rnn::RNN, x, y, W::Vector, W_in, b, tmp_array)
 end
 
 function allocate_tmp(::RNN, tmp_type, res_size)
-    return [Adapt.adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
+    return [adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
 end
 
 #multiple RNN driver
@@ -210,7 +210,7 @@ function next_state!(out, mrnn::MRNN, x, y, W, W_in, b, tmp_array)
 end
 
 function allocate_tmp(::MRNN, tmp_type, res_size)
-    return [Adapt.adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
+    return [adapt(tmp_type, zeros(res_size, 1)) for i in 1:2]
 end
 
 abstract type AbstractGRUVariant end
@@ -280,7 +280,7 @@ This driver is based on the GRU architecture [^Cho2014].
     "_Learning phrase representations using RNN encoder-decoder for statistical
     machine translation._" arXiv preprint arXiv:1406.1078 (2014).
 """
-function GRU(; activation_function=[NNlib.sigmoid, NNlib.sigmoid, tanh],
+function GRU(; activation_function=[sigmoid, sigmoid, tanh],
         inner_layer=fill(scaled_rand, 2),
         reservoir=fill(rand_sparse, 2),
         bias=fill(scaled_rand, 2),
@@ -344,7 +344,7 @@ function next_state!(out, gru::GRUParams, x, y, W, W_in, b, tmp_array)
 end
 
 function allocate_tmp(::GRUParams, tmp_type, res_size)
-    return [Adapt.adapt(tmp_type, zeros(res_size, 1)) for i in 1:9]
+    return [adapt(tmp_type, zeros(res_size, 1)) for i in 1:9]
 end
 
 #W=U, W_in=W in papers. x=h, and y=x. I know, it's confusing. (our notation is on the left)
diff --git a/src/esn/hybridesn.jl b/src/esn/hybridesn.jl
index 5453c814..129b4f8f 100644
--- a/src/esn/hybridesn.jl
+++ b/src/esn/hybridesn.jl
@@ -109,7 +109,7 @@ function HybridESN(model,
 
     if states_type isa AbstractPaddedStates
         in_size = size(train_data, 1) + 1
-        train_data = vcat(Adapt.adapt(matrix_type, ones(1, size(train_data, 2))),
+        train_data = vcat(adapt(matrix_type, ones(1, size(train_data, 2))),
             train_data)
     else
         in_size = size(train_data, 1)
diff --git a/src/predict.jl b/src/predict.jl
index 4ba275ea..ca0f782c 100644
--- a/src/predict.jl
+++ b/src/predict.jl
@@ -116,7 +116,7 @@ end
 
 #single matrix for other training methods
 function output_storing(training_method, out_size, prediction_len, storing_type)
-    return Adapt.adapt(storing_type, zeros(out_size, prediction_len))
+    return adapt(storing_type, zeros(out_size, prediction_len))
 end
 
 #general storing -> single matrix
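
Reviewer note: a minimal sketch (not part of the patch) of the explicit-import style this diff moves to, reusing the names now imported in src/ReservoirComputing.jl. The `Demo` module and `spectral_radius` helper are illustrative only, not package API.

# With blanket `using LinearAlgebra`, every exported name lands in scope;
# the explicit form below documents the module's actual dependency surface
# and makes name clashes visible at import time.
module Demo

using LinearAlgebra: eigvals, mul!, I  # only these three names are bound

# Spectral radius of a reservoir matrix; `eigvals` resolves to the
# explicitly imported binding above.
spectral_radius(W::AbstractMatrix) = maximum(abs, eigvals(W))

end # module

Demo.spectral_radius([0.0 1.0; -1.0 0.0])  # == 1.0 (eigenvalues are ±im)

The same narrowing is why `Adapt.adapt(...)` call sites throughout the diff become plain `adapt(...)`: the function is now bound directly by `using Adapt: adapt`.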