lotka_volterra.jl
using DifferentialEquations
using Plots
using Flux, DiffEqFlux
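# Note: this script uses the legacy Tracker-based Flux/DiffEqFlux API
# (param, Tracker.collect, diffeq_rd, Flux.data).
# Lotka-Volterra predator-prey model implemented below:
#   dx/dt =  α*x - β*x*y   (prey x)
#   dy/dt = -δ*y + γ*x*y   (predator y)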
function lotka_volterra(du,u,p,t)
    x, y = u
    α, β, δ, γ = p
    du[1] = dx = α*x - β*x*y
    du[2] = dy = -δ*y + γ*x*y
end
# Initial conditions, time span and parameters (in the order α, β, δ, γ)
u₀ = [1.0,1.0]
timespan = (0.0,10.0)
parameters = [1.5,1.0,3.0,1.0]
# Set up the ODE problem
problem = ODEProblem(lotka_volterra, u₀, timespan, parameters)
# Solve the ODE problem and plot the trajectory
solution = solve(problem)
plot(solution)
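# Optional check, not in the original script: a DifferentialEquations.jl solution
# object can be interpolated at arbitrary times and exposes its saved states, e.g.
@show solution(5.0)    # interpolated state [x, y] at t = 5.0
@show solution.t[end]  # final saved time point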
# Now the inverse problem: tune the parameters against a cosine-based target.
# (The loss below penalises x - cos(x) for every saved solution value x.)
p = param([2.2, 1.0, 2.0, 0.4])   # tracked parameter vector (legacy Flux `param`)
params = Flux.Params([p])
# "One-layer network": the forward pass is the ODE solve with the tracked parameters
function predict_rd()
    Tracker.collect(diffeq_rd(p, problem, Tsit5(), saveat=0.1))
end
# Loss function: sum of squared x - cos(x) over the saved solution values
loss_rd() = sum(abs2, x - cos(x) for x in predict_rd())
# Optimisation: 100 iterations (the loss takes no data, so each "batch" is an empty tuple)
data = Iterators.repeated((), 100)
opt = ADAM(0.1)
call_back = function () # callback to observe training progress
    display(loss_rd())
    # Re-solve with the current parameter values (Flux.data strips the Tracker wrapper) and plot
    display(plot(solve(remake(problem, p = Flux.data(p)), Tsit5(), saveat=0.1), ylim=(0,6)))
end
# training
call_back()
Flux.train!(loss_rd, params, data, opt, cb = call_back)
@show p
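# Hedged follow-up, not in the original script: extract the fitted parameter values
# (using Flux.data, as in the callback above) and solve once more for a final plot.
fitted_parameters = Flux.data(p)
fitted_solution = solve(remake(problem, p = fitted_parameters), Tsit5(), saveat=0.1)
display(plot(fitted_solution, ylim=(0,6)))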