From ed280a6296d1dae9b49c0d659c92fe7aecfa6cd7 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Fri, 2 Sep 2022 15:12:18 -0400 Subject: [PATCH 01/42] Modified 1D approx test to show get_argument bug --- test/direct_function_tests.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/direct_function_tests.jl b/test/direct_function_tests.jl index 8871e29eef..7287b4fdae 100644 --- a/test/direct_function_tests.jl +++ b/test/direct_function_tests.jl @@ -55,7 +55,7 @@ println("Approximation of function 1D 2") @parameters x @variables u(..) func(x) = @. cos(5pi * x) * x -eq = [u(x) ~ func(x)] +eq = [u(x) - u(0) ~ func(x)] bc = [u(0) ~ u(0)] x0 = 0 @@ -82,8 +82,8 @@ res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) dx = 0.01 xs = collect(x0:dx:x_end) func_s = func(xs) - -@test discretization.phi(xs', res.u)≈func(xs') rtol=0.01 +func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0., res.u) +@test func_approx(xs')≈func(xs') rtol=0.01 # plot(xs,func(xs)) # plot!(xs, discretization.phi(xs',res.u)') From 1c0f0d00e98ea699d083967a1e863dd86a5c997f Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 26 Oct 2022 22:17:25 -0400 Subject: [PATCH 02/42] Updated get_argument for eval with multiple inputs --- src/symbolic_utilities.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 199d2ebf8f..382a5df23d 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -131,7 +131,7 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa indvars = _args[2:end] var_ = is_integral ? 
:(u) : :($(Expr(:$, :u))) ex.args = if !multioutput - [var_, Symbol(:cord, num_depvar), :($θ), :phi] + [var_, Symbol(:cord, num_depvar), :($θ), :phi] #TODO: this should somehow use indvars else [ var_, @@ -439,8 +439,9 @@ function get_argument(eqs, dict_indvars, dict_depvars) vars = map(exprs) do expr _vars = map(depvar -> find_thing_in_expr(expr, depvar), collect(keys(dict_depvars))) f_vars = filter(x -> !isempty(x), _vars) - map(x -> first(x), f_vars) + #map(x -> first(x), f_vars) end + vars = [depvar for expr in vars for depvar in expr ] args_ = map(vars) do _vars ind_args_ = map(var -> var.args[2:end], _vars) syms = Set{Symbol}() From 63e0ddcb5c32de0a81d8b35271380ee9cc134ede Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 26 Oct 2022 22:18:51 -0400 Subject: [PATCH 03/42] Forced get_argument when strategy != Quadrature --- src/discretize.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discretize.jl b/src/discretize.jl index c06bc56a8c..0e776b2248 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -513,7 +513,7 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem, eqs = [eqs] end - pde_indvars = if strategy isa QuadratureTraining + pde_indvars = if true# strategy isa QuadratureTraining get_argument(eqs, dict_indvars, dict_depvars) else get_variables(eqs, dict_indvars, dict_depvars) From 4e7b1b862f4dd6a61ee429fe16d436d21ab7d45b Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 26 Oct 2022 22:19:15 -0400 Subject: [PATCH 04/42] Test file for fixing get_argument --- src/mwe.jl | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 src/mwe.jl diff --git a/src/mwe.jl b/src/mwe.jl new file mode 100644 index 0000000000..f3db432956 --- /dev/null +++ b/src/mwe.jl @@ -0,0 +1,39 @@ +using NeuralPDE +using ModelingToolkit + +@parameters x +@variables u(..) + +eqs = [u(x) - u(0) ~ 0.] +dict_indvars = Dict(:x => 1) +dict_depvars = Dict(:u => 1) +a1 = NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) +a2 = NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) +a3 = NeuralPDE.get_number(eqs, dict_indvars, dict_depvars) +a4 = NeuralPDE.pair(eqs[1], [:u], dict_depvars, dict_indvars) +println(eqs[1]) +println("Arguments: ", a1) +println("Variables: ", a2) +println("Numbers: ", a3) +println("Pair: ", a4) +println() + +eqs = [u(x) ~ 0.] +b1 = NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) +b2 = NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) +b3 = NeuralPDE.get_number(eqs, dict_indvars, dict_depvars) +println(eqs[1]) +println("Arguments: ", b1) +println("Variables: ", b2) +println("Numbers: ", b3) +println() + +eqs = [u(0) ~ 0.] 
+c1 = NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) +c2 = NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) +c3 = NeuralPDE.get_number(eqs, dict_indvars, dict_depvars) +println(eqs[1]) +println("Arguments: ", c1) +println("Variables: ", c2) +println("Numbers: ", c3) +println() \ No newline at end of file From 8a612dc941127d2e89226598a3641e170aba21c4 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 26 Oct 2022 22:19:44 -0400 Subject: [PATCH 05/42] Test file for debugging symbolic_discretize --- src/mwe2.jl | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 src/mwe2.jl diff --git a/src/mwe2.jl b/src/mwe2.jl new file mode 100644 index 0000000000..6782e5db4f --- /dev/null +++ b/src/mwe2.jl @@ -0,0 +1,45 @@ +using Flux, NeuralPDE, Test +using Optimization, OptimizationOptimJL, OptimizationOptimisers +using QuasiMonteCarlo +import ModelingToolkit: Interval, infimum, supremum +using DomainSets +using Random +import Lux + +Random.seed!(110) + +println("Approximation of function 1D") + +@parameters x +@variables u(..) +func(x) = @. cos(5pi * x) * x +eq = [u(x) - u(0) ~ func(x)] +#eq = [u(x) ~ func(x)] +bc = [u(0) ~ u(0)] + +x0 = 0 +x_end = 4 +domain = [x ∈ Interval(x0, x_end)] + +hidden = 20 +chain = Lux.Chain(Lux.Dense(1, hidden, Lux.sin), + Lux.Dense(hidden, hidden, Lux.sin), + Lux.Dense(hidden, hidden, Lux.sin), + Lux.Dense(hidden, 1)) + +strategy = NeuralPDE.GridTraining(0.01) + +discretization = NeuralPDE.PhysicsInformedNN(chain, strategy) +@named pde_system = PDESystem(eq, bc, domain, [x], [u(x)]) +sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) +prob = NeuralPDE.discretize(pde_system, discretization) + +res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500) +prob = remake(prob, u0 = res.minimizer) +res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) + +dx = 0.01 +xs = collect(x0:dx:x_end) +func_s = func(xs) +func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0., res.u) +@test func_approx(xs')≈func(xs') rtol=0.01 \ No newline at end of file From 83e2475cc96b4b1cdee752db6aaf95250c057e7c Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Sat, 31 Dec 2022 16:09:31 +1300 Subject: [PATCH 06/42] transform_expression uses indvars now --- src/discretize.jl | 2 +- src/symbolic_utilities.jl | 12 ++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/discretize.jl b/src/discretize.jl index 0e776b2248..c06bc56a8c 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -513,7 +513,7 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem, eqs = [eqs] end - pde_indvars = if true# strategy isa QuadratureTraining + pde_indvars = if strategy isa QuadratureTraining get_argument(eqs, dict_indvars, dict_depvars) else get_variables(eqs, dict_indvars, dict_depvars) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 382a5df23d..493e483101 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -19,10 +19,15 @@ julia> _dot_(e) dottable_(x) = Broadcast.dottable(x) dottable_(x::Function) = true +_vcat(x) = vcat(x) +dottable_(x::typeof(_vcat)) = false + _dot_(x) = x function _dot_(x::Expr) dotargs = Base.mapany(_dot_, x.args) - if x.head === :call && dottable_(x.args[1]) + if x.head === :call && x.args[1] === :_vcat + Expr(x.head, dotargs...) 
+ elseif x.head === :call && dottable_(x.args[1]) Expr(:., dotargs[1], Expr(:tuple, dotargs[2:end]...)) elseif x.head === :comparison Expr(:comparison, @@ -129,9 +134,12 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa depvar = _args[1] num_depvar = dict_depvars[depvar] indvars = _args[2:end] + for i in eachindex(indvars) + indvars[i] = transform_expression(pinnrep, indvars[i]) + end var_ = is_integral ? :(u) : :($(Expr(:$, :u))) ex.args = if !multioutput - [var_, Symbol(:cord, num_depvar), :($θ), :phi] #TODO: this should somehow use indvars + [var_, :( (_vcat)($(indvars...)) ), :($θ), :phi] else [ var_, From 13df6570c2fb78c5644f5039893c5039e4dd410e Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Sat, 31 Dec 2022 17:08:20 +1300 Subject: [PATCH 07/42] Some test files --- src/mwe2.jl | 26 +++++++++++++---- src/mwe3.jl | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/mwe4.jl | 9 ++++++ 3 files changed, 112 insertions(+), 6 deletions(-) create mode 100644 src/mwe3.jl create mode 100644 src/mwe4.jl diff --git a/src/mwe2.jl b/src/mwe2.jl index 6782e5db4f..10320407a0 100644 --- a/src/mwe2.jl +++ b/src/mwe2.jl @@ -4,6 +4,7 @@ using QuasiMonteCarlo import ModelingToolkit: Interval, infimum, supremum using DomainSets using Random +using CUDA import Lux Random.seed!(110) @@ -12,24 +13,29 @@ println("Approximation of function 1D") @parameters x @variables u(..) -func(x) = @. cos(5pi * x) * x +func(x) = @. cos(pi * x) * x eq = [u(x) - u(0) ~ func(x)] #eq = [u(x) ~ func(x)] bc = [u(0) ~ u(0)] x0 = 0 -x_end = 4 +x_end = 0.5 domain = [x ∈ Interval(x0, x_end)] hidden = 20 chain = Lux.Chain(Lux.Dense(1, hidden, Lux.sin), Lux.Dense(hidden, hidden, Lux.sin), Lux.Dense(hidden, hidden, Lux.sin), - Lux.Dense(hidden, 1)) + Lux.Dense(hidden, 1, bias=false)) -strategy = NeuralPDE.GridTraining(0.01) +# Train on GPU +ps = Lux.setup(Random.default_rng(), chain)[1] +ps = ps |> Lux.ComponentArray |> gpu .|> Float64 -discretization = NeuralPDE.PhysicsInformedNN(chain, strategy) +#strategy = NeuralPDE.GridTraining(0.01) +strategy = NeuralPDE.QuasiRandomTraining(10000;bcs_points=0) + +discretization = NeuralPDE.PhysicsInformedNN(chain, strategy, init_params = ps) @named pde_system = PDESystem(eq, bc, domain, [x], [u(x)]) sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) prob = NeuralPDE.discretize(pde_system, discretization) @@ -41,5 +47,13 @@ res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) dx = 0.01 xs = collect(x0:dx:x_end) func_s = func(xs) -func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0., res.u) +# func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0., res.u) +func_approx(x) = first.(vec(Array(discretization.phi(gpu(x), res.u)) .- Array(discretization.phi(gpu(0.), res.u)))) + + +using Plots + +plot(xs, func(xs')', label="True function") +plot!(xs, func_approx(xs'), label="Approximate function") + @test func_approx(xs')≈func(xs') rtol=0.01 \ No newline at end of file diff --git a/src/mwe3.jl b/src/mwe3.jl new file mode 100644 index 0000000000..b3f72f0817 --- /dev/null +++ b/src/mwe3.jl @@ -0,0 +1,83 @@ +using NeuralPDE +import ModelingToolkit: Interval, infimum, supremum +using DomainSets + +# Set up problem + +@parameters x +@variables u(..) + +func(x) = @. 
cos(5pi * x) * x +eqs = [u(x) - u(0) ~ func(x)] +bcs = [u(0) ~ u(0)] + +x0 = 0 +x_end = 4 +domains = [x ∈ Interval(x0, x_end)] + +# Set up pinnrep + +eq_params = SciMLBase.NullParameters() +defaults = Dict{Any, Any}() +default_p = nothing +param_estim = false +additional_loss = nothing +adaloss = nothing +depvars, indvars, dict_indvars, dict_depvars, dict_depvar_input = NeuralPDE.get_vars([x],[u(x)]) +logger = nothing +multioutput = false +iteration = [1] +init_params = nothing +flat_init_params = nothing +phi = nothing +derivative = nothing +strategy = NeuralPDE.GridTraining(0.01) + +pde_indvars = if true# strategy isa QuadratureTraining + NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) +else + NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) +end + +bc_indvars = if strategy isa QuadratureTraining + NeuralPDE.get_argument(bcs, dict_indvars, dict_depvars) +else + NeuralPDE.get_variables(bcs, dict_indvars, dict_depvars) +end + +pde_integration_vars = NeuralPDE.get_integration_variables(eqs, dict_indvars, dict_depvars) +bc_integration_vars = NeuralPDE.get_integration_variables(bcs, dict_indvars, dict_depvars) + +pinnrep = NeuralPDE.PINNRepresentation(eqs, bcs, domains, eq_params, defaults, default_p, + param_estim, additional_loss, adaloss, depvars, indvars, + dict_indvars, dict_depvars, dict_depvar_input, logger, + multioutput, iteration, init_params, flat_init_params, phi, + derivative, + strategy, pde_indvars, bc_indvars, pde_integration_vars, + bc_integration_vars, nothing, nothing, nothing, nothing) + +# Attemp to build loss function expressions + +eq = first(pinnrep.eqs) + +eq_rhs = isequal(expand_derivatives(eq.rhs), 0) ? eq.rhs : expand_derivatives(eq.rhs) +eq_rhs_expr = toexpr(eq_rhs) +right_expr = NeuralPDE._transform_expression(pinnrep, eq_rhs_expr) + +eq_lhs = isequal(expand_derivatives(eq.lhs), 0) ? 
eq.lhs : expand_derivatives(eq.lhs) +eq_lhs_expr = toexpr(eq_lhs) +left_expr = NeuralPDE._transform_expression(pinnrep, eq_lhs_expr) + +println("left_expr = ", left_expr) + +left_expr_dot = NeuralPDE._dot_(left_expr) + +println("_dot_(left_expr) = ", left_expr_dot) + +println("parse_equation(", eq,") = ", NeuralPDE.parse_equation(pinnrep, eq)) + +#a = Expr(:$, :x) +#b = :((vcat)(Any[$a])) +#println(NeuralPDE._dot_(b)) +#c = :( $(Expr(:$, :u))($b) ) +#println(NeuralPDE._dot_(c)) diff --git a/src/mwe4.jl b/src/mwe4.jl new file mode 100644 index 0000000000..aa2d1d346a --- /dev/null +++ b/src/mwe4.jl @@ -0,0 +1,9 @@ +ex = :(u(x,0,y-5)) +indvars = ex.args[2:end] + +var_ = :($(Expr(:$, :u))) + +ex2 = :() +ex2.head = ex.head +#ex2.args = [var_, :( $indvars )] +ex2.args = [var_, :( [$(indvars...)] )] \ No newline at end of file From e885f450225880372cad145a4d104d567973a992 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Sat, 31 Dec 2022 17:40:39 +1300 Subject: [PATCH 08/42] Reverted get_argument to original state --- src/symbolic_utilities.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 1d211d1ccd..5f9da4b182 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -447,9 +447,8 @@ function get_argument(eqs, dict_indvars, dict_depvars) vars = map(exprs) do expr _vars = map(depvar -> find_thing_in_expr(expr, depvar), collect(keys(dict_depvars))) f_vars = filter(x -> !isempty(x), _vars) - #map(x -> first(x), f_vars) + map(x -> first(x), f_vars) end - vars = [depvar for expr in vars for depvar in expr ] args_ = map(vars) do _vars ind_args_ = map(var -> var.args[2:end], _vars) syms = Set{Symbol}() From 74a27496513dabf83f764c6d56437607ff426c30 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Sat, 31 Dec 2022 17:43:51 +1300 Subject: [PATCH 09/42] Removed temporary debug files --- src/mwe.jl | 39 ------------------------- src/mwe2.jl | 59 ------------------------------------- src/mwe3.jl | 83 ----------------------------------------------------- src/mwe4.jl | 9 ------ 4 files changed, 190 deletions(-) delete mode 100644 src/mwe.jl delete mode 100644 src/mwe2.jl delete mode 100644 src/mwe3.jl delete mode 100644 src/mwe4.jl diff --git a/src/mwe.jl b/src/mwe.jl deleted file mode 100644 index f3db432956..0000000000 --- a/src/mwe.jl +++ /dev/null @@ -1,39 +0,0 @@ -using NeuralPDE -using ModelingToolkit - -@parameters x -@variables u(..) - -eqs = [u(x) - u(0) ~ 0.] -dict_indvars = Dict(:x => 1) -dict_depvars = Dict(:u => 1) -a1 = NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) -a2 = NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) -a3 = NeuralPDE.get_number(eqs, dict_indvars, dict_depvars) -a4 = NeuralPDE.pair(eqs[1], [:u], dict_depvars, dict_indvars) -println(eqs[1]) -println("Arguments: ", a1) -println("Variables: ", a2) -println("Numbers: ", a3) -println("Pair: ", a4) -println() - -eqs = [u(x) ~ 0.] -b1 = NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) -b2 = NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) -b3 = NeuralPDE.get_number(eqs, dict_indvars, dict_depvars) -println(eqs[1]) -println("Arguments: ", b1) -println("Variables: ", b2) -println("Numbers: ", b3) -println() - -eqs = [u(0) ~ 0.] 
-c1 = NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) -c2 = NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) -c3 = NeuralPDE.get_number(eqs, dict_indvars, dict_depvars) -println(eqs[1]) -println("Arguments: ", c1) -println("Variables: ", c2) -println("Numbers: ", c3) -println() \ No newline at end of file diff --git a/src/mwe2.jl b/src/mwe2.jl deleted file mode 100644 index 10320407a0..0000000000 --- a/src/mwe2.jl +++ /dev/null @@ -1,59 +0,0 @@ -using Flux, NeuralPDE, Test -using Optimization, OptimizationOptimJL, OptimizationOptimisers -using QuasiMonteCarlo -import ModelingToolkit: Interval, infimum, supremum -using DomainSets -using Random -using CUDA -import Lux - -Random.seed!(110) - -println("Approximation of function 1D") - -@parameters x -@variables u(..) -func(x) = @. cos(pi * x) * x -eq = [u(x) - u(0) ~ func(x)] -#eq = [u(x) ~ func(x)] -bc = [u(0) ~ u(0)] - -x0 = 0 -x_end = 0.5 -domain = [x ∈ Interval(x0, x_end)] - -hidden = 20 -chain = Lux.Chain(Lux.Dense(1, hidden, Lux.sin), - Lux.Dense(hidden, hidden, Lux.sin), - Lux.Dense(hidden, hidden, Lux.sin), - Lux.Dense(hidden, 1, bias=false)) - -# Train on GPU -ps = Lux.setup(Random.default_rng(), chain)[1] -ps = ps |> Lux.ComponentArray |> gpu .|> Float64 - -#strategy = NeuralPDE.GridTraining(0.01) -strategy = NeuralPDE.QuasiRandomTraining(10000;bcs_points=0) - -discretization = NeuralPDE.PhysicsInformedNN(chain, strategy, init_params = ps) -@named pde_system = PDESystem(eq, bc, domain, [x], [u(x)]) -sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) -prob = NeuralPDE.discretize(pde_system, discretization) - -res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500) -prob = remake(prob, u0 = res.minimizer) -res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) - -dx = 0.01 -xs = collect(x0:dx:x_end) -func_s = func(xs) -# func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0., res.u) -func_approx(x) = first.(vec(Array(discretization.phi(gpu(x), res.u)) .- Array(discretization.phi(gpu(0.), res.u)))) - - -using Plots - -plot(xs, func(xs')', label="True function") -plot!(xs, func_approx(xs'), label="Approximate function") - -@test func_approx(xs')≈func(xs') rtol=0.01 \ No newline at end of file diff --git a/src/mwe3.jl b/src/mwe3.jl deleted file mode 100644 index b3f72f0817..0000000000 --- a/src/mwe3.jl +++ /dev/null @@ -1,83 +0,0 @@ -using NeuralPDE -import ModelingToolkit: Interval, infimum, supremum -using DomainSets - -# Set up problem - -@parameters x -@variables u(..) - -func(x) = @. 
cos(5pi * x) * x -eqs = [u(x) - u(0) ~ func(x)] -bcs = [u(0) ~ u(0)] - -x0 = 0 -x_end = 4 -domains = [x ∈ Interval(x0, x_end)] - -# Set up pinnrep - -eq_params = SciMLBase.NullParameters() -defaults = Dict{Any, Any}() -default_p = nothing -param_estim = false -additional_loss = nothing -adaloss = nothing -depvars, indvars, dict_indvars, dict_depvars, dict_depvar_input = NeuralPDE.get_vars([x],[u(x)]) -logger = nothing -multioutput = false -iteration = [1] -init_params = nothing -flat_init_params = nothing -phi = nothing -derivative = nothing -strategy = NeuralPDE.GridTraining(0.01) - -pde_indvars = if true# strategy isa QuadratureTraining - NeuralPDE.get_argument(eqs, dict_indvars, dict_depvars) -else - NeuralPDE.get_variables(eqs, dict_indvars, dict_depvars) -end - -bc_indvars = if strategy isa QuadratureTraining - NeuralPDE.get_argument(bcs, dict_indvars, dict_depvars) -else - NeuralPDE.get_variables(bcs, dict_indvars, dict_depvars) -end - -pde_integration_vars = NeuralPDE.get_integration_variables(eqs, dict_indvars, dict_depvars) -bc_integration_vars = NeuralPDE.get_integration_variables(bcs, dict_indvars, dict_depvars) - -pinnrep = NeuralPDE.PINNRepresentation(eqs, bcs, domains, eq_params, defaults, default_p, - param_estim, additional_loss, adaloss, depvars, indvars, - dict_indvars, dict_depvars, dict_depvar_input, logger, - multioutput, iteration, init_params, flat_init_params, phi, - derivative, - strategy, pde_indvars, bc_indvars, pde_integration_vars, - bc_integration_vars, nothing, nothing, nothing, nothing) - -# Attemp to build loss function expressions - -eq = first(pinnrep.eqs) - -eq_rhs = isequal(expand_derivatives(eq.rhs), 0) ? eq.rhs : expand_derivatives(eq.rhs) -eq_rhs_expr = toexpr(eq_rhs) -right_expr = NeuralPDE._transform_expression(pinnrep, eq_rhs_expr) - -eq_lhs = isequal(expand_derivatives(eq.lhs), 0) ? eq.lhs : expand_derivatives(eq.lhs) -eq_lhs_expr = toexpr(eq_lhs) -left_expr = NeuralPDE._transform_expression(pinnrep, eq_lhs_expr) - -println("left_expr = ", left_expr) - -left_expr_dot = NeuralPDE._dot_(left_expr) - -println("_dot_(left_expr) = ", left_expr_dot) - -println("parse_equation(", eq,") = ", NeuralPDE.parse_equation(pinnrep, eq)) - -#a = Expr(:$, :x) -#b = :((vcat)(Any[$a])) -#println(NeuralPDE._dot_(b)) -#c = :( $(Expr(:$, :u))($b) ) -#println(NeuralPDE._dot_(c)) diff --git a/src/mwe4.jl b/src/mwe4.jl deleted file mode 100644 index aa2d1d346a..0000000000 --- a/src/mwe4.jl +++ /dev/null @@ -1,9 +0,0 @@ -ex = :(u(x,0,y-5)) -indvars = ex.args[2:end] - -var_ = :($(Expr(:$, :u))) - -ex2 = :() -ex2.head = ex.head -#ex2.args = [var_, :( $indvars )] -ex2.args = [var_, :( [$(indvars...)] )] \ No newline at end of file From d0df2a37c338cce6fdb870775d6411d56afa03ce Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:54:33 +1300 Subject: [PATCH 10/42] Updated _vcat to accept multiple arguments --- src/symbolic_utilities.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 5f9da4b182..27cfbd7ee9 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -19,7 +19,7 @@ julia> _dot_(e) dottable_(x) = Broadcast.dottable(x) dottable_(x::Function) = true -_vcat(x) = vcat(x) +_vcat(x...) = vcat(x...) 
dottable_(x::typeof(_vcat)) = false _dot_(x) = x From 41a75f6cab4668fa83198fcee5617ddffa8ecccc Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Thu, 12 Jan 2023 15:57:24 +1300 Subject: [PATCH 11/42] get_argument returns all args no just first per eq --- src/symbolic_utilities.jl | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 27cfbd7ee9..7b3049bcb0 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -143,7 +143,7 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa else [ var_, - Symbol(:cord, num_depvar), + :( (_vcat)($(indvars...)) ), Symbol(:($θ), num_depvar), Symbol(:phi, num_depvar), ] @@ -443,27 +443,48 @@ function get_argument(eqs, _indvars::Array, _depvars::Array) get_argument(eqs, dict_indvars, dict_depvars) end function get_argument(eqs, dict_indvars, dict_depvars) + """Equations, as expressions""" exprs = toexpr.(eqs) - vars = map(exprs) do expr + """Instances of each dependent variable that appears in the expression, by dependent variable, by equation""" + vars = map(exprs) do expr # For each equation,... + """Arrays of instances of each dependent variable, by dependent variable""" _vars = map(depvar -> find_thing_in_expr(expr, depvar), collect(keys(dict_depvars))) + """Arrays of instances of each dependent variable that appears in the expression, by dependent variable""" f_vars = filter(x -> !isempty(x), _vars) - map(x -> first(x), f_vars) end +# vars = [depvar for expr in vars for depvar in expr ] args_ = map(vars) do _vars - ind_args_ = map(var -> var.args[2:end], _vars) + """Arguments of all instances of dependent variable, by instance, by dependent variable""" + ind_args_ = map.(var -> var.args[2:end], _vars) syms = Set{Symbol}() - filter(vcat(ind_args_...)) do ind_arg + """All arguments in any instance of a dependent variable""" + all_ind_args = vcat((ind_args_...)...) 
+
+        # Add any independent variables from expression dependent variable calls
+        for ind_arg in all_ind_args
+            if ind_arg isa Expr
+                for ind_var in collect(keys(dict_indvars))
+                    if !isempty(NeuralPDE.find_thing_in_expr(ind_arg, ind_var))
+                        push!(all_ind_args, ind_var)
+                    end
+                end
+            end
+        end
+
+        filter(all_ind_args) do ind_arg # For each argument
             if ind_arg isa Symbol
                 if ind_arg ∈ syms
-                    false
+                    false # remove symbols that have already occurred
                 else
                     push!(syms, ind_arg)
-                    true
+                    true # keep symbols that haven't occurred yet, but note their occurrence
                 end
+            elseif ind_arg isa Expr # we've already taken what we wanted from the expressions
+                false
             else
-                true
+                true # keep all non-symbols
             end
         end
     end
-    return args_ # TODO for all arguments
+    return args_
 end

From c5d99606acf677111be5f41bac43db01010e3269 Mon Sep 17 00:00:00 2001
From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com>
Date: Thu, 12 Jan 2023 15:59:02 +1300
Subject: [PATCH 12/42] Added implicit 1D and another 2D test case

---
 test/direct_function_tests.jl | 96 +++++++++++++++++++++++++++++++++--
 1 file changed, 93 insertions(+), 3 deletions(-)

diff --git a/test/direct_function_tests.jl b/test/direct_function_tests.jl
index 7287b4fdae..949ad0ea7e 100644
--- a/test/direct_function_tests.jl
+++ b/test/direct_function_tests.jl
@@ -44,10 +44,11 @@ prob = remake(prob, u0 = res.minimizer)
 res = Optimization.solve(prob, OptimizationOptimJL.BFGS(initial_stepnorm = 0.01),
                          maxiters = 500)

-@test discretization.phi(xs', res.u)≈func(xs') rtol=0.01
+func_approx(x) = discretization.phi(x, res.u)
+@test func_approx(xs')≈func(xs') rtol=0.01

 # plot(xs,func(xs))
-# plot!(xs, discretization.phi(xs',res.u)')
+# plot!(xs,func_approx(xs')')

 ## Approximation of function 1D 2
 println("Approximation of function 1D 2")
@@ -86,7 +87,49 @@ func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0., res.u)
 @test func_approx(xs')≈func(xs') rtol=0.01

 # plot(xs,func(xs))
-# plot!(xs, discretization.phi(xs',res.u)')
+# plot!(xs, func_approx(xs')')
+
+## Approximation of implicit function 1D
+println("Approximation of implicit function 1D")
+
+@parameters x
+@variables u(..)
+eq = [u(sin(x)) ~ cos(x) + cos(2*x)]
+bc = [u(0) ~ u(0)]
+
+x0 = pi/2
+x_end = 3*pi/2
+domain = [x ∈ Interval(x0, x_end)]
+
+hidden = 20
+chain = Lux.Chain(Lux.Dense(1, hidden, Lux.tanh),
+                  Lux.Dense(hidden, hidden, Lux.tanh),
+                  Lux.Dense(hidden, hidden, Lux.tanh),
+                  Lux.Dense(hidden, 1))
+
+strategy = NeuralPDE.GridTraining(0.01)
+
+discretization = NeuralPDE.PhysicsInformedNN(chain, strategy)
+@named pde_system = PDESystem(eq, bc, domain, [x], [u(x)])
+sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)
+prob = NeuralPDE.discretize(pde_system, discretization)
+
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500)
+prob = remake(prob, u0 = res.minimizer)
+res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000)
+
+dy = 0.01
+y0 = -1
+y_end = 1
+ys = collect(y0:dy:y_end)
+
+func(y) = @.
-sqrt(1 - y^2) + 1 - 2 * y^2 +func_s = func(ys) +func_approx(y) = discretization.phi(y, res.u) +@test func_approx(ys')≈func(ys') rtol=0.01 + +# plot(ys,func(ys)) +# plot!(ys, func_approx(ys')') ## Approximation of function 2D println("Approximation of function 2D") @@ -138,3 +181,50 @@ diff_u = abs.(u_predict .- u_real) # p2 = plot(xs, ys, u_predict, st=:surface,title = "predict"); # p3 = plot(xs, ys, diff_u,st=:surface,title = "error"); # plot(p1,p2,p3) + +## Approximation of function 2D +println("Approximation of function 2D 2") + +@parameters x, y +@variables u(..) +func(x, y) = -sin(x) * sin(y) * exp(-((x - pi)^2 + (y - pi)^2)) +eq = [u(x, y) - u(0,0) ~ func(x, y)] +bc = [u(0, 0) ~ u(0, 0)] + +x0 = -10 +x_end = 10 +y0 = -10 +y_end = 10 +d = 0.4 + +domain = [x ∈ Interval(x0, x_end), y ∈ Interval(y0, y_end)] + +hidden = 25 +chain = Lux.Chain(Lux.Dense(2, hidden, Lux.tanh), + Lux.Dense(hidden, hidden, Lux.tanh), + Lux.Dense(hidden, hidden, Lux.tanh), + Lux.Dense(hidden, 1)) + +strategy = NeuralPDE.GridTraining(d) +discretization = NeuralPDE.PhysicsInformedNN(chain, strategy) +@named pde_system = PDESystem(eq, bc, domain, [x, y], [u(x, y)]) +prob = NeuralPDE.discretize(pde_system, discretization) +symprob = NeuralPDE.symbolic_discretize(pde_system, discretization) +symprob.loss_functions.full_loss_function(symprob.flat_init_params, nothing) + +res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 1000) +prob = remake(prob, u0 = res.minimizer) +res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) +prob = remake(prob, u0 = res.minimizer) +res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) +phi = discretization.phi + +xs = collect(x0:0.1:x_end) +ys = collect(y0:0.1:y_end) + +func_approx(x,y) = first(phi([x,y], res.minimizer)) .- first(phi([0.,0.], res.minimizer)) +u_predict = reshape([func_approx(x, y) for x in xs for y in ys], (length(xs), length(ys))) +u_real = reshape([ func(x, y) for x in xs for y in ys], (length(xs), length(ys))) +diff_u = abs.(u_predict .- u_real) + +@test u_predict≈u_real rtol=0.05 \ No newline at end of file From 64b56de7aa7e10643c630ac9f9a3d3e75b3a7c28 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Thu, 12 Jan 2023 16:00:12 +1300 Subject: [PATCH 13/42] generate gridtrain trainsets based of pde vars --- src/discretize.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discretize.jl b/src/discretize.jl index 187550ad51..7c20024b59 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -256,7 +256,7 @@ function generate_training_sets(domains, dx, eqs, bcs, eltypeθ, dict_indvars::D hcat(vec(map(points -> collect(points), Iterators.product(bc_data...)))...)) - pde_train_sets = map(pde_args) do bt + pde_train_sets = map(pde_vars) do bt span = map(b -> get(dict_var_span_, b, b), bt) _set = adapt(eltypeθ, hcat(vec(map(points -> collect(points), Iterators.product(span...)))...)) From 55fa847a703ac7be4c9ed0a4eb0664a1174831ff Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Thu, 12 Jan 2023 16:00:59 +1300 Subject: [PATCH 14/42] added OptimJL and OptimOptimisers --- Project.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Project.toml b/Project.toml index fd81e284c3..78ece43ed0 100644 --- a/Project.toml +++ b/Project.toml @@ -23,6 +23,8 @@ ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78" Optim = "429524aa-4258-5aef-a3af-852621145aeb" Optimisers = 
"3bd65402-5787-11e9-1adc-39752487f4e2" Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" +OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e" +OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1" QuasiMonteCarlo = "8a4e6c94-4038-4cdc-81c3-7e6ffdb2a71b" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" From b7e3d7ab0c2a5521a4573d29cdbfe40ccfb4d357 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Fri, 13 Jan 2023 11:34:11 +1300 Subject: [PATCH 15/42] get_bounds works with new transform_expression --- src/discretize.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/discretize.jl b/src/discretize.jl index 7c20024b59..829bfea577 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -292,7 +292,7 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, dict_lower_bound = Dict([Symbol(d.variables) => infimum(d.domain) for d in domains]) dict_upper_bound = Dict([Symbol(d.variables) => supremum(d.domain) for d in domains]) - pde_args = get_argument(eqs, dict_indvars, dict_depvars) + pde_args = get_variables(eqs, dict_indvars, dict_depvars) pde_lower_bounds = map(pde_args) do pd span = map(p -> get(dict_lower_bound, p, p), pd) From fb199e4ac3b02054bfb06de00029d3daf89124ea Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Fri, 13 Jan 2023 11:53:18 +1300 Subject: [PATCH 16/42] Added test of ODE with hard constraint ic --- test/NNPDE_tests.jl | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/test/NNPDE_tests.jl b/test/NNPDE_tests.jl index e27c86a5d1..9d5a86bc06 100644 --- a/test/NNPDE_tests.jl +++ b/test/NNPDE_tests.jl @@ -15,18 +15,25 @@ callback = function (p, l) end ## Example 1, 1D ode -function test_ode(strategy_) - println("Example 1, 1D ode: strategy: $(nameof(typeof(strategy_)))") +function test_ode(strategy_, ic_hard_constraint) + if ic_hard_constraint + println("Example 1, 1D ode, initial condition as hard constraint: strategy: $(nameof(typeof(strategy_)))") + else + println("Example 1, 1D ode, initial condition as soft constraint: strategy: $(nameof(typeof(strategy_)))") + end @parameters θ @variables u(..) Dθ = Differential(θ) # 1D ODE - eq = Dθ(u(θ)) ~ θ^3 + 2 * θ + (θ^2) * ((1 + 3 * (θ^2)) / (1 + θ + (θ^3))) - + eq = ic_hard_constraint ? + Dθ(u(θ)) ~ θ^3 + 2 * θ + (θ^2) * ((1 + 3 * (θ^2)) / (1 + θ + (θ^3))) - + (u(θ) - u(0.) + 1.0) * (θ + ((1 + 3 * (θ^2)) / (1 + θ + θ^3))) : + Dθ(u(θ)) ~ θ^3 + 2 * θ + (θ^2) * ((1 + 3 * (θ^2)) / (1 + θ + (θ^3))) - u(θ) * (θ + ((1 + 3 * (θ^2)) / (1 + θ + θ^3))) # Initial and boundary conditions - bcs = [u(0.0) ~ 1.0] + bcs = ic_hard_constraint ? [u(0.0) ~ u(0.0)] : [u(0.0) ~ 1.0] # Space and time domains domains = [θ ∈ Interval(0.0, 1.0)] @@ -50,7 +57,9 @@ function test_ode(strategy_) analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2 ts = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] u_real = [analytic_sol_func(t) for t in ts] - u_predict = [first(phi(t, res.minimizer)) for t in ts] + u_predict = ic_hard_constraint ? 
+ [first(phi(t, res.minimizer)) - first(phi(0., res.minimizer)) + 1.0 for t in ts] : + [first(phi(t, res.minimizer)) for t in ts] @test u_predict≈u_real atol=0.1 # using Plots @@ -156,8 +165,8 @@ strategies = [ quadrature_strategy, ] -map(strategies) do strategy_ - test_ode(strategy_) +map(Iterators.product(strategies, [true, false])) do (strategy_, ic_hard_constraint) + test_ode(strategy_, ic_hard_constraint) end # map(strategies) do strategy_ # test_heterogeneous_system(strategy_) From 2572dbf588164eb48a544d3682f6bf993f755692 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 24 Jan 2023 18:11:02 -0500 Subject: [PATCH 17/42] _vcat now fills out scalar inputs to match batches --- src/symbolic_utilities.jl | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 7b3049bcb0..680aa9786a 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -1,4 +1,5 @@ using Base.Broadcast +using LinearAlgebra """ Override `Broadcast.__dot__` with `Broadcast.dottable(x::Function) = true` @@ -19,6 +20,16 @@ julia> _dot_(e) dottable_(x) = Broadcast.dottable(x) dottable_(x::Function) = true +_vcat(x::Number...) = vcat(x...) +_vcat(x::(LinearAlgebra._DenseConcatGroup.b)...) = vcat(x...) +# If the arguments are a mix of numbers and matrices/vectors/arrays, +# the numbers need to be copied for the dimensions to match +function _vcat(x::LinearAlgebra._DenseConcatGroup...) + example = first(Iterators.filter(e -> !(e isa Number), x)) + dims = (1, size(example)[2:end]...) + x = map( el -> el isa Number ? fill(el, dims) : el , x) + _vcat(x...) +end _vcat(x...) = vcat(x...) dottable_(x::typeof(_vcat)) = false @@ -133,10 +144,7 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa if e in keys(dict_depvars) depvar = _args[1] num_depvar = dict_depvars[depvar] - indvars = _args[2:end] - for i in eachindex(indvars) - indvars[i] = transform_expression(pinnrep, indvars[i]) - end + indvars = map( (indvar_) -> transform_expression(pinnrep, indvar_), _args[2:end]) var_ = is_integral ? 
:(u) : :($(Expr(:$, :u))) ex.args = if !multioutput [var_, :( (_vcat)($(indvars...)) ), :($θ), :phi] From 3e36fbe437d93affc59505c62ee69a071a2f5b00 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Thu, 26 Jan 2023 18:16:08 -0500 Subject: [PATCH 18/42] cord now only has variables that show up in the eq --- src/discretize.jl | 15 ++++----------- src/symbolic_utilities.jl | 8 ++++---- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/src/discretize.jl b/src/discretize.jl index 829bfea577..56e3502065 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -142,17 +142,10 @@ function build_symbolic_loss_function(pinnrep::PINNRepresentation, eqs; vcat_expr = Expr(:block, :($(eq_pair_expr...))) vcat_expr_loss_functions = Expr(:block, vcat_expr, loss_function) # TODO rename - if strategy isa QuadratureTraining - indvars_ex = get_indvars_ex(bc_indvars) - left_arg_pairs, right_arg_pairs = this_eq_indvars, indvars_ex - vars_eq = Expr(:(=), build_expr(:tuple, left_arg_pairs), - build_expr(:tuple, right_arg_pairs)) - else - indvars_ex = [:($:cord[[$i], :]) for (i, x) in enumerate(this_eq_indvars)] - left_arg_pairs, right_arg_pairs = this_eq_indvars, indvars_ex - vars_eq = Expr(:(=), build_expr(:tuple, left_arg_pairs), - build_expr(:tuple, right_arg_pairs)) - end + indvars_ex = [:($:cord[[$i], :]) for (i, x) in enumerate(this_eq_indvars)] + left_arg_pairs, right_arg_pairs = this_eq_indvars, indvars_ex + vars_eq = Expr(:(=), build_expr(:tuple, left_arg_pairs), + build_expr(:tuple, right_arg_pairs)) if !(dict_transformation_vars isa Nothing) transformation_expr_ = Expr[] diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 680aa9786a..e9eec591ea 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -167,7 +167,7 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa end depvar = _args[1] num_depvar = dict_depvars[depvar] - indvars = _args[2:end] + indvars = map( (indvar_) -> transform_expression(pinnrep, indvar_), _args[2:end]) dict_interior_indvars = Dict([indvar .=> j for (j, indvar) in enumerate(dict_depvar_input[depvar])]) dim_l = length(dict_interior_indvars) @@ -178,13 +178,13 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa εs_dnv = [εs[d] for d in undv] ex.args = if !multioutput - [var_, :phi, :u, Symbol(:cord, num_depvar), εs_dnv, order, :($θ)] + [var_, :phi, :u, :( (_vcat)($(indvars...)) ), εs_dnv, order, :($θ)] else [ var_, Symbol(:phi, num_depvar), :u, - Symbol(:cord, num_depvar), + :( (_vcat)($(indvars...)) ), εs_dnv, order, Symbol(:($θ), num_depvar), @@ -352,7 +352,7 @@ function pair(eq, depvars, dict_depvars, dict_depvar_input) expr = toexpr(eq) pair_ = map(depvars) do depvar if !isempty(find_thing_in_expr(expr, depvar)) - dict_depvars[depvar] => dict_depvar_input[depvar] + dict_depvars[depvar] => filter(arg -> !isempty(find_thing_in_expr(expr, arg)), dict_depvar_input[depvar]) end end Dict(filter(p -> p !== nothing, pair_)) From d115eae24a599161bc75409e11c9c773cf014328 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 7 Feb 2023 15:36:33 -0500 Subject: [PATCH 19/42] GridTraining train_sets now work on the GPU --- src/training_strategies.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/training_strategies.jl b/src/training_strategies.jl index e590c3d42a..a85ffd4f93 100644 --- a/src/training_strategies.jl +++ 
b/src/training_strategies.jl @@ -22,15 +22,15 @@ function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, datafree_bc_loss_function) @unpack domains, eqs, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep dx = strategy.dx - eltypeθ = eltype(pinnrep.flat_init_params) + eltypeθ = eltype(flat_init_params) train_sets = generate_training_sets(domains, dx, eqs, bcs, eltypeθ, dict_indvars, dict_depvars) # the points in the domain and on the boundary pde_train_sets, bcs_train_sets = train_sets - pde_train_sets = adapt.(typeof(flat_init_params), pde_train_sets) - bcs_train_sets = adapt.(typeof(flat_init_params), bcs_train_sets) + pde_train_sets = map(train_set -> reshape(adapt(typeof(flat_init_params), vec(train_set)), size(train_set)), pde_train_sets) + bcs_train_sets = map(train_set -> reshape(adapt(typeof(flat_init_params), vec(train_set)), size(train_set)), bcs_train_sets) pde_loss_functions = [get_loss_function(_loss, _set, eltypeθ, strategy) for (_loss, _set) in zip(datafree_pde_loss_function, pde_train_sets)] From abb85a823aac543bc5659238bf65f0317ded4a63 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 7 Feb 2023 15:37:23 -0500 Subject: [PATCH 20/42] _vcat maintains Array types when filling --- src/symbolic_utilities.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index e9eec591ea..75b225df03 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -21,13 +21,13 @@ dottable_(x) = Broadcast.dottable(x) dottable_(x::Function) = true _vcat(x::Number...) = vcat(x...) -_vcat(x::(LinearAlgebra._DenseConcatGroup.b)...) = vcat(x...) +_vcat(x::AbstractArray{<:Number}...) = vcat(x...) # If the arguments are a mix of numbers and matrices/vectors/arrays, # the numbers need to be copied for the dimensions to match -function _vcat(x::LinearAlgebra._DenseConcatGroup...) +function _vcat(x::Union{Number, AbstractArray{<:Number}}...) example = first(Iterators.filter(e -> !(e isa Number), x)) dims = (1, size(example)[2:end]...) - x = map( el -> el isa Number ? fill(el, dims) : el , x) + x = map( el -> el isa Number ? (typeof(example))(fill(el, dims)) : el , x) _vcat(x...) end _vcat(x...) = vcat(x...) 
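[Note on the two _vcat commits above: a minimal, CPU-only sketch of the batching behavior they implement, not part of any patch in this series. Coordinates reach phi as 1xN row matrices, so a scalar argument such as the 0 in u(x) - u(0) must be filled out to a matching 1xN row before concatenation. The sketch below assumes plain Arrays and drops the typeof(example) conversion that PATCH 20 adds to preserve GPU array types.

_vcat(x::Number...) = vcat(x...)
_vcat(x::AbstractArray{<:Number}...) = vcat(x...)
# Mixed scalars and arrays: fill each scalar out to a 1xN row matching the
# trailing dimensions of the first array argument, then concatenate as usual.
function _vcat(x::Union{Number, AbstractArray{<:Number}}...)
    example = first(Iterators.filter(e -> !(e isa Number), x))
    dims = (1, size(example)[2:end]...)
    _vcat(map(el -> el isa Number ? fill(el, dims) : el, x)...)
end

xs = [0.0 0.5 1.0]   # a 1x3 batch of x-coordinates
_vcat(xs, 0.0)       # 2x3 matrix: the scalar 0.0 becomes a filled second row

A plain vcat(xs, 0.0) would error here because the dimensions disagree, which is why dottable_(x::typeof(_vcat)) = false keeps _dot_ from broadcasting the call away.]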
From c7d3dc5d21db143bbed40ba46698f84c93a3844e Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 7 Feb 2023 15:42:18 -0500 Subject: [PATCH 21/42] Formatting change --- src/training_strategies.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/training_strategies.jl b/src/training_strategies.jl index a85ffd4f93..f41f97b3bf 100644 --- a/src/training_strategies.jl +++ b/src/training_strategies.jl @@ -29,8 +29,10 @@ function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, # the points in the domain and on the boundary pde_train_sets, bcs_train_sets = train_sets - pde_train_sets = map(train_set -> reshape(adapt(typeof(flat_init_params), vec(train_set)), size(train_set)), pde_train_sets) - bcs_train_sets = map(train_set -> reshape(adapt(typeof(flat_init_params), vec(train_set)), size(train_set)), bcs_train_sets) + pde_train_sets = map(train_set -> reshape(adapt(typeof(flat_init_params), vec(train_set)), size(train_set)), + pde_train_sets) + bcs_train_sets = map(train_set -> reshape(adapt(typeof(flat_init_params), vec(train_set)), size(train_set)), + bcs_train_sets) pde_loss_functions = [get_loss_function(_loss, _set, eltypeθ, strategy) for (_loss, _set) in zip(datafree_pde_loss_function, pde_train_sets)] From d9da546737c214f320e7271043a136619e7e44f6 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Fri, 17 Feb 2023 15:09:02 -0500 Subject: [PATCH 22/42] StochasticTraining now actually uses bcs_points --- src/training_strategies.jl | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/training_strategies.jl b/src/training_strategies.jl index f41f97b3bf..f6168a98ab 100644 --- a/src/training_strategies.jl +++ b/src/training_strategies.jl @@ -88,18 +88,17 @@ function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, strategy) pde_bounds, bcs_bounds = bounds - pde_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy) + pde_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy, strategy.points) for (_loss, bound) in zip(datafree_pde_loss_function, pde_bounds)] - bc_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy) + bc_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy, strategy.bcs_points) for (_loss, bound) in zip(datafree_bc_loss_function, bcs_bounds)] pde_loss_functions, bc_loss_functions end -function get_loss_function(loss_function, bound, eltypeθ, strategy::StochasticTraining; +function get_loss_function(loss_function, bound, eltypeθ, strategy::StochasticTraining, points = strategy.points; τ = nothing) - points = strategy.points loss = (θ) -> begin sets = generate_random_points(points, bound, eltypeθ) From 18338d3fee7df8cc99ec0408b42bb547fe9ca419 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Fri, 17 Feb 2023 15:18:29 -0500 Subject: [PATCH 23/42] get_bounds uses bcs_points --- src/discretize.jl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/discretize.jl b/src/discretize.jl index 56e3502065..f051180de4 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -325,9 +325,14 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, str bds[1, :], bds[2, :] end + dx_bcs = 1 / strateg.bcs_points + dict_span_bcs = Dict([Symbol(d.variables) => [ + infimum(d.domain) + dx_bcs, + supremum(d.domain) - dx_bcs, + ] for d in domains]) bound_args = 
get_argument(bcs, dict_indvars, dict_depvars) bcs_bounds = map(bound_args) do bound_arg - bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, bound_arg) + bds = mapreduce(s -> get(dict_span_bcs, s, fill(s, 2)), hcat, bound_arg) bds = eltypeθ.(bds) bds[1, :], bds[2, :] end From cee31db8443bbc9129ec5fbb5f10244a97f321d9 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Fri, 17 Feb 2023 16:09:55 -0500 Subject: [PATCH 24/42] get_bounds uses get_variables --- src/discretize.jl | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/discretize.jl b/src/discretize.jl index f051180de4..c62f5dfd0b 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -318,21 +318,21 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, str ] for d in domains]) # pde_bounds = [[infimum(d.domain),supremum(d.domain)] for d in domains] - pde_args = get_argument(eqs, dict_indvars, dict_depvars) - pde_bounds = map(pde_args) do pde_arg - bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, pde_arg) + pde_vars = get_variables(eqs, dict_indvars, dict_depvars) + pde_bounds = map(pde_vars) do pde_var + bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, pde_var) bds = eltypeθ.(bds) bds[1, :], bds[2, :] end - dx_bcs = 1 / strateg.bcs_points + dx_bcs = 1 / strategy.bcs_points dict_span_bcs = Dict([Symbol(d.variables) => [ infimum(d.domain) + dx_bcs, supremum(d.domain) - dx_bcs, ] for d in domains]) - bound_args = get_argument(bcs, dict_indvars, dict_depvars) - bcs_bounds = map(bound_args) do bound_arg - bds = mapreduce(s -> get(dict_span_bcs, s, fill(s, 2)), hcat, bound_arg) + bound_vars = get_variables(bcs, dict_indvars, dict_depvars) + bcs_bounds = map(bound_vars) do bound_var + bds = mapreduce(s -> get(dict_span_bcs, s, fill(s, 2)), hcat, bound_var) bds = eltypeθ.(bds) bds[1, :], bds[2, :] end From be3abf1f7354d504b61928c8df323c44ca349b33 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Mon, 20 Feb 2023 11:37:49 -0500 Subject: [PATCH 25/42] Increased test number of points --- test/NNPDE_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/NNPDE_tests.jl b/test/NNPDE_tests.jl index 9d5a86bc06..fb11ce79f2 100644 --- a/test/NNPDE_tests.jl +++ b/test/NNPDE_tests.jl @@ -351,7 +351,7 @@ domains = [x ∈ Interval(0.0, 1.0)] chain = [[Lux.Chain(Lux.Dense(1, 12, Lux.tanh), Lux.Dense(12, 12, Lux.tanh), Lux.Dense(12, 1)) for _ in 1:3] [Lux.Chain(Lux.Dense(1, 4, Lux.tanh), Lux.Dense(4, 1)) for _ in 1:2]] -quasirandom_strategy = NeuralPDE.QuasiRandomTraining(100; #points +quasirandom_strategy = NeuralPDE.QuasiRandomTraining(130; #points sampling_alg = LatinHypercubeSample()) discretization = NeuralPDE.PhysicsInformedNN(chain, quasirandom_strategy) From 308454c2f39f49eb3c96f85c7536e9ecce47a70d Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Mon, 20 Feb 2023 11:43:10 -0500 Subject: [PATCH 26/42] get_bounds is now okay with eqs with no variables --- src/discretize.jl | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/discretize.jl b/src/discretize.jl index c62f5dfd0b..21d991d5b6 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -320,9 +320,13 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, str # pde_bounds = [[infimum(d.domain),supremum(d.domain)] for d in domains] pde_vars = get_variables(eqs, 
dict_indvars, dict_depvars) pde_bounds = map(pde_vars) do pde_var - bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, pde_var) - bds = eltypeθ.(bds) - bds[1, :], bds[2, :] + if !isempty(pde_var) + bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, pde_var) + bds = eltypeθ.(bds) + bds[1, :], bds[2, :] + else + [eltypeθ(0.)], [eltypeθ(0.)] + end end dx_bcs = 1 / strategy.bcs_points @@ -332,10 +336,15 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, str ] for d in domains]) bound_vars = get_variables(bcs, dict_indvars, dict_depvars) bcs_bounds = map(bound_vars) do bound_var - bds = mapreduce(s -> get(dict_span_bcs, s, fill(s, 2)), hcat, bound_var) - bds = eltypeθ.(bds) - bds[1, :], bds[2, :] + if !isempty(bound_var) + bds = mapreduce(s -> get(dict_span_bcs, s, fill(s, 2)), hcat, bound_var) + bds = eltypeθ.(bds) + bds[1, :], bds[2, :] + else + [eltypeθ(0.)], [eltypeθ(0.)] + end end + return pde_bounds, bcs_bounds end From 09b6cf6ceb240c787e6d96c2375902d27c77f99e Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Mon, 20 Feb 2023 15:48:34 -0500 Subject: [PATCH 27/42] symbolic_utilities doesn't need LinearAlgebra --- src/symbolic_utilities.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 75b225df03..37981b265d 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -1,5 +1,4 @@ using Base.Broadcast -using LinearAlgebra """ Override `Broadcast.__dot__` with `Broadcast.dottable(x::Function) = true` From 55d142a385a71219faec3b58e3b52c29d2df18ae Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 21 Feb 2023 12:28:29 -0500 Subject: [PATCH 28/42] Can now handle Ix(u(x,1)) and not just Ix(u(x,y)) --- src/discretize.jl | 2 +- src/symbolic_utilities.jl | 7 +++++++ test/IDE_tests.jl | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/src/discretize.jl b/src/discretize.jl index df80dd3084..999453d151 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -61,7 +61,7 @@ function build_symbolic_loss_function(pinnrep::PINNRepresentation, eqs; this_eq_pair = pair(eqs, depvars, dict_depvars, dict_depvar_input) this_eq_indvars = unique(vcat(values(this_eq_pair)...)) else - this_eq_pair = Dict(map(intvars -> dict_depvars[intvars] => dict_depvar_input[intvars], + this_eq_pair = Dict(map(intvars -> dict_depvars[intvars] => filter(arg -> !isempty(find_thing_in_expr(integrand, arg)), dict_depvar_input[intvars]), integrating_depvars)) this_eq_indvars = transformation_vars isa Nothing ? unique(vcat(values(this_eq_pair)...)) : transformation_vars diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 37981b265d..0686d58be0 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -434,6 +434,13 @@ function find_thing_in_expr(ex::Expr, thing; ans = []) return collect(Set(ans)) end +function find_thing_in_expr(ex::Symbol, thing::Symbol; ans = []) + if thing == ex + push!(ans, ex) + end + return ans +end + """ ```julia get_argument(eqs,_indvars::Array,_depvars::Array) diff --git a/test/IDE_tests.jl b/test/IDE_tests.jl index 2424a295f5..ec6548ebf4 100644 --- a/test/IDE_tests.jl +++ b/test/IDE_tests.jl @@ -69,6 +69,40 @@ u_real = [x^2 / cos(x) for x in xs] # plot(xs,u_real) # plot!(xs,u_predict) +## Simple Integral Test 2 +println("Simple Integral Test 2") + +@parameters x y +@variables u(..) 
+Ix = Integral(x in DomainSets.ClosedInterval(0, x)) +# eq = Ix(u(x, y) * cos(x)) ~ y * (x^3) / 3 # This is the same, but we're testing the parsing of the version below +eqs = [ Ix(u(x, 1) * cos(x)) ~ (x^3) / 3, + u(x, y) ~ y * u(x, 1.)] + +bcs = [u(0.0, y) ~ 0.0] +domains = [x ∈ Interval(0.0, 1.00), + y ∈ Interval(0.5, 2.00)] +# chain = Chain(Dense(1,15,Flux.σ),Dense(15,1)) +chain = Lux.Chain(Lux.Dense(2, 15, Flux.σ), Lux.Dense(15, 1)) +strategy_ = NeuralPDE.GridTraining(0.1) +discretization = NeuralPDE.PhysicsInformedNN(chain, + strategy_) +@named pde_system = PDESystem(eqs, bcs, domains, [x, y], [u(x,y)]) +prob = NeuralPDE.discretize(pde_system, discretization) +sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) +res = Optimization.solve(prob, OptimizationOptimJL.BFGS(); callback = callback, + maxiters = 500) +xys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] +xs = Iterators.product(xys...) +phi = discretization.phi +u_predict = [first(phi([x...], res.minimizer)) for x in xs] +u_real = [x[1]^2 / cos(x[1]) * x[2] for x in xs] +@test Flux.mse(u_real, u_predict) < 0.001 + +# p1 = plot(xys..., u_real', linetype=:contourf, title="Analytic"); +# p2 = plot(xys..., u_predict', linetype=:contourf, title="Predicted"); +# plot(p1,p2) + #simple multidimensitonal integral test println("simple multidimensitonal integral test") From a9b6b47a9809b7887477b52a8009e0a5338d1cf1 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 21 Feb 2023 16:10:08 -0500 Subject: [PATCH 29/42] import ComponentArrays used in training_strategies --- src/training_strategies.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/training_strategies.jl b/src/training_strategies.jl index 45586c6e47..13540093ec 100644 --- a/src/training_strategies.jl +++ b/src/training_strategies.jl @@ -16,6 +16,8 @@ struct GridTraining{T} <: AbstractTrainingStrategy dx::T end +import ComponentArrays + function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, strategy::GridTraining, datafree_pde_loss_function, From f81546924ed98fb34b71f2315e6be798fc0f8bc5 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 21 Feb 2023 20:15:50 -0500 Subject: [PATCH 30/42] Added import ComponentArrays statements --- src/discretize.jl | 2 ++ src/ode_solve.jl | 2 ++ src/pinn_types.jl | 2 ++ 3 files changed, 6 insertions(+) diff --git a/src/discretize.jl b/src/discretize.jl index 999453d151..609ef681cb 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -1,3 +1,5 @@ +import ComponentArrays + """ Build a loss function for a PDE or a boundary condition diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 062e448fc4..185f546714 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -1,5 +1,7 @@ abstract type NeuralPDEAlgorithm <: DiffEqBase.AbstractODEAlgorithm end +import ComponentArrays + """ ```julia NNODE(chain, opt=OptimizationPolyalgorithms.PolyOpt(), init_params = nothing; diff --git a/src/pinn_types.jl b/src/pinn_types.jl index ebb69be08c..5fc6cd5c0a 100644 --- a/src/pinn_types.jl +++ b/src/pinn_types.jl @@ -1,3 +1,5 @@ +import ComponentArrays + """ ??? """ From 5889a1b61f9a4952caa303e4f6f635327d208994 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 22 Feb 2023 14:00:41 -0500 Subject: [PATCH 31/42] Revert "Added import ComponentArrays statements" This reverts commit f81546924ed98fb34b71f2315e6be798fc0f8bc5. 
--- src/discretize.jl | 2 -- src/ode_solve.jl | 2 -- src/pinn_types.jl | 2 -- 3 files changed, 6 deletions(-) diff --git a/src/discretize.jl b/src/discretize.jl index 609ef681cb..999453d151 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -1,5 +1,3 @@ -import ComponentArrays - """ Build a loss function for a PDE or a boundary condition diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 185f546714..062e448fc4 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -1,7 +1,5 @@ abstract type NeuralPDEAlgorithm <: DiffEqBase.AbstractODEAlgorithm end -import ComponentArrays - """ ```julia NNODE(chain, opt=OptimizationPolyalgorithms.PolyOpt(), init_params = nothing; diff --git a/src/pinn_types.jl b/src/pinn_types.jl index 5fc6cd5c0a..ebb69be08c 100644 --- a/src/pinn_types.jl +++ b/src/pinn_types.jl @@ -1,5 +1,3 @@ -import ComponentArrays - """ ??? """ From 424a7ef8e4a51362c8ee12473c52f54a06eddf1c Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 22 Feb 2023 14:00:51 -0500 Subject: [PATCH 32/42] Revert "import ComponentArrays used in training_strategies" This reverts commit a9b6b47a9809b7887477b52a8009e0a5338d1cf1. --- src/training_strategies.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/training_strategies.jl b/src/training_strategies.jl index 13540093ec..45586c6e47 100644 --- a/src/training_strategies.jl +++ b/src/training_strategies.jl @@ -16,8 +16,6 @@ struct GridTraining{T} <: AbstractTrainingStrategy dx::T end -import ComponentArrays - function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, strategy::GridTraining, datafree_pde_loss_function, From d5818897800eb5f22b643edbc2ebbe5ece0e841f Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 22 Feb 2023 14:05:34 -0500 Subject: [PATCH 33/42] Revert "added OptimJL and OptimOptimisers" This reverts commit 55fa847a703ac7be4c9ed0a4eb0664a1174831ff. 
---
 Project.toml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Project.toml b/Project.toml
index e550f5487e..6147955822 100644
--- a/Project.toml
+++ b/Project.toml
@@ -23,8 +23,6 @@ ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
 Optim = "429524aa-4258-5aef-a3af-852621145aeb"
 Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2"
 Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
-OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
-OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1"
 QuasiMonteCarlo = "8a4e6c94-4038-4cdc-81c3-7e6ffdb2a71b"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd"

From edcb1a7e92a0702a8f4721421f10ddde89b63cd3 Mon Sep 17 00:00:00 2001
From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com>
Date: Wed, 22 Feb 2023 16:21:01 -0500
Subject: [PATCH 34/42] Replaced Lux.ComponentArray with using Co...Arrays

---
 test/NNPDE_tests_gpu_Lux.jl | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/test/NNPDE_tests_gpu_Lux.jl b/test/NNPDE_tests_gpu_Lux.jl
index 72ec1ad8bf..42ff33ca10 100644
--- a/test/NNPDE_tests_gpu_Lux.jl
+++ b/test/NNPDE_tests_gpu_Lux.jl
@@ -2,6 +2,7 @@ using Lux, OptimizationOptimisers
 using Test, NeuralPDE
 using Optimization
 using CUDA, QuasiMonteCarlo
+using ComponentArrays
 import ModelingToolkit: Interval, infimum, supremum
 using Random
@@ -41,7 +42,7 @@ chain = Chain(Dense(1, inner, Lux.σ), Dense(inner, 1))

 strategy = NeuralPDE.GridTraining(dt)

-ps = Lux.setup(Random.default_rng(), chain)[1] |> Lux.ComponentArray |> gpu .|> Float64
+ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
 discretization = NeuralPDE.PhysicsInformedNN(chain, strategy; init_params = ps)
@@ -90,7 +91,7 @@ chain = Lux.Chain(Dense(2, inner, Lux.σ), Dense(inner, 1))

 strategy = NeuralPDE.StochasticTraining(500)

-ps = Lux.setup(Random.default_rng(), chain)[1] |> Lux.ComponentArray |> gpu .|> Float64
+ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
 discretization = NeuralPDE.PhysicsInformedNN(chain, strategy; init_params = ps)
@@ -148,7 +149,7 @@ strategy = NeuralPDE.QuasiRandomTraining(500; #points
                                          sampling_alg = SobolSample(),
                                          resampling = false,
                                          minibatch = 30)
-ps = Lux.setup(Random.default_rng(), chain)[1] |> Lux.ComponentArray |> gpu .|> Float64
+ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
 discretization = NeuralPDE.PhysicsInformedNN(chain, strategy; init_params = ps)
@@ -213,7 +214,7 @@ chain = Lux.Chain(Dense(3, inner, Lux.σ), Dense(inner, 1))

 strategy = NeuralPDE.GridTraining(0.05)

-ps = Lux.setup(Random.default_rng(), chain)[1] |> Lux.ComponentArray |> gpu .|> Float64
+ps = Lux.setup(Random.default_rng(), chain)[1] |> ComponentArray |> gpu .|> Float64
 discretization = NeuralPDE.PhysicsInformedNN(chain, strategy; init_params = ps)

From b07ae13738d70a7684708fdc25098c858b365d41 Mon Sep 17 00:00:00 2001
From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com>
Date: Thu, 23 Feb 2023 10:19:09 -0500
Subject: [PATCH 35/42] Formatted with JuliaFormatter

---
 src/discretize.jl | 18 ++++++++++--------
 src/symbolic_utilities.jl | 29 ++++++++++++++++-------------
 src/training_strategies.jl | 12 +++++++-----
 test/IDE_tests.jl | 8 ++++----
 test/NNPDE_tests.jl | 15 ++++++++-------
 test/direct_function_tests.jl | 18 ++++++++++--------
 6 files changed, 55 insertions(+), 45 deletions(-)

diff --git a/src/discretize.jl b/src/discretize.jl index
999453d151..3dde5007da 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -61,7 +61,9 @@ function build_symbolic_loss_function(pinnrep::PINNRepresentation, eqs; this_eq_pair = pair(eqs, depvars, dict_depvars, dict_depvar_input) this_eq_indvars = unique(vcat(values(this_eq_pair)...)) else - this_eq_pair = Dict(map(intvars -> dict_depvars[intvars] => filter(arg -> !isempty(find_thing_in_expr(integrand, arg)), dict_depvar_input[intvars]), + this_eq_pair = Dict(map(intvars -> dict_depvars[intvars] => filter(arg -> !isempty(find_thing_in_expr(integrand, + arg)), + dict_depvar_input[intvars]), integrating_depvars)) this_eq_indvars = transformation_vars isa Nothing ? unique(vcat(values(this_eq_pair)...)) : transformation_vars @@ -145,7 +147,7 @@ function build_symbolic_loss_function(pinnrep::PINNRepresentation, eqs; indvars_ex = [:($:cord[[$i], :]) for (i, x) in enumerate(this_eq_indvars)] left_arg_pairs, right_arg_pairs = this_eq_indvars, indvars_ex vars_eq = Expr(:(=), build_expr(:tuple, left_arg_pairs), - build_expr(:tuple, right_arg_pairs)) + build_expr(:tuple, right_arg_pairs)) if !(dict_transformation_vars isa Nothing) transformation_expr_ = Expr[] @@ -325,15 +327,15 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, str bds = eltypeθ.(bds) bds[1, :], bds[2, :] else - [eltypeθ(0.)], [eltypeθ(0.)] + [eltypeθ(0.0)], [eltypeθ(0.0)] end end dx_bcs = 1 / strategy.bcs_points dict_span_bcs = Dict([Symbol(d.variables) => [ - infimum(d.domain) + dx_bcs, - supremum(d.domain) - dx_bcs, - ] for d in domains]) + infimum(d.domain) + dx_bcs, + supremum(d.domain) - dx_bcs, + ] for d in domains]) bound_vars = get_variables(bcs, dict_indvars, dict_depvars) bcs_bounds = map(bound_vars) do bound_var if !isempty(bound_var) @@ -341,10 +343,10 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, str bds = eltypeθ.(bds) bds[1, :], bds[2, :] else - [eltypeθ(0.)], [eltypeθ(0.)] + [eltypeθ(0.0)], [eltypeθ(0.0)] end end - + return pde_bounds, bcs_bounds end diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 0686d58be0..ea6363d32b 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -23,10 +23,10 @@ _vcat(x::Number...) = vcat(x...) _vcat(x::AbstractArray{<:Number}...) = vcat(x...) # If the arguments are a mix of numbers and matrices/vectors/arrays, # the numbers need to be copied for the dimensions to match -function _vcat(x::Union{Number, AbstractArray{<:Number}}...) +function _vcat(x::Union{Number, AbstractArray{<:Number}}...) example = first(Iterators.filter(e -> !(e isa Number), x)) dims = (1, size(example)[2:end]...) - x = map( el -> el isa Number ? (typeof(example))(fill(el, dims)) : el , x) + x = map(el -> el isa Number ? (typeof(example))(fill(el, dims)) : el, x) _vcat(x...) end _vcat(x...) = vcat(x...) @@ -143,14 +143,15 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa if e in keys(dict_depvars) depvar = _args[1] num_depvar = dict_depvars[depvar] - indvars = map( (indvar_) -> transform_expression(pinnrep, indvar_), _args[2:end]) + indvars = map((indvar_) -> transform_expression(pinnrep, indvar_), + _args[2:end]) var_ = is_integral ? 
:(u) : :($(Expr(:$, :u))) ex.args = if !multioutput - [var_, :( (_vcat)($(indvars...)) ), :($θ), :phi] + [var_, :((_vcat)($(indvars...))), :($θ), :phi] else [ var_, - :( (_vcat)($(indvars...)) ), + :((_vcat)($(indvars...))), Symbol(:($θ), num_depvar), Symbol(:phi, num_depvar), ] @@ -166,7 +167,8 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa end depvar = _args[1] num_depvar = dict_depvars[depvar] - indvars = map( (indvar_) -> transform_expression(pinnrep, indvar_), _args[2:end]) + indvars = map((indvar_) -> transform_expression(pinnrep, indvar_), + _args[2:end]) dict_interior_indvars = Dict([indvar .=> j for (j, indvar) in enumerate(dict_depvar_input[depvar])]) dim_l = length(dict_interior_indvars) @@ -177,13 +179,13 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa εs_dnv = [εs[d] for d in undv] ex.args = if !multioutput - [var_, :phi, :u, :( (_vcat)($(indvars...)) ), εs_dnv, order, :($θ)] + [var_, :phi, :u, :((_vcat)($(indvars...))), εs_dnv, order, :($θ)] else [ var_, Symbol(:phi, num_depvar), :u, - :( (_vcat)($(indvars...)) ), + :((_vcat)($(indvars...))), εs_dnv, order, Symbol(:($θ), num_depvar), @@ -351,7 +353,8 @@ function pair(eq, depvars, dict_depvars, dict_depvar_input) expr = toexpr(eq) pair_ = map(depvars) do depvar if !isempty(find_thing_in_expr(expr, depvar)) - dict_depvars[depvar] => filter(arg -> !isempty(find_thing_in_expr(expr, arg)), dict_depvar_input[depvar]) + dict_depvars[depvar] => filter(arg -> !isempty(find_thing_in_expr(expr, arg)), + dict_depvar_input[depvar]) end end Dict(filter(p -> p !== nothing, pair_)) @@ -466,7 +469,7 @@ function get_argument(eqs, dict_indvars, dict_depvars) """Arrays of instances of each dependent variable that appears in the expression, by dependent variable""" f_vars = filter(x -> !isempty(x), _vars) end -# vars = [depvar for expr in vars for depvar in expr ] + # vars = [depvar for expr in vars for depvar in expr ] args_ = map(vars) do _vars """Arguments of all instances of dependent variable, by instance, by dependent variable""" ind_args_ = map.(var -> var.args[2:end], _vars) @@ -475,8 +478,8 @@ function get_argument(eqs, dict_indvars, dict_depvars) all_ind_args = vcat((ind_args_...)...) 
# Add any independent variables from expression dependent variable calls - for ind_arg in all_ind_args - if ind_arg isa Expr + for ind_arg in all_ind_args + if ind_arg isa Expr for ind_var in collect(keys(dict_indvars)) if !isempty(NeuralPDE.find_thing_in_expr(ind_arg, ind_var)) push!(all_ind_args, ind_var) @@ -500,5 +503,5 @@ function get_argument(eqs, dict_indvars, dict_depvars) end end end - return args_ + return args_ end diff --git a/src/training_strategies.jl b/src/training_strategies.jl index 45586c6e47..3dbeb3e226 100644 --- a/src/training_strategies.jl +++ b/src/training_strategies.jl @@ -29,7 +29,7 @@ function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, # the points in the domain and on the boundary pde_train_sets, bcs_train_sets = train_sets - + pde_train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)), pde_train_sets) bcs_train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)), @@ -90,18 +90,20 @@ function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, strategy) pde_bounds, bcs_bounds = bounds - pde_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy, strategy.points) + pde_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy, + strategy.points) for (_loss, bound) in zip(datafree_pde_loss_function, pde_bounds)] - bc_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy, strategy.bcs_points) + bc_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy, + strategy.bcs_points) for (_loss, bound) in zip(datafree_bc_loss_function, bcs_bounds)] pde_loss_functions, bc_loss_functions end -function get_loss_function(loss_function, bound, eltypeθ, strategy::StochasticTraining, points = strategy.points; +function get_loss_function(loss_function, bound, eltypeθ, strategy::StochasticTraining, + points = strategy.points; τ = nothing) - loss = (θ) -> begin sets = generate_random_points(points, bound, eltypeθ) sets_ = adapt(parameterless_type(ComponentArrays.getdata(θ)), sets) diff --git a/test/IDE_tests.jl b/test/IDE_tests.jl index ec6548ebf4..f07d1fea55 100644 --- a/test/IDE_tests.jl +++ b/test/IDE_tests.jl @@ -76,18 +76,18 @@ println("Simple Integral Test 2") @variables u(..) Ix = Integral(x in DomainSets.ClosedInterval(0, x)) # eq = Ix(u(x, y) * cos(x)) ~ y * (x^3) / 3 # This is the same, but we're testing the parsing of the version below -eqs = [ Ix(u(x, 1) * cos(x)) ~ (x^3) / 3, - u(x, y) ~ y * u(x, 1.)] +eqs = [Ix(u(x, 1) * cos(x)) ~ (x^3) / 3, + u(x, y) ~ y * u(x, 1.0)] bcs = [u(0.0, y) ~ 0.0] domains = [x ∈ Interval(0.0, 1.00), - y ∈ Interval(0.5, 2.00)] + y ∈ Interval(0.5, 2.00)] # chain = Chain(Dense(1,15,Flux.σ),Dense(15,1)) chain = Lux.Chain(Lux.Dense(2, 15, Flux.σ), Lux.Dense(15, 1)) strategy_ = NeuralPDE.GridTraining(0.1) discretization = NeuralPDE.PhysicsInformedNN(chain, strategy_) -@named pde_system = PDESystem(eqs, bcs, domains, [x, y], [u(x,y)]) +@named pde_system = PDESystem(eqs, bcs, domains, [x, y], [u(x, y)]) prob = NeuralPDE.discretize(pde_system, discretization) sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) res = Optimization.solve(prob, OptimizationOptimJL.BFGS(); callback = callback, diff --git a/test/NNPDE_tests.jl b/test/NNPDE_tests.jl index fb11ce79f2..d345bd8702 100644 --- a/test/NNPDE_tests.jl +++ b/test/NNPDE_tests.jl @@ -26,10 +26,10 @@ function test_ode(strategy_, ic_hard_constraint) Dθ = Differential(θ) # 1D ODE - eq = ic_hard_constraint ? 
- Dθ(u(θ)) ~ θ^3 + 2 * θ + (θ^2) * ((1 + 3 * (θ^2)) / (1 + θ + (θ^3))) - - (u(θ) - u(0.) + 1.0) * (θ + ((1 + 3 * (θ^2)) / (1 + θ + θ^3))) : - Dθ(u(θ)) ~ θ^3 + 2 * θ + (θ^2) * ((1 + 3 * (θ^2)) / (1 + θ + (θ^3))) - + eq = ic_hard_constraint ? + Dθ(u(θ)) ~ θ^3 + 2 * θ + (θ^2) * ((1 + 3 * (θ^2)) / (1 + θ + (θ^3))) - + (u(θ) - u(0.0) + 1.0) * (θ + ((1 + 3 * (θ^2)) / (1 + θ + θ^3))) : + Dθ(u(θ)) ~ θ^3 + 2 * θ + (θ^2) * ((1 + 3 * (θ^2)) / (1 + θ + (θ^3))) - u(θ) * (θ + ((1 + 3 * (θ^2)) / (1 + θ + θ^3))) # Initial and boundary conditions @@ -57,9 +57,10 @@ function test_ode(strategy_, ic_hard_constraint) analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2 ts = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] u_real = [analytic_sol_func(t) for t in ts] - u_predict = ic_hard_constraint ? - [first(phi(t, res.minimizer)) - first(phi(0., res.minimizer)) + 1.0 for t in ts] : - [first(phi(t, res.minimizer)) for t in ts] + u_predict = ic_hard_constraint ? + [first(phi(t, res.minimizer)) - first(phi(0.0, res.minimizer)) + 1.0 + for t in ts] : + [first(phi(t, res.minimizer)) for t in ts] @test u_predict≈u_real atol=0.1 # using Plots diff --git a/test/direct_function_tests.jl b/test/direct_function_tests.jl index 949ad0ea7e..2f18cfe095 100644 --- a/test/direct_function_tests.jl +++ b/test/direct_function_tests.jl @@ -83,7 +83,7 @@ res = Optimization.solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) dx = 0.01 xs = collect(x0:dx:x_end) func_s = func(xs) -func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0., res.u) +func_approx(x) = discretization.phi(x, res.u) .- discretization.phi(0.0, res.u) @test func_approx(xs')≈func(xs') rtol=0.01 # plot(xs,func(xs)) @@ -94,11 +94,11 @@ println("Approximation of implicit function 1D") @parameters x @variables u(..) -eq = [u(sin(x)) ~ cos(x) + cos(2*x)] +eq = [u(sin(x)) ~ cos(x) + cos(2 * x)] bc = [u(0) ~ u(0)] -x0 = pi/2 -x_end = 3*pi/2 +x0 = pi / 2 +x_end = 3 * pi / 2 domain = [x ∈ Interval(x0, x_end)] hidden = 20 @@ -188,7 +188,7 @@ println("Approximation of function 2D 2") @parameters x, y @variables u(..) 
func(x, y) = -sin(x) * sin(y) * exp(-((x - pi)^2 + (y - pi)^2)) -eq = [u(x, y) - u(0,0) ~ func(x, y)] +eq = [u(x, y) - u(0, 0) ~ func(x, y)] bc = [u(0, 0) ~ u(0, 0)] x0 = -10 @@ -222,9 +222,11 @@ phi = discretization.phi xs = collect(x0:0.1:x_end) ys = collect(y0:0.1:y_end) -func_approx(x,y) = first(phi([x,y], res.minimizer)) .- first(phi([0.,0.], res.minimizer)) +function func_approx(x, y) + first(phi([x, y], res.minimizer)) .- first(phi([0.0, 0.0], res.minimizer)) +end u_predict = reshape([func_approx(x, y) for x in xs for y in ys], (length(xs), length(ys))) -u_real = reshape([ func(x, y) for x in xs for y in ys], (length(xs), length(ys))) +u_real = reshape([func(x, y) for x in xs for y in ys], (length(xs), length(ys))) diff_u = abs.(u_predict .- u_real) -@test u_predict≈u_real rtol=0.05 \ No newline at end of file +@test u_predict≈u_real rtol=0.05 From 7a1e0b559812a312cb0dc2b7cc99741a614c25ff Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Tue, 7 Mar 2023 12:07:52 -0500 Subject: [PATCH 36/42] Docstrings were counting against code coverage --- src/symbolic_utilities.jl | 12 ++++++------ test/NNPDE_tests.jl | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index ea6363d32b..da7fa8fd7e 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -460,21 +460,21 @@ function get_argument(eqs, _indvars::Array, _depvars::Array) get_argument(eqs, dict_indvars, dict_depvars) end function get_argument(eqs, dict_indvars, dict_depvars) - """Equations, as expressions""" + "Equations, as expressions" exprs = toexpr.(eqs) - """Instances of each dependent variable that appears in the expression, by dependent variable, by equation""" + "Instances of each dependent variable that appears in the expression, by dependent variable, by equation" vars = map(exprs) do expr # For each equation,... - """Arrays of instances of each dependent variable, by dependent variable""" + "Arrays of instances of each dependent variable, by dependent variable" _vars = map(depvar -> find_thing_in_expr(expr, depvar), collect(keys(dict_depvars))) - """Arrays of instances of each dependent variable that appears in the expression, by dependent variable""" + "Arrays of instances of each dependent variable that appears in the expression, by dependent variable" f_vars = filter(x -> !isempty(x), _vars) end # vars = [depvar for expr in vars for depvar in expr ] args_ = map(vars) do _vars - """Arguments of all instances of dependent variable, by instance, by dependent variable""" + "Arguments of all instances of dependent variable, by instance, by dependent variable" ind_args_ = map.(var -> var.args[2:end], _vars) syms = Set{Symbol}() - """All arguments in any instance of a dependent variable""" + "All arguments in any instance of a dependent variable" all_ind_args = vcat((ind_args_...)...) 
# Add any independent variables from expression dependent variable calls diff --git a/test/NNPDE_tests.jl b/test/NNPDE_tests.jl index d345bd8702..6808b90dff 100644 --- a/test/NNPDE_tests.jl +++ b/test/NNPDE_tests.jl @@ -352,7 +352,7 @@ domains = [x ∈ Interval(0.0, 1.0)] chain = [[Lux.Chain(Lux.Dense(1, 12, Lux.tanh), Lux.Dense(12, 12, Lux.tanh), Lux.Dense(12, 1)) for _ in 1:3] [Lux.Chain(Lux.Dense(1, 4, Lux.tanh), Lux.Dense(4, 1)) for _ in 1:2]] -quasirandom_strategy = NeuralPDE.QuasiRandomTraining(130; #points +quasirandom_strategy = NeuralPDE.QuasiRandomTraining(200; #points sampling_alg = LatinHypercubeSample()) discretization = NeuralPDE.PhysicsInformedNN(chain, quasirandom_strategy) @@ -372,7 +372,7 @@ cb_ = function (p, l) return false end -res = solve(prob, OptimizationOptimJL.BFGS(); maxiters = 1000) +res = solve(prob, OptimizationOptimJL.BFGS(); maxiters = 1500) phi = discretization.phi[1] analytic_sol_func(x) = (π * x * (-x + (π^2) * (2 * x - 3) + 1) - sin(π * x)) / (π^3) From 7f527c7ed4d4ad2cbed53f3b73bc0349dae6ad84 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Wed, 8 Mar 2023 11:45:39 -0500 Subject: [PATCH 37/42] Improperly used docstrings changed to comments --- src/symbolic_utilities.jl | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index da7fa8fd7e..24da508855 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -460,24 +460,25 @@ function get_argument(eqs, _indvars::Array, _depvars::Array) get_argument(eqs, dict_indvars, dict_depvars) end function get_argument(eqs, dict_indvars, dict_depvars) - "Equations, as expressions" - exprs = toexpr.(eqs) - "Instances of each dependent variable that appears in the expression, by dependent variable, by equation" + exprs = toexpr.(eqs) # Equations, as expressions + # vars is an array of arrays of arrays, representing instances of each dependent variable that appears in the expression, by dependent variable, by equation vars = map(exprs) do expr # For each equation,... - "Arrays of instances of each dependent variable, by dependent variable" + # For each dependent variable, make an array of instances of the dependent variable _vars = map(depvar -> find_thing_in_expr(expr, depvar), collect(keys(dict_depvars))) - "Arrays of instances of each dependent variable that appears in the expression, by dependent variable" + # Remove any empty arrays, representing dependent variables that don't appear in the equation f_vars = filter(x -> !isempty(x), _vars) end - # vars = [depvar for expr in vars for depvar in expr ] - args_ = map(vars) do _vars - "Arguments of all instances of dependent variable, by instance, by dependent variable" + + args_ = map(vars) do _vars # For each equation, ... + # _vars is an array of arrays of instances of each dependent variables that appears in the equation, by dependent variable + + # For each dependent variable, for each instance of the dependent variable, get all arguments of that instance ind_args_ = map.(var -> var.args[2:end], _vars) - syms = Set{Symbol}() - "All arguments in any instance of a dependent variable" - all_ind_args = vcat((ind_args_...)...) 
-    # Add any independent variables from expression dependent variable calls
+    # Get all arguments used in any instance of any dependent variable
+    all_ind_args = reduce(vcat, reduce(vcat, ind_args_))
+
+    # Add any independent variables from expression-typed dependent variable calls
     for ind_arg in all_ind_args
         if ind_arg isa Expr
             for ind_var in collect(keys(dict_indvars))
                 if !isempty(NeuralPDE.find_thing_in_expr(ind_arg, ind_var))
                     push!(all_ind_args, ind_var)
                 end
             end
         end
     end
+    syms = Set{Symbol}()
     filter(all_ind_args) do ind_arg # For each argument
         if ind_arg isa Symbol
             if ind_arg ∈ syms

From 530d50ead65a0408a8b9f9ac8a1d0615dbeffd3f Mon Sep 17 00:00:00 2001
From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com>
Date: Wed, 8 Mar 2023 13:50:51 -0500
Subject: [PATCH 38/42] Added comments for _vcat

---
 src/symbolic_utilities.jl | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl
index 24da508855..b909fff98d 100644
--- a/src/symbolic_utilities.jl
+++ b/src/symbolic_utilities.jl
@@ -19,10 +19,27 @@ julia> _dot_(e)
 dottable_(x) = Broadcast.dottable(x)
 dottable_(x::Function) = true
+"""
+    _vcat(x...)
+
+Wraps vcat, but isn't dottable. Also, if x contains a mixture of arrays and
+scalars, it fills the scalars to match the dimensions of the arrays.
+
+# Examples
+```julia-repl
+julia> _vcat([1 2], [3 4])
+2×2 Matrix{Int64}:
+ 1  2
+ 3  4
+
+julia> _vcat(0, [1 2])
+2×2 Matrix{Int64}:
+ 0  0
+ 1  2
+```
+"""
 _vcat(x::Number...) = vcat(x...)
 _vcat(x::AbstractArray{<:Number}...) = vcat(x...)
-# If the arguments are a mix of numbers and matrices/vectors/arrays,
-# the numbers need to be copied for the dimensions to match
 function _vcat(x::Union{Number, AbstractArray{<:Number}}...)
     example = first(Iterators.filter(e -> !(e isa Number), x))
     dims = (1, size(example)[2:end]...)
@@ -140,15 +157,17 @@ function _transform_expression(pinnrep::PINNRepresentation, ex; is_integral = fa
     _args = ex.args
     for (i, e) in enumerate(_args)
         if !(e isa Expr)
-            if e in keys(dict_depvars)
+            if e in keys(dict_depvars) # _args represents a call to a dependent variable
                 depvar = _args[1]
                 num_depvar = dict_depvars[depvar]
                 indvars = map((indvar_) -> transform_expression(pinnrep, indvar_),
                               _args[2:end])
                 var_ = is_integral ? :(u) : :($(Expr(:$, :u)))
                 ex.args = if !multioutput
+                    # Make something like u(x,y) into u([x,y], θ, phi), since the neural net needs to be called with parameters
+                    # Note that [x,y] is achieved with _vcat, which can also fill scalars, as in the u(0,x) case, where vcat(0,x) would fail if x were a row vector
                     [var_, :((_vcat)($(indvars...))), :($θ), :phi]
-                else
+                else # If multioutput, there are different θ and phi for each dependent variable
                     [
                         var_,
                         :((_vcat)($(indvars...))),

From e4f15368852dfa124ec8b58fab717aafb71c377d Mon Sep 17 00:00:00 2001
From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com>
Date: Thu, 9 Mar 2023 12:27:17 -0500
Subject: [PATCH 39/42] Updated docstring for build_symbolic_loss_function

---
 src/discretize.jl | 104 +++++++++++++++++++++++++++++++++-------------
 1 file changed, 75 insertions(+), 29 deletions(-)

diff --git a/src/discretize.jl b/src/discretize.jl
index decb77e728..2e775137cd 100644
--- a/src/discretize.jl
+++ b/src/discretize.jl
@@ -10,35 +10,81 @@ Take expressions in the form:

 to

-:((cord, θ, phi, derivative, u)->begin
-          #= ... =#
-          #= ... =#
-          begin
-              (θ1, θ2) = (θ[1:33], θ[34:66])
-              (phi1, phi2) = (phi[1], phi[2])
-              let (x, y) = (cord[1], cord[2])
-                  [(+)(derivative(phi1, u, [x, y], [[ε, 0.0]], 1, θ1), (*)(4, derivative(phi2, u, [x, y], [[0.0, ε]], 1, θ2))) - 0,
-                   (+)(derivative(phi2, u, [x, y], [[ε, 0.0]], 1, θ2), (*)(9, derivative(phi1, u, [x, y], [[0.0, ε]], 1, θ1))) - 0]
-              end
-          end
-      end)
-
-for Flux.Chain, and
-
-:((cord, θ, phi, derivative, u)->begin
-          #= ... =#
-          #= ... =#
-          begin
-              (u1, u2) = (θ.depvar.u1, θ.depvar.u2)
-              (phi1, phi2) = (phi[1], phi[2])
-              let (x, y) = (cord[1], cord[2])
-                  [(+)(derivative(phi1, u, [x, y], [[ε, 0.0]], 1, u1), (*)(4, derivative(phi2, u, [x, y], [[0.0, ε]], 1, u1))) - 0,
-                   (+)(derivative(phi2, u, [x, y], [[ε, 0.0]], 1, u2), (*)(9, derivative(phi1, u, [x, y], [[0.0, ε]], 1, u2))) - 0]
-              end
-          end
-      end)
-
-for Lux.AbstractExplicitLayer
+:((cord, θ, phi, derivative, integral, u, p)->begin
+          #= ... =#
+          #= ... =#
+          begin
+              (θ1, θ2) = (θ[1:205], θ[206:410])
+              (phi1, phi2) = (phi[1], phi[2])
+              let (x, y) = (cord[[1], :], cord[[2], :])
+                  begin
+                      cord2 = vcat(x, y)
+                      cord1 = vcat(x, y)
+                  end
+                  (+).((*).(4, derivative(phi2, u, _vcat(x, y), [[0.0, ε]], 1, θ2)), derivative(phi1, u, _vcat(x, y), [[ε, 0.0]], 1, θ1)) .- 0
+              end
+          end
+      end)
+
+for Dx(u1(x,y)) + 4*Dy(u2(x,y)) ~ 0, and
+
+:((cord, θ, phi, derivative, integral, u, p)->begin
+          #= ... =#
+          #= ... =#
+          begin
+              (θ1, θ2) = (θ[1:205], θ[206:410])
+              (phi1, phi2) = (phi[1], phi[2])
+              let (x, y) = (cord[[1], :], cord[[2], :])
+                  begin
+                      cord2 = vcat(x, y)
+                      cord1 = vcat(x, y)
+                  end
+                  (+).((*).(9, derivative(phi1, u, _vcat(x, y), [[0.0, ε]], 1, θ1)), derivative(phi2, u, _vcat(x, y), [[ε, 0.0]], 1, θ2)) .- 0
+              end
+          end
+      end)
+
+for Dx(u2(x,y)) + 9*Dy(u1(x,y)) ~ 0 (i.e., separate loss functions are created for each equation)
+
+with Flux.Chain; and
+
+:((cord, θ, phi, derivative, integral, u, p)->begin
+          #= ... =#
+          #= ... =#
+          begin
+              (θ1, θ2) = (θ.depvar.u1, θ.depvar.u2)
+              (phi1, phi2) = (phi[1], phi[2])
+              let (x, y) = (cord[[1], :], cord[[2], :])
+                  begin
+                      cord2 = vcat(x, y)
+                      cord1 = vcat(x, y)
+                  end
+                  (+).((*).(4, derivative(phi2, u, _vcat(x, y), [[0.0, ε]], 1, θ2)), derivative(phi1, u, _vcat(x, y), [[ε, 0.0]], 1, θ1)) .- 0
+              end
+          end
+      end)
+
+for Dx(u1(x,y)) + 4*Dy(u2(x,y)) ~ 0 and
+
+:((cord, θ, phi, derivative, integral, u, p)->begin
+          #= ... =#
+          #= ...
=# + begin + (θ1, θ2) = (θ.depvar.u1, θ.depvar.u2) + (phi1, phi2) = (phi[1], phi[2]) + let (x, y) = (cord[[1], :], cord[[2], :]) + begin + cord2 = vcat(x, y) + cord1 = vcat(x, y) + end + (+).((*).(9, derivative(phi1, u, _vcat(x, y), [[0.0, ε]], 1, θ1)), derivative(phi2, u, _vcat(x, y), [[ε, 0.0]], 1, θ2)) .- 0 + end + end + end) + +for Dx(u2(x,y)) + 9*Dy(u1(x,y)) ~ 0 + +with Lux.Chain """ function build_symbolic_loss_function(pinnrep::PINNRepresentation, eqs; eq_params = SciMLBase.NullParameters(), From 238b3151623c5b6b3778df556ec3fe337e48e018 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Thu, 9 Mar 2023 19:15:35 -0500 Subject: [PATCH 40/42] Reductions needed inits for cases like u(0)=0 --- src/symbolic_utilities.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index b909fff98d..9887ee53c9 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -495,7 +495,7 @@ function get_argument(eqs, dict_indvars, dict_depvars) ind_args_ = map.(var -> var.args[2:end], _vars) # Get all arguments used in any instance of any dependent variable - all_ind_args = reduce(vcat, reduce(vcat, ind_args_)) + all_ind_args = reduce(vcat, reduce(vcat, ind_args_,init=Any[]),init=Any[]) # Add any independent variables from expression-typed dependent variable calls for ind_arg in all_ind_args From 44f3a28a5d01a54b0d0f671ffe6b0a7dd945749f Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Thu, 9 Mar 2023 20:03:42 -0500 Subject: [PATCH 41/42] Formatted with JuliaFormatter --- docs/src/index.md | 4 ++-- src/symbolic_utilities.jl | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/index.md b/docs/src/index.md index ec96b49fcb..7098faffed 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -81,8 +81,8 @@ While one may think this recreates the neural network to act in `Float64` precis and instead its values will silently downgrade everything to `Float32`. This is only fixed by `Chain(Dense(2, 10, tanh), Dense(10, 1)) |> f64`. Similar cases will [lead to dropped gradients with complex numbers](https://github.com/FluxML/Optimisers.jl/issues/95). This is not an issue with the automatic differentiation library commonly associated with Flux (Zygote.jl) but rather due to choices in the neural network library's -decision for how to approach type handling and precision. Thus when using DiffEqFlux.jl with Flux, the user must be very careful to ensure that -the precision of the arguments are correct, and anything that requires alternative types (like `TrackerAdjoint` tracked values, +decision for how to approach type handling and precision. Thus when using DiffEqFlux.jl with Flux, the user must be very careful to ensure that +the precision of the arguments are correct, and anything that requires alternative types (like `TrackerAdjoint` tracked values, `ForwardDiffSensitivity` dual numbers, and TaylorDiff.jl differentiation) are suspect. 
Lux.jl has none of these issues, is simpler to work with due to the parameters in its function calls being explicit rather than implicit global diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 9887ee53c9..87ecb8de07 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -495,7 +495,7 @@ function get_argument(eqs, dict_indvars, dict_depvars) ind_args_ = map.(var -> var.args[2:end], _vars) # Get all arguments used in any instance of any dependent variable - all_ind_args = reduce(vcat, reduce(vcat, ind_args_,init=Any[]),init=Any[]) + all_ind_args = reduce(vcat, reduce(vcat, ind_args_, init = Any[]), init = Any[]) # Add any independent variables from expression-typed dependent variable calls for ind_arg in all_ind_args From fc7d36c277abf7e6496366fba671c351d457b012 Mon Sep 17 00:00:00 2001 From: Nicholas Klugman <13633349+nicholaskl97@users.noreply.github.com> Date: Mon, 3 Apr 2023 10:53:52 -0400 Subject: [PATCH 42/42] Added a new integral test --- src/NeuralPDE.jl | 2 +- test/IDE_tests.jl | 53 ++++++++++++++++++++++++++++++++++++----------- 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/src/NeuralPDE.jl b/src/NeuralPDE.jl index 3871446e1e..d607c10b3d 100644 --- a/src/NeuralPDE.jl +++ b/src/NeuralPDE.jl @@ -24,7 +24,7 @@ import ModelingToolkit: value, nameof, toexpr, build_expr, expand_derivatives import DomainSets: Domain, ClosedInterval import ModelingToolkit: Interval, infimum, supremum #,Ball import SciMLBase: @add_kwonly, parameterless_type -using Flux: @nograd +using Zygote: @nograd import Optimisers import UnPack: @unpack import RecursiveArrayTools diff --git a/test/IDE_tests.jl b/test/IDE_tests.jl index f07d1fea55..95541c0552 100644 --- a/test/IDE_tests.jl +++ b/test/IDE_tests.jl @@ -46,8 +46,7 @@ println("Simple Integral Test") @parameters x @variables u(..) Ix = Integral(x in DomainSets.ClosedInterval(0, x)) -# eq = Ix(u(x)) ~ (x^3)/3 -eq = Ix(u(x) * cos(x)) ~ (x^3) / 3 +eq = Ix(u(x)) ~ (x^3) / 3 bcs = [u(0.0) ~ 0.0] domains = [x ∈ Interval(0.0, 1.00)] @@ -63,7 +62,7 @@ res = Optimization.solve(prob, OptimizationOptimJL.BFGS(); callback = callback, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] phi = discretization.phi u_predict = [first(phi([x], res.minimizer)) for x in xs] -u_real = [x^2 / cos(x) for x in xs] +u_real = [x^2 for x in xs] @test Flux.mse(u_real, u_predict) < 0.001 # plot(xs,u_real) @@ -74,14 +73,15 @@ println("Simple Integral Test 2") @parameters x y @variables u(..) 
-Ix = Integral(x in DomainSets.ClosedInterval(0, x)) -# eq = Ix(u(x, y) * cos(x)) ~ y * (x^3) / 3 # This is the same, but we're testing the parsing of the version below -eqs = [Ix(u(x, 1) * cos(x)) ~ (x^3) / 3, - u(x, y) ~ y * u(x, 1.0)] +Iy = Integral(y in DomainSets.ClosedInterval(0, y)) +# eq = Iy(u(x, y) * cos(y)) ~ x * (y^3) / 3 # This is the same, but we're testing the parsing of the version below +eqs = [Iy(u(1.0, y) * cos(y)) ~ (y^3) / 3, + u(x, y) ~ x * u(1.0, y)] -bcs = [u(0.0, y) ~ 0.0] -domains = [x ∈ Interval(0.0, 1.00), - y ∈ Interval(0.5, 2.00)] +bcs = [u(x, 0.0) ~ 0.0] +domains = [ x ∈ Interval(0.5, 2.00), + y ∈ Interval(0.0, 1.00) + ] # chain = Chain(Dense(1,15,Flux.σ),Dense(15,1)) chain = Lux.Chain(Lux.Dense(2, 15, Flux.σ), Lux.Dense(15, 1)) strategy_ = NeuralPDE.GridTraining(0.1) @@ -91,18 +91,47 @@ discretization = NeuralPDE.PhysicsInformedNN(chain, prob = NeuralPDE.discretize(pde_system, discretization) sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) res = Optimization.solve(prob, OptimizationOptimJL.BFGS(); callback = callback, - maxiters = 500) + maxiters = 2000) xys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] xs = Iterators.product(xys...) phi = discretization.phi u_predict = [first(phi([x...], res.minimizer)) for x in xs] -u_real = [x[1]^2 / cos(x[1]) * x[2] for x in xs] +u_real = [x[2]^2 / cos(x[2]) * x[1] for x in xs] @test Flux.mse(u_real, u_predict) < 0.001 # p1 = plot(xys..., u_real', linetype=:contourf, title="Analytic"); # p2 = plot(xys..., u_predict', linetype=:contourf, title="Predicted"); # plot(p1,p2) +## Simple Integral Test 3 +println("Simple Integral Test 3") + +@parameters x +@variables u(..) +Ix = Integral(x in DomainSets.ClosedInterval(0, x)) +eq = Ix(u(sin(pi * x / 2))) ~ (x^3) / 3 + +bcs = [u(0.0) ~ 0.0] +domains = [x ∈ Interval(0.0, 1.00)] +# chain = Chain(Dense(1,15,Flux.σ),Dense(15,1)) +chain = Lux.Chain(Lux.Dense(1, 15, Flux.σ), Lux.Dense(15, 1)) +strategy_ = NeuralPDE.GridTraining(0.1) +discretization = NeuralPDE.PhysicsInformedNN(chain, + strategy_) +@named pde_system = PDESystem(eq, bcs, domains, [x], [u(x)]) +prob = NeuralPDE.discretize(pde_system, discretization) +sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) +res = Optimization.solve(prob, OptimizationOptimJL.BFGS(); callback = callback, + maxiters = 300) +xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] +phi = discretization.phi +u_predict = [first(phi([sin(pi * x / 2)], res.minimizer)) for x in xs] +u_real = [x^2 for x in xs] +@test Flux.mse(u_real, u_predict) < 0.001 + +# plot(xs,u_real) +# plot!(xs,u_predict) + #simple multidimensitonal integral test println("simple multidimensitonal integral test")
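
Note: the closed form behind "Simple Integral Test 3" follows by differentiating
both sides of Ix(u(sin(pi * x / 2))) ~ (x^3) / 3 with respect to x, which gives
u(sin(pi * x / 2)) = x^2, i.e. u(y) = ((2 / pi) * asin(y))^2 on [0, 1]. That is
why the test compares phi evaluated at sin(pi * x / 2) against x^2. A standalone
sanity check of that identity (QuadGK is an assumed extra dependency here; this
sketch is not part of the test suite):

```julia
using QuadGK

u_closed(y) = (2 / pi * asin(y))^2  # solution implied by the integral equation

# Left-hand side of the equation: integral of u(sin(pi*t/2)) for t from 0 to x
lhs(x) = quadgk(t -> u_closed(sin(pi * t / 2)), 0, x)[1]

@assert isapprox(lhs(0.7), 0.7^3 / 3; rtol = 1e-6)    # matches (x^3)/3
@assert isapprox(u_closed(sin(pi * 0.7 / 2)), 0.7^2)  # matches u_real in the test
```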