From db83c715b54dfdeef15cad5f5e826596eb5a3b2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Rueda-Ram=C3=ADrez?= Date: Mon, 18 Nov 2024 21:01:02 +0100 Subject: [PATCH 01/41] Output the mpi rank to element_variables (SaveSolutionCallback) for visualization with trixi2vtk (#2132) * Output the mpi rank to element_variables for visualization * Added comments * Added functionality for all parallel mesh types * Update src/solvers/dg.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Use the right array data structure for the mpi rank * Update src/solvers/dgsem_tree/dg_parallel.jl Co-authored-by: Michael Schlottke-Lakemper --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Michael Schlottke-Lakemper --- src/solvers/dg.jl | 12 ++++++++++++ src/solvers/dgsem_tree/dg_parallel.jl | 10 ++++++++++ 2 files changed, 22 insertions(+) diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl index 0e4d667fbc2..a0fb0d95079 100644 --- a/src/solvers/dg.jl +++ b/src/solvers/dg.jl @@ -12,6 +12,17 @@ function get_element_variables!(element_variables, u, mesh, equations, nothing end +# Function to define "element variables" for the SaveSolutionCallback. It does +# nothing by default, but can be specialized for certain mesh types. For instance, +# parallel meshes output the mpi rank as an "element variable". +function get_element_variables!(element_variables, mesh, dg, cache) + nothing +end + +# Function to define "element variables" for the SaveSolutionCallback. It does +# nothing by default, but can be specialized for certain volume integral types. +# For instance, shock capturing volume integrals output the blending factor +# as an "element variable". function get_node_variables!(node_variables, mesh, equations, volume_integral::AbstractVolumeIntegral, dg, cache) nothing @@ -427,6 +438,7 @@ Base.summary(io::IO, dg::DG) = print(io, "DG(" * summary(dg.basis) * ")") function get_element_variables!(element_variables, u, mesh, equations, dg::DG, cache) get_element_variables!(element_variables, u, mesh, equations, dg.volume_integral, dg, cache) + get_element_variables!(element_variables, mesh, dg, cache) end function get_node_variables!(node_variables, mesh, equations, dg::DG, cache) diff --git a/src/solvers/dgsem_tree/dg_parallel.jl b/src/solvers/dgsem_tree/dg_parallel.jl index c614fe0d0e6..3b33c9c33a5 100644 --- a/src/solvers/dgsem_tree/dg_parallel.jl +++ b/src/solvers/dgsem_tree/dg_parallel.jl @@ -5,6 +5,16 @@ @muladd begin #! format: noindent +# Function to output the mpi rank for visualization +function get_element_variables!(element_variables, + mesh::Union{ParallelTreeMesh, ParallelP4estMesh, + ParallelT8codeMesh}, + dg, cache) + element_variables[:mpi_rank] = ones(real(dg), nelements(mesh, dg, cache)) * + mpi_rank() + return nothing +end + # Initialize MPI data structures. This works for both the # `TreeMesh` and the `P4estMesh` and is dimension-agnostic. function init_mpi_data_structures(mpi_neighbor_interfaces, mpi_neighbor_mortars, n_dims, From 21a5a8d27042aa852ed421928d0617eb8744092f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Rueda-Ram=C3=ADrez?= Date: Wed, 20 Nov 2024 17:56:19 +0100 Subject: [PATCH 02/41] MPI implementation for non-conservative equations using `ParallelP4estMesh{3}` (#2126) * Added first working version of MPI with non-conservative systems for CONFORMING meshes (analysis routines need work) * Added a first version of calc_mpi_mortar_flux! 
for non-conservative equations (not working yet) * Added non-unique mortar fluxes to be able to handle non-conservative equations * format * Updated p4est 3d tests * Updated parabolic and parallel p4est 3d implementations for new naming convention * format * Adapted MPI mortars to new mortar fluxes fstar_primary and fstar_secondary * Fixed problem with parabolic non-conforming discretization * One solution for the MHD analysis problem with MPI * Added parallel MHD test * Selected the right type for inizialization of integral in integrate_via_indices * Added minimal test to reproduce issues with nranks > ntrees * Fixed GLM speed callback for parallel meshes * Fixed the computation of :linf_divb for parallel meshes and improved reduction operations for GLM speed * Added non-conforming non-conservative parallel test with 1 tree to test nranks > ntrees * Removed unused MHD conforming test * Apply suggestions from code review Change the occurrences of `Float64` literals to `Float32`-compatible literals * Format * Apply suggestions from code review Co-authored-by: Hendrik Ranocha --------- Co-authored-by: Hendrik Ranocha --- src/callbacks_step/analysis_dg3d.jl | 10 ++ src/callbacks_step/analysis_dg3d_parallel.jl | 10 +- src/callbacks_step/glm_speed_dg.jl | 14 +++ src/solvers/dgsem_p4est/dg_3d_parallel.jl | 99 +++++++++++++++++--- test/test_mpi_p4est_3d.jl | 74 +++++++++++++++ 5 files changed, 188 insertions(+), 19 deletions(-) diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl index fd501a7257c..85880380c61 100644 --- a/src/callbacks_step/analysis_dg3d.jl +++ b/src/callbacks_step/analysis_dg3d.jl @@ -376,6 +376,11 @@ function analyze(::Val{:linf_divb}, du, u, t, end end + if mpi_isparallel() + # Base.max instead of max needed, see comment in src/auxiliary/math.jl + linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[] + end + return linf_divb end @@ -412,6 +417,11 @@ function analyze(::Val{:linf_divb}, du, u, t, end end + if mpi_isparallel() + # Base.max instead of max needed, see comment in src/auxiliary/math.jl + linf_divb = MPI.Allreduce!(Ref(linf_divb), Base.max, mpi_comm())[] + end + return linf_divb end end # @muladd diff --git a/src/callbacks_step/analysis_dg3d_parallel.jl b/src/callbacks_step/analysis_dg3d_parallel.jl index 70a616367cd..285f0f62de6 100644 --- a/src/callbacks_step/analysis_dg3d_parallel.jl +++ b/src/callbacks_step/analysis_dg3d_parallel.jl @@ -72,10 +72,12 @@ function integrate_via_indices(func::Func, u, @unpack weights = dg.basis # Initialize integral with zeros of the right shape - # Pass `zero(SVector{nvariables(equations), eltype(u))}` to `func` since `u` might be empty, if the - # current rank has no elements, see also https://github.com/trixi-framework/Trixi.jl/issues/1096. - integral = zero(func(zero(SVector{nvariables(equations), eltype(u)}), 1, 1, 1, 1, - equations, dg, args...)) + # Pass `zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg), nnodes(dg), 1)` + # to `func` since `u` might be empty, if the current rank has no elements. + # See also https://github.com/trixi-framework/Trixi.jl/issues/1096, and + # https://github.com/trixi-framework/Trixi.jl/pull/2126/files/7cbc57cfcba93e67353566e10fce1f3edac27330#r1814483243. 
+ integral = zero(func(zeros(eltype(u), nvariables(equations), nnodes(dg), nnodes(dg), + nnodes(dg), 1), 1, 1, 1, 1, equations, dg, args...)) volume = zero(real(mesh)) # Use quadrature to numerically integrate over entire domain diff --git a/src/callbacks_step/glm_speed_dg.jl b/src/callbacks_step/glm_speed_dg.jl index 302aae356ab..4d9e4eee23f 100644 --- a/src/callbacks_step/glm_speed_dg.jl +++ b/src/callbacks_step/glm_speed_dg.jl @@ -13,6 +13,14 @@ function calc_dt_for_cleaning_speed(cfl::Real, mesh, # Cartesian meshes max_scaled_speed_for_c_h = maximum(cache.elements.inverse_jacobian) * ndims(equations) + + if mpi_isparallel() + # Base.max instead of max needed, see comment in src/auxiliary/math.jl + max_scaled_speed_for_c_h = MPI.Allreduce!(Ref(max_scaled_speed_for_c_h), + Base.max, + mpi_comm())[] + end + # OBS! This depends on the implementation details of the StepsizeCallback and needs to be adapted # as well if that callback changes. return cfl * 2 / (nnodes(dg) * max_scaled_speed_for_c_h) @@ -29,6 +37,12 @@ function calc_dt_for_cleaning_speed(cfl::Real, mesh, # Copies implementation behavior of `calc_dt_for_cleaning_speed` for DGSEM discretizations. max_scaled_speed_for_c_h = inv(minimum(md.J)) * ndims(equations) + if mpi_isparallel() + # Base.max instead of max needed, see comment in src/auxiliary/math.jl + max_scaled_speed_for_c_h = MPI.Allreduce!(Ref(max_scaled_speed_for_c_h), + Base.max, + mpi_comm())[] + end # This mimics `max_dt` for `TreeMesh`, except that `nnodes(dg)` is replaced by # `polydeg+1`. This is because `nnodes(dg)` returns the total number of # multi-dimensional nodes for DGMulti solver types, while `nnodes(dg)` returns diff --git a/src/solvers/dgsem_p4est/dg_3d_parallel.jl b/src/solvers/dgsem_p4est/dg_3d_parallel.jl index 3daca10e821..e51836e8a51 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parallel.jl @@ -267,6 +267,40 @@ end end end +# Inlined version of the interface flux computation for non-conservative equations +@inline function calc_mpi_interface_flux!(surface_flux_values, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, + nonconservative_terms::True, equations, + surface_integral, dg::DG, cache, + interface_index, normal_direction, + interface_i_node_index, + interface_j_node_index, local_side, + surface_i_node_index, surface_j_node_index, + local_direction_index, local_element_index) + @unpack u = cache.mpi_interfaces + surface_flux, nonconservative_flux = surface_integral.surface_flux + + u_ll, u_rr = get_surface_node_vars(u, equations, dg, + interface_i_node_index, interface_j_node_index, + interface_index) + + # Compute flux and non-conservative term for this side of the interface + if local_side == 1 + flux_ = surface_flux(u_ll, u_rr, normal_direction, equations) + noncons_flux_ = nonconservative_flux(u_ll, u_rr, normal_direction, equations) + else # local_side == 2 + flux_ = -surface_flux(u_ll, u_rr, -normal_direction, equations) + noncons_flux_ = -nonconservative_flux(u_rr, u_ll, -normal_direction, equations) + end + + for v in eachvariable(equations) + surface_flux_values[v, surface_i_node_index, surface_j_node_index, + local_direction_index, local_element_index] = flux_[v] + + 0.5f0 * noncons_flux_[v] + end +end + function prolong2mpimortars!(cache, u, mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, equations, @@ -384,12 +418,13 @@ function calc_mpi_mortar_flux!(surface_flux_values, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, local_neighbor_positions, 
node_indices = cache.mpi_mortars @unpack contravariant_vectors = cache.elements - @unpack fstar_primary_threaded, fstar_tmp_threaded = cache + @unpack fstar_primary_threaded, fstar_secondary_threaded, fstar_tmp_threaded = cache index_range = eachnode(dg) @threaded for mortar in eachmpimortar(dg, cache) # Choose thread-specific pre-allocated container - fstar = fstar_primary_threaded[Threads.threadid()] + fstar_primary = fstar_primary_threaded[Threads.threadid()] + fstar_secondary = fstar_secondary_threaded[Threads.threadid()] fstar_tmp = fstar_tmp_threaded[Threads.threadid()] # Get index information on the small elements @@ -412,7 +447,8 @@ function calc_mpi_mortar_flux!(surface_flux_values, normal_direction = get_normal_direction(cache.mpi_mortars, i, j, position, mortar) - calc_mpi_mortar_flux!(fstar, mesh, nonconservative_terms, equations, + calc_mpi_mortar_flux!(fstar_primary, fstar_secondary, mesh, + nonconservative_terms, equations, surface_integral, dg, cache, mortar, position, normal_direction, i, j) @@ -433,14 +469,15 @@ function calc_mpi_mortar_flux!(surface_flux_values, mpi_mortar_fluxes_to_elements!(surface_flux_values, mesh, equations, mortar_l2, dg, cache, - mortar, fstar, u_buffer, fstar_tmp) + mortar, fstar_primary, fstar_secondary, u_buffer, + fstar_tmp) end return nothing end # Inlined version of the mortar flux computation on small elements for conservation laws -@inline function calc_mpi_mortar_flux!(fstar, +@inline function calc_mpi_mortar_flux!(fstar_primary, fstar_secondary, mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, @@ -456,16 +493,48 @@ end flux = surface_flux(u_ll, u_rr, normal_direction, equations) # Copy flux to buffer - set_node_vars!(fstar, flux, equations, dg, i_node_index, j_node_index, + set_node_vars!(fstar_primary, flux, equations, dg, i_node_index, j_node_index, + position_index) + set_node_vars!(fstar_secondary, flux, equations, dg, i_node_index, j_node_index, position_index) end +# Inlined version of the mortar flux computation on small elements for non-conservative equations +@inline function calc_mpi_mortar_flux!(fstar_primary, fstar_secondary, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, + nonconservative_terms::True, equations, + surface_integral, dg::DG, cache, + mortar_index, position_index, normal_direction, + i_node_index, j_node_index) + @unpack u = cache.mpi_mortars + surface_flux, nonconservative_flux = surface_integral.surface_flux + + u_ll, u_rr = get_surface_node_vars(u, equations, dg, position_index, i_node_index, + j_node_index, mortar_index) + + flux = surface_flux(u_ll, u_rr, normal_direction, equations) + noncons_flux_primary = nonconservative_flux(u_ll, u_rr, normal_direction, equations) + noncons_flux_secondary = nonconservative_flux(u_rr, u_ll, normal_direction, + equations) + + for v in eachvariable(equations) + fstar_primary[v, i_node_index, j_node_index, position_index] = flux[v] + + 0.5f0 * + noncons_flux_primary[v] + fstar_secondary[v, i_node_index, j_node_index, position_index] = flux[v] + + 0.5f0 * + noncons_flux_secondary[v] + end +end + @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, equations, mortar_l2::LobattoLegendreMortarL2, - dg::DGSEM, cache, mortar, fstar, + dg::DGSEM, cache, mortar, fstar_primary, + fstar_secondary, u_buffer, fstar_tmp) @unpack local_neighbor_ids, local_neighbor_positions, node_indices = cache.mpi_mortars index_range = eachnode(dg) @@ -487,22 
+556,22 @@ end # Project small fluxes to large element. multiply_dimensionwise!(u_buffer, mortar_l2.reverse_lower, mortar_l2.reverse_lower, - view(fstar, .., 1), + view(fstar_secondary, .., 1), fstar_tmp) add_multiply_dimensionwise!(u_buffer, mortar_l2.reverse_upper, mortar_l2.reverse_lower, - view(fstar, .., 2), + view(fstar_secondary, .., 2), fstar_tmp) add_multiply_dimensionwise!(u_buffer, mortar_l2.reverse_lower, mortar_l2.reverse_upper, - view(fstar, .., 3), + view(fstar_secondary, .., 3), fstar_tmp) add_multiply_dimensionwise!(u_buffer, mortar_l2.reverse_upper, mortar_l2.reverse_upper, - view(fstar, .., 4), + view(fstar_secondary, .., 4), fstar_tmp) # The flux is calculated in the outward direction of the small elements, # so the sign must be switched to get the flux in outward direction @@ -536,10 +605,10 @@ end for j in eachnode(dg) for i in eachnode(dg) for v in eachvariable(equations) - surface_flux_values[v, i, j, small_direction, element] = fstar[v, - i, - j, - position] + surface_flux_values[v, i, j, small_direction, element] = fstar_primary[v, + i, + j, + position] end end end diff --git a/test/test_mpi_p4est_3d.jl b/test/test_mpi_p4est_3d.jl index d6018e51c81..663732b4ded 100644 --- a/test/test_mpi_p4est_3d.jl +++ b/test/test_mpi_p4est_3d.jl @@ -235,6 +235,80 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + + @trixi_testset "elixir_mhd_alfven_wave_nonconforming.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_mhd_alfven_wave_nonconforming.jl"), + l2=[ + 0.0001788543743594658, + 0.000624334205581902, + 0.00022892869974368887, + 0.0007223464581156573, + 0.0006651366626523314, + 0.0006287275014743352, + 0.000344484339916008, + 0.0007179788287557142, + 8.632896980651243e-7 + ], + linf=[ + 0.0010730565632763867, + 0.004596749809344033, + 0.0013235269262853733, + 0.00468874234888117, + 0.004719267084104306, + 0.004228339352211896, + 0.0037503625505571625, + 0.005104176909383168, + 9.738081186490818e-6 + ], + tspan=(0.0, 0.25), + coverage_override=(trees_per_dimension = (1, 1, 1),)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # Same test as above but with only one tree in the mesh + # We use it to test meshes with elements of different size in each partition + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_mhd_alfven_wave_nonconforming.jl"), + l2=[ + 0.0019054118500017054, + 0.006957977226608083, + 0.003429930594167365, + 0.009051598556176287, + 0.0077261662742688425, + 0.008210851821439208, + 0.003763030674412298, + 0.009175470744760567, + 2.881690753923244e-5 + ], + linf=[ + 0.010983704624623503, + 0.04584128974425262, + 0.02022630484954286, + 0.04851342295826149, + 0.040710154751363525, + 0.044722299260292586, + 0.036591209423654236, + 0.05701669133068068, + 0.00024182906501186622 + ], + tspan=(0.0, 0.25), trees_per_dimension=(1, 1, 1), + coverage_override=(trees_per_dimension = (1, 1, 1),)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end # P4estMesh MPI From 9db0209503e96b60af5e653b90c7078b620f54e1 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 21 Nov 2024 11:58:56 
+0100 Subject: [PATCH 03/41] Use velocity in Navier-Stokes convergence tests (#2170) * Use velocity in Navier-Stokes convergence tests * fmt * CI test vals * Update examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl * fix allocs BCs * relabel all eq_para * use tuple --- .../dgmulti_2d/elixir_advection_diffusion.jl | 10 +- .../elixir_navierstokes_convergence.jl | 10 +- .../elixir_navierstokes_convergence_curved.jl | 10 +- .../elixir_navierstokes_lid_driven_cavity.jl | 6 +- .../elixir_navierstokes_convergence.jl | 10 +- .../elixir_navierstokes_convergence_curved.jl | 10 +- ...xir_navierstokes_NACA0012airfoil_mach08.jl | 18 +- .../elixir_navierstokes_convergence.jl | 9 +- ...ir_navierstokes_convergence_nonperiodic.jl | 10 +- .../elixir_navierstokes_lid_driven_cavity.jl | 6 +- ...ixir_navierstokes_lid_driven_cavity_amr.jl | 6 +- .../elixir_navierstokes_convergence.jl | 9 +- .../elixir_navierstokes_convergence_walls.jl | 22 ++- ...ixir_navierstokes_convergence_walls_amr.jl | 22 ++- .../elixir_navierstokes_convergence.jl | 9 +- .../elixir_navierstokes_lid_driven_cavity.jl | 6 +- .../elixir_navierstokes_convergence.jl | 8 +- test/test_parabolic_1d.jl | 20 +- test/test_parabolic_2d.jl | 156 +++++++-------- test/test_parabolic_3d.jl | 184 +++++++++--------- 20 files changed, 283 insertions(+), 258 deletions(-) diff --git a/examples/dgmulti_2d/elixir_advection_diffusion.jl b/examples/dgmulti_2d/elixir_advection_diffusion.jl index 35e048b6b54..b253150bec8 100644 --- a/examples/dgmulti_2d/elixir_advection_diffusion.jl +++ b/examples/dgmulti_2d/elixir_advection_diffusion.jl @@ -21,11 +21,11 @@ cells_per_dimension = (16, 16) mesh = DGMultiMesh(dg, cells_per_dimension; is_on_boundary) # BC types -boundary_condition_left = BoundaryConditionDirichlet((x, t, equations) -> SVector(1 + - 0.1 * - x[2])) -boundary_condition_zero = BoundaryConditionDirichlet((x, t, equations) -> SVector(0.0)) -boundary_condition_neumann_zero = BoundaryConditionNeumann((x, t, equations) -> SVector(0.0)) +boundary_condition_left = BoundaryConditionDirichlet((x, t, equations_parabolic) -> SVector(1 + + 0.1 * + x[2])) +boundary_condition_zero = BoundaryConditionDirichlet((x, t, equations_parabolic) -> SVector(0.0)) +boundary_condition_neumann_zero = BoundaryConditionNeumann((x, t, equations_parabolic) -> SVector(0.0)) # define inviscid boundary conditions boundary_conditions = (; :left => boundary_condition_left, diff --git a/examples/dgmulti_2d/elixir_navierstokes_convergence.jl b/examples/dgmulti_2d/elixir_navierstokes_convergence.jl index b529b43a41d..3cc908b77d6 100644 --- a/examples/dgmulti_2d/elixir_navierstokes_convergence.jl +++ b/examples/dgmulti_2d/elixir_navierstokes_convergence.jl @@ -178,10 +178,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, - t, - equations)[2:3]) -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1]) +end + +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl b/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl index 
92216bf6c29..21a2fc23c5f 100644 --- a/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl +++ b/examples/dgmulti_2d/elixir_navierstokes_convergence_curved.jl @@ -186,10 +186,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, - t, - equations)[2:3]) -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1]) +end + +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl b/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl index 6be384138e2..86a440918b3 100644 --- a/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl @@ -33,9 +33,9 @@ end initial_condition = initial_condition_cavity # BC types -velocity_bc_lid = NoSlip((x, t, equations) -> SVector(1.0, 0.0)) -velocity_bc_cavity = NoSlip((x, t, equations) -> SVector(0.0, 0.0)) -heat_bc = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_lid = NoSlip((x, t, equations_parabolic) -> SVector(1.0, 0.0)) +velocity_bc_cavity = NoSlip((x, t, equations_parabolic) -> SVector(0.0, 0.0)) +heat_bc = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_lid = BoundaryConditionNavierStokesWall(velocity_bc_lid, heat_bc) boundary_condition_cavity = BoundaryConditionNavierStokesWall(velocity_bc_cavity, heat_bc) diff --git a/examples/dgmulti_3d/elixir_navierstokes_convergence.jl b/examples/dgmulti_3d/elixir_navierstokes_convergence.jl index 5fa0ad7ce60..3a59b39eeba 100644 --- a/examples/dgmulti_3d/elixir_navierstokes_convergence.jl +++ b/examples/dgmulti_3d/elixir_navierstokes_convergence.jl @@ -221,10 +221,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, - t, - equations)[2:4]) -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1], u_cons[4] / u_cons[1]) +end + +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl b/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl index c58d78d2581..37e9b106d8b 100644 --- a/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl +++ b/examples/dgmulti_3d/elixir_navierstokes_convergence_curved.jl @@ -229,10 +229,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, - t, - equations)[2:4]) -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, 
t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1], u_cons[4] / u_cons[1]) +end + +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl index 43ce7ac1ed6..39a2107ef8e 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_NACA0012airfoil_mach08.jl @@ -76,21 +76,23 @@ boundary_conditions = Dict(:Left => boundary_condition_subsonic_constant, :AirfoilBottom => boundary_condition_slip_wall, :AirfoilTop => boundary_condition_slip_wall) -velocity_airfoil = NoSlip((x, t, equations) -> SVector(0.0, 0.0)) +velocity_airfoil = NoSlip((x, t, equations_parabolic) -> SVector(0.0, 0.0)) -heat_airfoil = Adiabatic((x, t, equations) -> 0.0) +heat_airfoil = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_conditions_airfoil = BoundaryConditionNavierStokesWall(velocity_airfoil, heat_airfoil) -function momenta_initial_condition_mach08_flow(x, t, equations) - u = initial_condition_mach08_flow(x, t, equations) - momenta = SVector(u[2], u[3]) +function velocities_initial_condition_mach08_flow(x, t, equations) + u_cons = initial_condition_mach08_flow(x, t, equations) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1]) end -velocity_bc_square = NoSlip((x, t, equations) -> momenta_initial_condition_mach08_flow(x, t, - equations)) -heat_bc_square = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_square = NoSlip((x, t, equations_parabolic) -> velocities_initial_condition_mach08_flow(x, + t, + equations)) + +heat_bc_square = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_square = BoundaryConditionNavierStokesWall(velocity_bc_square, heat_bc_square) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl index 54ec09d2be8..5c2fad57c79 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl @@ -178,11 +178,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip() do x, t, equations - u = initial_condition_navier_stokes_convergence_test(x, t, equations) - return SVector(u[2], u[3]) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1]) end -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) + +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl index b4177fe8538..7942f61c985 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl @@ -178,10 +178,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, - 
t, - equations)[2:3]) -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1]) +end + +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index b56f1a613b4..c1261d05b99 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -34,9 +34,9 @@ end initial_condition = initial_condition_cavity # BC types -velocity_bc_lid = NoSlip((x, t, equations) -> SVector(1.0, 0.0)) -velocity_bc_cavity = NoSlip((x, t, equations) -> SVector(0.0, 0.0)) -heat_bc = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_lid = NoSlip((x, t, equations_parabolic) -> SVector(1.0, 0.0)) +velocity_bc_cavity = NoSlip((x, t, equations_parabolic) -> SVector(0.0, 0.0)) +heat_bc = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_lid = BoundaryConditionNavierStokesWall(velocity_bc_lid, heat_bc) boundary_condition_cavity = BoundaryConditionNavierStokesWall(velocity_bc_cavity, heat_bc) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl index 14a05058dd4..4c90e89b7ff 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl @@ -34,9 +34,9 @@ end initial_condition = initial_condition_cavity # BC types -velocity_bc_lid = NoSlip((x, t, equations) -> SVector(1.0, 0.0)) -velocity_bc_cavity = NoSlip((x, t, equations) -> SVector(0.0, 0.0)) -heat_bc = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_lid = NoSlip((x, t, equations_parabolic) -> SVector(1.0, 0.0)) +velocity_bc_cavity = NoSlip((x, t, equations_parabolic) -> SVector(0.0, 0.0)) +heat_bc = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_lid = BoundaryConditionNavierStokesWall(velocity_bc_lid, heat_bc) boundary_condition_cavity = BoundaryConditionNavierStokesWall(velocity_bc_cavity, heat_bc) diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl index c640b255b05..f14100f61e9 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl @@ -222,11 +222,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip() do x, t, equations - u = initial_condition_navier_stokes_convergence_test(x, t, equations) - return SVector(u[2], u[3], u[4]) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1], u_cons[4] / u_cons[1]) end -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) + +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git 
a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl index d534b7b189b..74fc24db513 100644 --- a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl +++ b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls.jl @@ -121,15 +121,19 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_left_right = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, - t, - equations)[2]) - -heat_bc_left = Isothermal((x, t, equations) -> Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, - t, - equations), - equations_parabolic)) -heat_bc_right = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_left_right = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return u_cons[2] / u_cons[1] +end + +heat_bc_left = Isothermal() do x, t, equations_parabolic + Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, + t, + equations_parabolic), + equations_parabolic) +end + +heat_bc_right = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_left = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_left) diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl index 4f9cd6365fa..607470c1115 100644 --- a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl +++ b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl @@ -121,15 +121,19 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_left_right = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, - t, - equations)[2]) - -heat_bc_left = Isothermal((x, t, equations) -> Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, - t, - equations), - equations_parabolic)) -heat_bc_right = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_left_right = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return u_cons[2] / u_cons[1] +end + +heat_bc_left = Isothermal() do x, t, equations_parabolic + Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, + t, + equations_parabolic), + equations_parabolic) +end + +heat_bc_right = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_left = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_left) diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl b/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl index b0c8678baad..c76e7290e01 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_convergence.jl @@ -178,11 +178,12 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip() do x, t, equations - u = initial_condition_navier_stokes_convergence_test(x, t, equations) - return SVector(u[2], u[3]) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + # This may be an `SVector` or simply a `Tuple` + return (u_cons[2] / u_cons[1], u_cons[3] / u_cons[1]) end -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) +heat_bc_top_bottom = Adiabatic((x, t, 
equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index d46b1a334ee..a923844a6e5 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -33,9 +33,9 @@ end initial_condition = initial_condition_cavity # BC types -velocity_bc_lid = NoSlip((x, t, equations) -> SVector(1.0, 0.0)) -velocity_bc_cavity = NoSlip((x, t, equations) -> SVector(0.0, 0.0)) -heat_bc = Adiabatic((x, t, equations) -> 0.0) +velocity_bc_lid = NoSlip((x, t, equations_parabolic) -> SVector(1.0, 0.0)) +velocity_bc_cavity = NoSlip((x, t, equations_parabolic) -> SVector(0.0, 0.0)) +heat_bc = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_lid = BoundaryConditionNavierStokesWall(velocity_bc_lid, heat_bc) boundary_condition_cavity = BoundaryConditionNavierStokesWall(velocity_bc_cavity, heat_bc) diff --git a/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl index 3ada9503c6a..ab9d37aa6b0 100644 --- a/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl +++ b/examples/tree_3d_dgsem/elixir_navierstokes_convergence.jl @@ -222,11 +222,11 @@ end initial_condition = initial_condition_navier_stokes_convergence_test # BC types -velocity_bc_top_bottom = NoSlip() do x, t, equations - u = initial_condition_navier_stokes_convergence_test(x, t, equations) - return SVector(u[2], u[3], u[4]) +velocity_bc_top_bottom = NoSlip() do x, t, equations_parabolic + u_cons = initial_condition_navier_stokes_convergence_test(x, t, equations_parabolic) + return SVector(u_cons[2] / u_cons[1], u_cons[3] / u_cons[1], u_cons[4] / u_cons[1]) end -heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0) +heat_bc_top_bottom = Adiabatic((x, t, equations_parabolic) -> 0.0) boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom) diff --git a/test/test_parabolic_1d.jl b/test/test_parabolic_1d.jl index 48b2b9ce4d6..7de57a45178 100644 --- a/test/test_parabolic_1d.jl +++ b/test/test_parabolic_1d.jl @@ -130,12 +130,12 @@ end @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_navierstokes_convergence_walls.jl"), l2=[ - 0.00047023310868269237, - 0.00032181736027057234, - 0.0014966266486095025 + 0.0004702331100298379, + 0.0003218173539588441, + 0.001496626616191212 ], linf=[ - 0.002996375101363302, + 0.0029963751636357117, 0.0028639041695096433, 0.012691132694550689 ]) @@ -157,14 +157,14 @@ end Prandtl = prandtl_number(), gradient_variables = GradientVariablesEntropy()), l2=[ - 0.0004608500483647771, - 0.00032431091222851285, - 0.0015159733360626845 + 0.00046085004909354776, + 0.0003243109084492897, + 0.0015159733164383632 ], linf=[ - 0.002754803146635787, - 0.0028567713744625124, - 0.012941793784197131 + 0.0027548031865172184, + 0.0028567713569609024, + 0.012941793735691931 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index ceefb65e99b..dc3e776154f 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -124,16 +124,16 @@ end "elixir_navierstokes_convergence.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.1), l2=[ - 0.0015355076812510957, - 0.0033843168272696756, 
- 0.0036531858107443434, - 0.009948436427519214 + 0.0015355076812512347, + 0.0033843168272690953, + 0.0036531858107444093, + 0.009948436427520873 ], linf=[ - 0.005522560467190019, - 0.013425258500730508, - 0.013962115643482154, - 0.027483102120502423 + 0.005522560467186466, + 0.013425258500736614, + 0.013962115643462503, + 0.0274831021205042 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -150,16 +150,16 @@ end "elixir_navierstokes_convergence_curved.jl"), cells_per_dimension=(4, 4), tspan=(0.0, 0.1), l2=[ - 0.004255101916146187, - 0.011118488923215765, - 0.011281831283462686, - 0.03573656447388509 + 0.004255101916146402, + 0.011118488923215394, + 0.011281831283462784, + 0.035736564473886 ], linf=[ - 0.015071710669706473, - 0.04103132025858458, - 0.03990424085750277, - 0.1309401718598764 + 0.015071710669707805, + 0.04103132025860057, + 0.03990424085748012, + 0.13094017185987106 ],) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -273,16 +273,16 @@ end energy_internal, enstrophy)), l2=[ - 0.002111672530658797, - 0.0034322351490857846, - 0.0038742528195910416, - 0.012469246082568561 + 0.0021116725306624543, + 0.003432235149083229, + 0.003874252819605527, + 0.012469246082535005 ], linf=[ - 0.012006418939223495, - 0.035520871209746126, - 0.024512747492231427, - 0.11191122588756564 + 0.012006418939279007, + 0.03552087120962882, + 0.02451274749189282, + 0.11191122588626357 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -303,16 +303,16 @@ end equations), equations)), l2=[ - 0.002103629650383915, - 0.003435843933396454, - 0.00386735987813341, - 0.012670355349235728 + 0.0021036296503840883, + 0.003435843933397192, + 0.003867359878114748, + 0.012670355349293195 ], linf=[ - 0.012006261793147788, - 0.03550212518982032, - 0.025107947319661185, - 0.11647078036571124 + 0.01200626179308184, + 0.03550212518997239, + 0.025107947320178275, + 0.11647078036751068 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -330,16 +330,16 @@ end initial_refinement_level=2, tspan=(0.0, 0.1), gradient_variables=GradientVariablesEntropy(), l2=[ - 0.0021403742517389513, - 0.0034258287094908572, - 0.0038915122886898517, - 0.012506862343013842 + 0.002140374251729127, + 0.003425828709496601, + 0.0038915122887358097, + 0.012506862342858291 ], linf=[ - 0.012244412004628336, - 0.03507559186162224, - 0.024580892345558894, - 0.11425600758350107 + 0.012244412004772665, + 0.03507559186131113, + 0.02458089234472249, + 0.11425600758024679 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -361,16 +361,16 @@ end equations), equations)), l2=[ - 0.0021349737347844907, - 0.0034301388278203033, - 0.0038928324474291572, - 0.012693611436230873 + 0.0021349737347923716, + 0.0034301388278178365, + 0.0038928324473968836, + 0.012693611436338 ], linf=[ - 0.01224423627586213, - 0.035054066314102905, - 0.025099598504931965, - 0.11795616324751634 + 0.012244236275761766, + 0.03505406631430898, + 0.025099598505644406, + 0.11795616324985403 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -388,16 +388,16 @@ end initial_refinement_level=2, tspan=(0.0, 0.1), volume_integral=VolumeIntegralFluxDifferencing(flux_central), l2=[ - 0.0021116725306633594, - 0.0034322351490827557, - 0.0038742528196093542, - 0.012469246082526909 + 0.0021116725306612075, + 0.0034322351490838703, + 
0.0038742528196011594, + 0.012469246082545557 ], linf=[ - 0.012006418939291663, - 0.035520871209594115, - 0.024512747491801577, - 0.11191122588591007 + 0.012006418939262131, + 0.0355208712096602, + 0.024512747491999436, + 0.11191122588669522 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -431,11 +431,15 @@ end ode_default_options()..., callback = callbacks) l2_error, linf_error = analysis_callback(sol) @test l2_error ≈ - [0.00024296959173852447; 0.0002093263158670915; 0.0005390572390977262; - 0.00026753561392341537] + [0.00024296959174050973; + 0.00020932631586399853; + 0.0005390572390981241; + 0.00026753561391316933] @test linf_error ≈ - [0.0016210102053424436; 0.002593287648655501; 0.002953907343823712; - 0.002077119120180271] + [0.0016210102053486608; + 0.0025932876486537016; + 0.0029539073438284817; + 0.0020771191202548778] # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -613,16 +617,16 @@ end "elixir_navierstokes_convergence.jl"), initial_refinement_level=1, tspan=(0.0, 0.2), l2=[ - 0.0003811978985836709, - 0.0005874314969169538, - 0.0009142898787923481, - 0.0011613918899727263 + 0.0003811978986531135, + 0.0005874314969137914, + 0.0009142898787681551, + 0.0011613918893790497 ], linf=[ - 0.0021633623982135752, - 0.009484348274135372, - 0.004231572066492217, - 0.011661660275365193 + 0.0021633623985426453, + 0.009484348273965089, + 0.0042315720663082534, + 0.011661660264076446 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -639,16 +643,16 @@ end "elixir_navierstokes_convergence_nonperiodic.jl"), initial_refinement_level=1, tspan=(0.0, 0.2), l2=[ - 0.00040364962558511795, - 0.0005869762481506936, - 0.00091488537427274, - 0.0011984191566376762 + 0.0004036496258545996, + 0.0005869762480189079, + 0.0009148853742181908, + 0.0011984191532764543 ], linf=[ - 0.0024993634941723464, - 0.009487866203944725, - 0.004505829506628117, - 0.011634902776245681 + 0.0024993634989209923, + 0.009487866203496731, + 0.004505829506103787, + 0.011634902753554499 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index 2690a08cbb9..99ad05c7f70 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -17,18 +17,18 @@ isdir(outdir) && rm(outdir, recursive = true) "elixir_navierstokes_convergence.jl"), cells_per_dimension=(4, 4, 4), tspan=(0.0, 0.1), l2=[ - 0.0005532847115849239, - 0.000659263490965341, - 0.0007776436127362806, - 0.0006592634909662951, - 0.0038073628897809185 + 0.000553284711585015, + 0.0006592634909666629, + 0.0007776436127373607, + 0.0006592634909664286, + 0.0038073628897819524 ], linf=[ - 0.0017039861523615585, - 0.002628561703560073, - 0.003531057425112172, - 0.0026285617036090336, - 0.015587829540351095 + 0.0017039861523628907, + 0.002628561703550747, + 0.0035310574250866367, + 0.002628561703585053, + 0.015587829540340437 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -45,18 +45,18 @@ end "elixir_navierstokes_convergence_curved.jl"), cells_per_dimension=(4, 4, 4), tspan=(0.0, 0.1), l2=[ - 0.0014027227251207474, - 0.0021322235533273513, - 0.0027873741447455194, - 0.0024587473070627423, - 0.00997836818019202 + 0.001402722725120774, + 0.0021322235533272546, + 0.002787374144745514, + 0.002458747307062751, + 0.009978368180191861 ], linf=[ - 0.006341750402837576, - 
0.010306014252246865, - 0.01520740250924979, - 0.010968264045485565, - 0.047454389831591115 + 0.0063417504028402405, + 0.010306014252245865, + 0.015207402509253565, + 0.010968264045485343, + 0.04745438983155026 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -101,18 +101,18 @@ end "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), l2=[ - 0.0019582188528512257, - 0.002653449504302844, - 0.002898264205184629, - 0.002653449504302853, - 0.009511572365085706 + 0.0019582188528520267, + 0.002653449504302849, + 0.002898264205184317, + 0.0026534495043028534, + 0.009511572365092744 ], linf=[ - 0.013680656759085918, - 0.0356910450154318, - 0.023526343547736236, - 0.035691045015431855, - 0.11482570604041165 + 0.013680656759089693, + 0.03569104501543785, + 0.023526343547761893, + 0.03569104501543733, + 0.11482570604049513 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -133,18 +133,18 @@ end equations), equations)), l2=[ - 0.00195468651965362, + 0.001954686519653731, 0.0026554367897028506, - 0.002892730402724066, - 0.002655436789702817, - 0.009596351796609566 + 0.0028927304027240026, + 0.0026554367897028437, + 0.00959635179660988 ], linf=[ - 0.013680508110645473, - 0.035673446359424356, - 0.024024936779729028, - 0.03567344635942474, - 0.11839497110809383 + 0.013680508110646583, + 0.03567344635942522, + 0.024024936779738822, + 0.035673446359425674, + 0.11839497110814179 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -162,18 +162,18 @@ end initial_refinement_level=2, tspan=(0.0, 0.1), gradient_variables=GradientVariablesEntropy(), l2=[ - 0.0019770444875099307, - 0.0026524750946399327, - 0.00290860030832445, - 0.0026524750946399396, - 0.009509568981439294 + 0.0019770444875097737, + 0.002652475094640119, + 0.0029086003083239236, + 0.002652475094640097, + 0.009509568981441823 ], linf=[ - 0.01387936112914212, - 0.03526260609304053, - 0.023554197097368997, - 0.035262606093040896, - 0.11719963716509518 + 0.013879361129145007, + 0.035262606093049195, + 0.02355419709739138, + 0.03526260609304984, + 0.11719963716518933 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -195,18 +195,18 @@ end equations), equations)), l2=[ - 0.001974631423398113, - 0.002654768259143932, - 0.002907031063651286, - 0.002654768259143901, - 0.009587792882971452 + 0.0019746314233993435, + 0.0026547682591448896, + 0.0029070310636460494, + 0.0026547682591448922, + 0.00958779288300152 ], linf=[ - 0.01387919380137137, - 0.035244084526358944, - 0.02398614622061363, - 0.03524408452635828, - 0.12005056512506407 + 0.013879193801400458, + 0.03524408452641245, + 0.023986146220843566, + 0.035244084526412915, + 0.1200505651257302 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -224,18 +224,18 @@ end initial_refinement_level=2, tspan=(0.0, 0.1), volume_integral=VolumeIntegralFluxDifferencing(flux_central), l2=[ - 0.0019582188528180213, - 0.002653449504301736, - 0.0028982642051960006, - 0.0026534495043017384, - 0.009511572364811033 + 0.0019582188528208754, + 0.0026534495043017935, + 0.002898264205195059, + 0.0026534495043017917, + 0.009511572364832972 ], linf=[ - 0.013680656758949583, - 0.035691045015224444, - 0.02352634354676752, - 0.035691045015223424, - 0.11482570603751441 + 0.013680656758958687, + 0.03569104501523916, + 0.02352634354684648, + 0.03569104501523987, + 
0.11482570603774533 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -268,20 +268,18 @@ end dt = 1e-5, ode_default_options()..., callback = callbacks) l2_error, linf_error = analysis_callback(sol) - @test l2_error ≈ [ - 0.0003109336253407314, - 0.0006473493036803503, - 0.0007705277238213672, - 0.0006280517917198335, - 0.000903927789884075 - ] - @test linf_error ≈ [ - 0.0023694155365339142, - 0.010634932622402863, - 0.006772070862236412, - 0.010640551561726901, - 0.019256819038719897 - ] + @test l2_error ≈ + [0.00031093362536287433; + 0.0006473493036800964; + 0.0007705277238221976; + 0.0006280517917194624; + 0.0009039277899421355] + @test linf_error ≈ + [0.0023694155363713776; + 0.01063493262248095; + 0.006772070862041679; + 0.010640551561807883; + 0.019256819037817507] # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -373,18 +371,18 @@ end "elixir_navierstokes_convergence.jl"), initial_refinement_level=2, tspan=(0.0, 0.1), l2=[ - 0.00026599105554982194, - 0.000461877794472316, - 0.0005424899076052261, - 0.0004618777944723191, - 0.0015846392581126832 + 0.00026599105557723507, + 0.00046187779448444603, + 0.0005424899076194272, + 0.00046187779448445546, + 0.0015846392584275121 ], linf=[ - 0.0025241668929956163, - 0.006308461681816373, - 0.004334939663169113, - 0.006308461681804009, - 0.03176343480493493 + 0.0025241668964857134, + 0.006308461684409397, + 0.004334939668473314, + 0.006308461684396753, + 0.03176343483364796 ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) From 861bd7ecac8b408f34b8aa98daae426021fbf6e8 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 21 Nov 2024 14:11:54 +0100 Subject: [PATCH 04/41] set version to v0.9.7 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index e0ce86c23ba..a420ea23fc0 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.9.7-DEV" +version = "0.9.7" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" From 4bc14f988628bc19a5dbaaa549db7a444d9ff480 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 21 Nov 2024 14:12:37 +0100 Subject: [PATCH 05/41] set development version to v0.9.8-DEV --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index a420ea23fc0..ac1af2c49eb 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.9.7" +version = "0.9.8-DEV" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" From b5711d4add18cc1de4e0f1335014cf38e0d01fe2 Mon Sep 17 00:00:00 2001 From: Arpit Babbar Date: Fri, 22 Nov 2024 11:47:31 +0100 Subject: [PATCH 06/41] Fix the spelling of "Siddhartha Mishra" (#2174) --- src/equations/shallow_water_1d.jl | 12 ++++++------ src/equations/shallow_water_2d.jl | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/equations/shallow_water_1d.jl b/src/equations/shallow_water_1d.jl index 3c218eee9d9..29642470262 100644 --- a/src/equations/shallow_water_1d.jl +++ b/src/equations/shallow_water_1d.jl @@ -201,7 +201,7 @@ end Non-symmetric two-point volume flux discretizing the nonconservative (source) term that contains the gradient of the bottom topography [`ShallowWaterEquations1D`](@ref). -Gives entropy conservation and well-balancedness on both the volume and surface when combined with +Gives entropy conservation and well-balancedness on both the volume and surface when combined with [`flux_wintermeyer_etal`](@ref). Further details are available in the papers: @@ -210,7 +210,7 @@ Further details are available in the papers: shallow water equations on unstructured curvilinear meshes with discontinuous bathymetry [DOI: 10.1016/j.jcp.2017.03.036](https://doi.org/10.1016/j.jcp.2017.03.036) - Patrick Ersing, Andrew R. Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on + An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on curvilinear meshes [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) """ @@ -237,7 +237,7 @@ This flux can be used together with [`flux_fjordholm_etal`](@ref) at interfaces conservation and well-balancedness. Further details for the original finite volume formulation are available in -- Ulrik S. Fjordholm, Siddhartha Mishr and Eitan Tadmor (2011) +- Ulrik S. Fjordholm, Siddhartha Mishra and Eitan Tadmor (2011) Well-balanced and energy stable schemes for the shallow water equations with discontinuous topography [DOI: 10.1016/j.jcp.2011.03.042](https://doi.org/10.1016/j.jcp.2011.03.042) and for curvilinear 2D case in the paper: @@ -310,7 +310,7 @@ is nonzero this should only be used as a surface flux otherwise the scheme will For well-balancedness in the volume flux use [`flux_wintermeyer_etal`](@ref). Details are available in Eq. (4.1) in the paper: -- Ulrik S. Fjordholm, Siddhartha Mishr and Eitan Tadmor (2011) +- Ulrik S. Fjordholm, Siddhartha Mishra and Eitan Tadmor (2011) Well-balanced and energy stable schemes for the shallow water equations with discontinuous topography [DOI: 10.1016/j.jcp.2011.03.042](https://doi.org/10.1016/j.jcp.2011.03.042) """ @@ -582,7 +582,7 @@ end equations::ShallowWaterEquations1D) Calculate Roe-averaged velocity `v_roe` and wavespeed `c_roe = sqrt{g * h_roe}` -See for instance equation (62) in +See for instance equation (62) in - Paul A. Ullrich, Christiane Jablonowski, and Bram van Leer (2010) High-order finite-volume methods for the shallow-water equations on the sphere [DOI: 10.1016/j.jcp.2010.04.044](https://doi.org/10.1016/j.jcp.2010.04.044) @@ -631,7 +631,7 @@ end end # Calculate the error for the "lake-at-rest" test case where H = h+b should -# be a constant value over time. +# be a constant value over time. 
@inline function lake_at_rest_error(u, equations::ShallowWaterEquations1D) h, _, b = u diff --git a/src/equations/shallow_water_2d.jl b/src/equations/shallow_water_2d.jl index 7ba36d6ec92..510d4560f9b 100644 --- a/src/equations/shallow_water_2d.jl +++ b/src/equations/shallow_water_2d.jl @@ -329,7 +329,7 @@ This flux can be used together with [`flux_fjordholm_etal`](@ref) at interfaces conservation and well-balancedness. Further details for the original finite volume formulation are available in -- Ulrik S. Fjordholm, Siddhartha Mishr and Eitan Tadmor (2011) +- Ulrik S. Fjordholm, Siddhartha Mishra and Eitan Tadmor (2011) Well-balanced and energy stable schemes for the shallow water equations with discontinuous topography [DOI: 10.1016/j.jcp.2011.03.042](https://doi.org/10.1016/j.jcp.2011.03.042) and for curvilinear 2D case in the paper: @@ -491,7 +491,7 @@ is nonzero this should only be used as a surface flux otherwise the scheme will For well-balancedness in the volume flux use [`flux_wintermeyer_etal`](@ref). Details are available in Eq. (4.1) in the paper: -- Ulrik S. Fjordholm, Siddhartha Mishr and Eitan Tadmor (2011) +- Ulrik S. Fjordholm, Siddhartha Mishra and Eitan Tadmor (2011) Well-balanced and energy stable schemes for the shallow water equations with discontinuous topography [DOI: 10.1016/j.jcp.2011.03.042](https://doi.org/10.1016/j.jcp.2011.03.042) """ From 62ff764e1027cccb905225982209d3358e48ce93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Rueda-Ram=C3=ADrez?= Date: Sat, 23 Nov 2024 11:48:57 +0100 Subject: [PATCH 07/41] Unify analysis routines for div(B) to make them equation-agnostic (#2176) * Unify analysis routines for div(B) to make them equation-agnostic * Apply suggestions from code review Co-authored-by: Andrew Winters --------- Co-authored-by: Andrew Winters --- src/callbacks_step/analysis_dg2d.jl | 70 ++++++------------- src/callbacks_step/analysis_dg3d.jl | 57 +++++++++------ src/equations/ideal_glm_mhd_2d.jl | 3 + src/equations/ideal_glm_mhd_3d.jl | 3 + .../ideal_glm_mhd_multicomponent_2d.jl | 4 ++ 5 files changed, 67 insertions(+), 70 deletions(-) diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl index 4cd92ce7c5f..e089133fa17 100644 --- a/src/callbacks_step/analysis_dg2d.jl +++ b/src/callbacks_step/analysis_dg2d.jl @@ -279,30 +279,17 @@ end function analyze(::Val{:l2_divb}, du, u, t, mesh::TreeMesh{2}, - equations::IdealGlmMhdEquations2D, dg::DGSEM, cache) + equations, dg::DGSEM, cache) integrate_via_indices(u, mesh, equations, dg, cache, cache, dg.basis.derivative_matrix) do u, i, j, element, equations, dg, cache, derivative_matrix divb = zero(eltype(u)) for k in eachnode(dg) - divb += (derivative_matrix[i, k] * u[6, k, j, element] + - derivative_matrix[j, k] * u[7, i, k, element]) - end - divb *= cache.elements.inverse_jacobian[element] - divb^2 - end |> sqrt -end + B1_kj, _, _ = magnetic_field(u[:, k, j, element], equations) + _, B2_ik, _ = magnetic_field(u[:, i, k, element], equations) -function analyze(::Val{:l2_divb}, du, u, t, - mesh::TreeMesh{2}, equations::IdealGlmMhdMulticomponentEquations2D, - dg::DG, cache) - integrate_via_indices(u, mesh, equations, dg, cache, cache, - dg.basis.derivative_matrix) do u, i, j, element, equations, - dg, cache, derivative_matrix - divb = zero(eltype(u)) - for k in eachnode(dg) - divb += (derivative_matrix[i, k] * u[5, k, j, element] + - derivative_matrix[j, k] * u[6, i, k, element]) + divb += (derivative_matrix[i, k] * B1_kj + + derivative_matrix[j, k] * B2_ik) end divb *= 
cache.elements.inverse_jacobian[element] divb^2 @@ -312,7 +299,7 @@ end function analyze(::Val{:l2_divb}, du, u, t, mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}}, - equations::IdealGlmMhdEquations2D, dg::DGSEM, cache) + equations, dg::DGSEM, cache) @unpack contravariant_vectors = cache.elements integrate_via_indices(u, mesh, equations, dg, cache, cache, dg.basis.derivative_matrix) do u, i, j, element, equations, @@ -323,10 +310,13 @@ function analyze(::Val{:l2_divb}, du, u, t, Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element) # Compute the transformed divergence for k in eachnode(dg) + B1_kj, B2_kj, _ = magnetic_field(u[:, k, j, element], equations) + B1_ik, B2_ik, _ = magnetic_field(u[:, i, k, element], equations) + divb += (derivative_matrix[i, k] * - (Ja11 * u[6, k, j, element] + Ja12 * u[7, k, j, element]) + + (Ja11 * B1_kj + Ja12 * B2_kj) + derivative_matrix[j, k] * - (Ja21 * u[6, i, k, element] + Ja22 * u[7, i, k, element])) + (Ja21 * B1_ik + Ja22 * B2_ik)) end divb *= cache.elements.inverse_jacobian[i, j, element] divb^2 @@ -335,7 +325,7 @@ end function analyze(::Val{:linf_divb}, du, u, t, mesh::TreeMesh{2}, - equations::IdealGlmMhdEquations2D, dg::DGSEM, cache) + equations, dg::DGSEM, cache) @unpack derivative_matrix, weights = dg.basis # integrate over all elements to get the divergence-free condition errors @@ -344,30 +334,11 @@ function analyze(::Val{:linf_divb}, du, u, t, for j in eachnode(dg), i in eachnode(dg) divb = zero(eltype(u)) for k in eachnode(dg) - divb += (derivative_matrix[i, k] * u[6, k, j, element] + - derivative_matrix[j, k] * u[7, i, k, element]) - end - divb *= cache.elements.inverse_jacobian[element] - linf_divb = max(linf_divb, abs(divb)) - end - end - - return linf_divb -end - -function analyze(::Val{:linf_divb}, du, u, t, - mesh::TreeMesh{2}, equations::IdealGlmMhdMulticomponentEquations2D, - dg::DG, cache) - @unpack derivative_matrix, weights = dg.basis + B1_kj, _, _ = magnetic_field(u[:, k, j, element], equations) + _, B2_ik, _ = magnetic_field(u[:, i, k, element], equations) - # integrate over all elements to get the divergence-free condition errors - linf_divb = zero(eltype(u)) - for element in eachelement(dg, cache) - for j in eachnode(dg), i in eachnode(dg) - divb = zero(eltype(u)) - for k in eachnode(dg) - divb += (derivative_matrix[i, k] * u[5, k, j, element] + - derivative_matrix[j, k] * u[6, i, k, element]) + divb += (derivative_matrix[i, k] * B1_kj + + derivative_matrix[j, k] * B2_ik) end divb *= cache.elements.inverse_jacobian[element] linf_divb = max(linf_divb, abs(divb)) @@ -380,7 +351,7 @@ end function analyze(::Val{:linf_divb}, du, u, t, mesh::Union{StructuredMesh{2}, UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2}}, - equations::IdealGlmMhdEquations2D, dg::DGSEM, cache) + equations, dg::DGSEM, cache) @unpack derivative_matrix, weights = dg.basis @unpack contravariant_vectors = cache.elements @@ -396,10 +367,13 @@ function analyze(::Val{:linf_divb}, du, u, t, element) # Compute the transformed divergence for k in eachnode(dg) + B1_kj, B2_kj, _ = magnetic_field(u[:, k, j, element], equations) + B1_ik, B2_ik, _ = magnetic_field(u[:, i, k, element], equations) + divb += (derivative_matrix[i, k] * - (Ja11 * u[6, k, j, element] + Ja12 * u[7, k, j, element]) + + (Ja11 * B1_kj + Ja12 * B2_kj) + derivative_matrix[j, k] * - (Ja21 * u[6, i, k, element] + Ja22 * u[7, i, k, element])) + (Ja21 * B1_ik + Ja22 * B2_ik)) end divb *= cache.elements.inverse_jacobian[i, j, element] linf_divb = 
max(linf_divb, abs(divb)) diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl index 85880380c61..3c9c3a69f5e 100644 --- a/src/callbacks_step/analysis_dg3d.jl +++ b/src/callbacks_step/analysis_dg3d.jl @@ -307,16 +307,20 @@ function analyze(::typeof(entropy_timederivative), du, u, t, end function analyze(::Val{:l2_divb}, du, u, t, - mesh::TreeMesh{3}, equations::IdealGlmMhdEquations3D, + mesh::TreeMesh{3}, equations, dg::DGSEM, cache) integrate_via_indices(u, mesh, equations, dg, cache, cache, dg.basis.derivative_matrix) do u, i, j, k, element, equations, dg, cache, derivative_matrix divb = zero(eltype(u)) for l in eachnode(dg) - divb += (derivative_matrix[i, l] * u[6, l, j, k, element] + - derivative_matrix[j, l] * u[7, i, l, k, element] + - derivative_matrix[k, l] * u[8, i, j, l, element]) + B_ljk = magnetic_field(u[:, l, j, k, element], equations) + B_ilk = magnetic_field(u[:, i, l, k, element], equations) + B_ijl = magnetic_field(u[:, i, j, l, element], equations) + + divb += (derivative_matrix[i, l] * B_ljk[1] + + derivative_matrix[j, l] * B_ilk[2] + + derivative_matrix[k, l] * B_ijl[3]) end divb *= cache.elements.inverse_jacobian[element] divb^2 @@ -325,7 +329,7 @@ end function analyze(::Val{:l2_divb}, du, u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, - equations::IdealGlmMhdEquations3D, + equations, dg::DGSEM, cache) @unpack contravariant_vectors = cache.elements integrate_via_indices(u, mesh, equations, dg, cache, cache, @@ -341,15 +345,16 @@ function analyze(::Val{:l2_divb}, du, u, t, element) # Compute the transformed divergence for l in eachnode(dg) + B_ljk = magnetic_field(u[:, l, j, k, element], equations) + B_ilk = magnetic_field(u[:, i, l, k, element], equations) + B_ijl = magnetic_field(u[:, i, j, l, element], equations) + divb += (derivative_matrix[i, l] * - (Ja11 * u[6, l, j, k, element] + Ja12 * u[7, l, j, k, element] + - Ja13 * u[8, l, j, k, element]) + + (Ja11 * B_ljk[1] + Ja12 * B_ljk[2] + Ja13 * B_ljk[3]) + derivative_matrix[j, l] * - (Ja21 * u[6, i, l, k, element] + Ja22 * u[7, i, l, k, element] + - Ja23 * u[8, i, l, k, element]) + + (Ja21 * B_ilk[1] + Ja22 * B_ilk[2] + Ja23 * B_ilk[3]) + derivative_matrix[k, l] * - (Ja31 * u[6, i, j, l, element] + Ja32 * u[7, i, j, l, element] + - Ja33 * u[8, i, j, l, element])) + (Ja31 * B_ijl[1] + Ja32 * B_ijl[2] + Ja33 * B_ijl[3])) end divb *= cache.elements.inverse_jacobian[i, j, k, element] divb^2 @@ -357,7 +362,7 @@ function analyze(::Val{:l2_divb}, du, u, t, end function analyze(::Val{:linf_divb}, du, u, t, - mesh::TreeMesh{3}, equations::IdealGlmMhdEquations3D, + mesh::TreeMesh{3}, equations, dg::DGSEM, cache) @unpack derivative_matrix, weights = dg.basis @@ -367,9 +372,13 @@ function analyze(::Val{:linf_divb}, du, u, t, for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) divb = zero(eltype(u)) for l in eachnode(dg) - divb += (derivative_matrix[i, l] * u[6, l, j, k, element] + - derivative_matrix[j, l] * u[7, i, l, k, element] + - derivative_matrix[k, l] * u[8, i, j, l, element]) + B_ljk = magnetic_field(u[:, l, j, k, element], equations) + B_ilk = magnetic_field(u[:, i, l, k, element], equations) + B_ijl = magnetic_field(u[:, i, j, l, element], equations) + + divb += (derivative_matrix[i, l] * B_ljk[1] + + derivative_matrix[j, l] * B_ilk[2] + + derivative_matrix[k, l] * B_ijl[3]) end divb *= cache.elements.inverse_jacobian[element] linf_divb = max(linf_divb, abs(divb)) @@ -386,7 +395,7 @@ end function analyze(::Val{:linf_divb}, du, u, t, 
mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, - equations::IdealGlmMhdEquations3D, + equations, dg::DGSEM, cache) @unpack derivative_matrix, weights = dg.basis @unpack contravariant_vectors = cache.elements @@ -405,12 +414,16 @@ function analyze(::Val{:linf_divb}, du, u, t, k, element) # Compute the transformed divergence for l in eachnode(dg) - divb += (derivative_matrix[i, l] * (Ja11 * u[6, l, j, k, element] + - Ja12 * u[7, l, j, k, element] + Ja13 * u[8, l, j, k, element]) + - derivative_matrix[j, l] * (Ja21 * u[6, i, l, k, element] + - Ja22 * u[7, i, l, k, element] + Ja23 * u[8, i, l, k, element]) + - derivative_matrix[k, l] * (Ja31 * u[6, i, j, l, element] + - Ja32 * u[7, i, j, l, element] + Ja33 * u[8, i, j, l, element])) + B_ljk = magnetic_field(u[:, l, j, k, element], equations) + B_ilk = magnetic_field(u[:, i, l, k, element], equations) + B_ijl = magnetic_field(u[:, i, j, l, element], equations) + + divb += (derivative_matrix[i, l] * (Ja11 * B_ljk[1] + + Ja12 * B_ljk[2] + Ja13 * B_ljk[3]) + + derivative_matrix[j, l] * (Ja21 * B_ilk[1] + + Ja22 * B_ilk[2] + Ja23 * B_ilk[3]) + + derivative_matrix[k, l] * (Ja31 * B_ijl[1] + + Ja32 * B_ijl[2] + Ja33 * B_ijl[3])) end divb *= cache.elements.inverse_jacobian[i, j, k, element] linf_divb = max(linf_divb, abs(divb)) diff --git a/src/equations/ideal_glm_mhd_2d.jl b/src/equations/ideal_glm_mhd_2d.jl index 9383fdbe961..ba362525228 100644 --- a/src/equations/ideal_glm_mhd_2d.jl +++ b/src/equations/ideal_glm_mhd_2d.jl @@ -46,6 +46,9 @@ function default_analysis_integrals(::IdealGlmMhdEquations2D) (entropy_timederivative, Val(:l2_divb), Val(:linf_divb)) end +# Helper function to extract the magnetic field vector from the conservative variables +magnetic_field(u, equations::IdealGlmMhdEquations2D) = SVector(u[6], u[7], u[8]) + # Set initial conditions at physical location `x` for time `t` """ initial_condition_constant(x, t, equations::IdealGlmMhdEquations2D) diff --git a/src/equations/ideal_glm_mhd_3d.jl b/src/equations/ideal_glm_mhd_3d.jl index 8d7d0461398..d8c859b7a8a 100644 --- a/src/equations/ideal_glm_mhd_3d.jl +++ b/src/equations/ideal_glm_mhd_3d.jl @@ -44,6 +44,9 @@ function default_analysis_integrals(::IdealGlmMhdEquations3D) (entropy_timederivative, Val(:l2_divb), Val(:linf_divb)) end +# Helper function to extract the magnetic field vector from the conservative variables +magnetic_field(u, equations::IdealGlmMhdEquations3D) = SVector(u[6], u[7], u[8]) + # Set initial conditions at physical location `x` for time `t` """ initial_condition_constant(x, t, equations::IdealGlmMhdEquations3D) diff --git a/src/equations/ideal_glm_mhd_multicomponent_2d.jl b/src/equations/ideal_glm_mhd_multicomponent_2d.jl index 99fe391301d..509a2507cd9 100644 --- a/src/equations/ideal_glm_mhd_multicomponent_2d.jl +++ b/src/equations/ideal_glm_mhd_multicomponent_2d.jl @@ -101,6 +101,10 @@ function default_analysis_integrals(::IdealGlmMhdMulticomponentEquations2D) (entropy_timederivative, Val(:l2_divb), Val(:linf_divb)) end +# Helper function to extract the magnetic field vector from the conservative variables +magnetic_field(u, equations::IdealGlmMhdMulticomponentEquations2D) = SVector(u[5], u[6], + u[7]) + """ initial_condition_convergence_test(x, t, equations::IdealGlmMhdMulticomponentEquations2D) From 3609f39b979de2e894bb78ecb9c67e8fe75ddf9f Mon Sep 17 00:00:00 2001 From: Warisa Roongaraya <81345089+warisa-r@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:50:13 +0100 Subject: [PATCH 08/41] Modification on PERK time integrator to handle cases 
where s = e (#2177) * handle s=consistency_order * minor change --- ext/TrixiConvexECOSExt.jl | 12 +++++++-- .../methods_PERK2.jl | 17 +++++++------ .../methods_PERK3.jl | 25 ++++++++++--------- 3 files changed, 32 insertions(+), 22 deletions(-) diff --git a/ext/TrixiConvexECOSExt.jl b/ext/TrixiConvexECOSExt.jl index 8251fe3eed9..9b897436dbb 100644 --- a/ext/TrixiConvexECOSExt.jl +++ b/ext/TrixiConvexECOSExt.jl @@ -60,7 +60,11 @@ function stability_polynomials!(pnoms, consistency_order, num_stage_evals, end # For optimization only the maximum is relevant - return maximum(abs(pnoms)) + if consistency_order - num_stage_evals == 0 + return maximum(abs.(pnoms)) # If there is no variable to optimize, we need to use the broadcast operator. + else + return maximum(abs(pnoms)) + end end #= @@ -151,7 +155,11 @@ function Trixi.bisect_stability_polynomial(consistency_order, num_eig_vals, println("Concluded stability polynomial optimization \n") end - gamma_opt = evaluate(gamma) + if consistency_order - num_stage_evals != 0 + gamma_opt = evaluate(gamma) + else + gamma_opt = nothing # If there is no variable to optimize, return gamma_opt as nothing. + end # Catch case S = 3 (only one opt. variable) if isa(gamma_opt, Number) diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl index 0ff648556c4..2451680a505 100644 --- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl +++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl @@ -49,15 +49,16 @@ function compute_PairedExplicitRK2_butcher_tableau(num_stages, eig_vals, tspan, dtmax, dteps, eig_vals; verbose) - monomial_coeffs = undo_normalization!(monomial_coeffs, consistency_order, - num_stages) - num_monomial_coeffs = length(monomial_coeffs) - @assert num_monomial_coeffs == coeffs_max - A = compute_a_coeffs(num_stages, stage_scaling_factors, monomial_coeffs) - - a_matrix[:, 1] -= A - a_matrix[:, 2] = A + if num_stages != consistency_order + monomial_coeffs = undo_normalization!(monomial_coeffs, consistency_order, + num_stages) + num_monomial_coeffs = length(monomial_coeffs) + @assert num_monomial_coeffs == coeffs_max + A = compute_a_coeffs(num_stages, stage_scaling_factors, monomial_coeffs) + a_matrix[:, 1] -= A + a_matrix[:, 2] = A + end return a_matrix, c, dt_opt end diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl index 477748b28c5..02bc3eeba36 100644 --- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl +++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl @@ -33,21 +33,22 @@ function compute_PairedExplicitRK3_butcher_tableau(num_stages, tspan, # Initialize the array of our solution a_unknown = zeros(num_stages - 2) + # Calculate coefficients of the stability polynomial in monomial form + consistency_order = 3 + dtmax = tspan[2] - tspan[1] + dteps = 1.0f-9 + + num_eig_vals, eig_vals = filter_eig_vals(eig_vals; verbose) + + monomial_coeffs, dt_opt = bisect_stability_polynomial(consistency_order, + num_eig_vals, num_stages, + dtmax, dteps, + eig_vals; verbose) + # Special case of e = 3 - if num_stages == 3 + if num_stages == consistency_order a_unknown = [0.25] # Use classic SSPRK33 (Shu-Osher) Butcher Tableau else - # Calculate coefficients of the stability polynomial in monomial form - consistency_order = 3 - dtmax = tspan[2] - tspan[1] - dteps = 1.0f-9 - - num_eig_vals, eig_vals = 
filter_eig_vals(eig_vals; verbose) - - monomial_coeffs, dt_opt = bisect_stability_polynomial(consistency_order, - num_eig_vals, num_stages, - dtmax, dteps, - eig_vals; verbose) monomial_coeffs = undo_normalization!(monomial_coeffs, consistency_order, num_stages) From 004612377681e9ed9f37216f2170b74d0cdfffbd Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Wed, 27 Nov 2024 07:41:06 +0100 Subject: [PATCH 09/41] Denote pseudo time for Euler-Gravity (#2101) * Denote pseudo time for Euler-Gravity * Update src/semidiscretization/semidiscretization_euler_gravity.jl * Apply suggestions from code review * further comments * comment * dtau * Apply suggestions from code review Co-authored-by: Hendrik Ranocha --------- Co-authored-by: Hendrik Ranocha --- .../semidiscretization_euler_gravity.jl | 70 ++++++++++++------- 1 file changed, 43 insertions(+), 27 deletions(-) diff --git a/src/semidiscretization/semidiscretization_euler_gravity.jl b/src/semidiscretization/semidiscretization_euler_gravity.jl index 3ca429f63b6..294cc69f471 100644 --- a/src/semidiscretization/semidiscretization_euler_gravity.jl +++ b/src/semidiscretization/semidiscretization_euler_gravity.jl @@ -19,8 +19,8 @@ struct ParametersEulerGravity{RealT <: Real, TimestepGravity} background_density :: RealT # aka rho0 gravitational_constant :: RealT # aka G cfl :: RealT - resid_tol :: RealT - n_iterations_max :: Int + resid_tol :: RealT # Hyp.-Diff. Eq. steady state tolerance + n_iterations_max :: Int # Max. number of iterations of the pseudo-time gravity solver timestep_gravity :: TimestepGravity end @@ -124,6 +124,7 @@ function SemidiscretizationEulerGravity(semi_euler::SemiEuler, SemidiscretizationHyperbolic{Mesh, <:AbstractHyperbolicDiffusionEquations}} u_ode = compute_coefficients(zero(real(semi_gravity)), semi_gravity) du_ode = similar(u_ode) + # Registers for gravity solver, tailored to the 2N and 3S* methods implemented below u_tmp1_ode = similar(u_ode) u_tmp2_ode = similar(u_ode) cache = (; u_ode, du_ode, u_tmp1_ode, u_tmp2_ode) @@ -217,6 +218,9 @@ end calc_error_norms(func, u, t, analyzer, semi.semi_euler, cache_analysis) end +# Coupled Euler and gravity solver at each Runge-Kutta stage, +# corresponding to Algorithm 2 in Schlottke-Lakemper et al. 
(2020), +# https://dx.doi.org/10.1016/j.jcp.2021.110467 function rhs!(du_ode, u_ode, semi::SemidiscretizationEulerGravity, t) @unpack semi_euler, semi_gravity, cache = semi @@ -275,33 +279,33 @@ function update_gravity!(semi::SemidiscretizationEulerGravity, u_ode) finalstep = false @unpack n_iterations_max, cfl, resid_tol, timestep_gravity = parameters iter = 0 - t = zero(real(semi_gravity.solver)) + tau = zero(real(semi_gravity.solver)) # Pseudo-time # iterate gravity solver until convergence or maximum number of iterations are reached @unpack equations = semi_gravity while !finalstep - dt = @trixi_timeit timer() "calculate dt" begin - cfl * max_dt(u_gravity, t, semi_gravity.mesh, + dtau = @trixi_timeit timer() "calculate dtau" begin + cfl * max_dt(u_gravity, tau, semi_gravity.mesh, have_constant_speed(equations), equations, semi_gravity.solver, semi_gravity.cache) end # evolve solution by one pseudo-time step time_start = time_ns() - timestep_gravity(cache, u_euler, t, dt, parameters, semi_gravity) + timestep_gravity(cache, u_euler, tau, dtau, parameters, semi_gravity) runtime = time_ns() - time_start put!(gravity_counter, runtime) # update iteration counter iter += 1 - t += dt + tau += dtau # check if we reached the maximum number of iterations if n_iterations_max > 0 && iter >= n_iterations_max @warn "Max iterations reached: Gravity solver failed to converge!" residual=maximum(abs, @views du_gravity[1, .., - :]) t=t dt=dt + :]) tau=tau dtau=dtau finalstep = true end @@ -315,34 +319,37 @@ function update_gravity!(semi::SemidiscretizationEulerGravity, u_ode) end # Integrate gravity solver for 2N-type low-storage schemes -function timestep_gravity_2N!(cache, u_euler, t, dt, gravity_parameters, semi_gravity, +function timestep_gravity_2N!(cache, u_euler, tau, dtau, gravity_parameters, + semi_gravity, a, b, c) G = gravity_parameters.gravitational_constant rho0 = gravity_parameters.background_density grav_scale = -4.0 * pi * G + # Note that `u_ode` is `u_gravity` in `rhs!` above @unpack u_ode, du_ode, u_tmp1_ode = cache u_tmp1_ode .= zero(eltype(u_tmp1_ode)) du_gravity = wrap_array(du_ode, semi_gravity) for stage in eachindex(c) - t_stage = t + dt * c[stage] + tau_stage = tau + dtau * c[stage] # rhs! has the source term for the harmonic problem # We don't need a `@trixi_timeit timer() "rhs!"` here since that's already # included in the `rhs!` call. - rhs!(du_ode, u_ode, semi_gravity, t_stage) + rhs!(du_ode, u_ode, semi_gravity, tau_stage) # Source term: Jeans instability OR coupling convergence test OR blast wave # put in gravity source term proportional to Euler density # OBS! subtract off the background density ρ_0 (spatial mean value) + # Note: Adding to `du_gravity` is essentially adding to `du_ode`! @views @. 
du_gravity[1, .., :] += grav_scale * (u_euler[1, .., :] - rho0) a_stage = a[stage] - b_stage_dt = b[stage] * dt + b_stage_dtau = b[stage] * dtau @trixi_timeit timer() "Runge-Kutta step" begin @threaded for idx in eachindex(u_ode) u_tmp1_ode[idx] = du_ode[idx] - u_tmp1_ode[idx] * a_stage - u_ode[idx] += u_tmp1_ode[idx] * b_stage_dt + u_ode[idx] += u_tmp1_ode[idx] * b_stage_dtau end end end @@ -350,7 +357,7 @@ function timestep_gravity_2N!(cache, u_euler, t, dt, gravity_parameters, semi_gr return nothing end -function timestep_gravity_carpenter_kennedy_erk54_2N!(cache, u_euler, t, dt, +function timestep_gravity_carpenter_kennedy_erk54_2N!(cache, u_euler, tau, dtau, gravity_parameters, semi_gravity) # Coefficients for Carpenter's 5-stage 4th-order low-storage Runge-Kutta method a = SVector(0.0, 567301805773.0 / 1357537059087.0, @@ -363,47 +370,50 @@ function timestep_gravity_carpenter_kennedy_erk54_2N!(cache, u_euler, t, dt, 2526269341429.0 / 6820363962896.0, 2006345519317.0 / 3224310063776.0, 2802321613138.0 / 2924317926251.0) - timestep_gravity_2N!(cache, u_euler, t, dt, gravity_parameters, semi_gravity, a, b, - c) + timestep_gravity_2N!(cache, u_euler, tau, dtau, gravity_parameters, semi_gravity, + a, b, c) end # Integrate gravity solver for 3S*-type low-storage schemes -function timestep_gravity_3Sstar!(cache, u_euler, t, dt, gravity_parameters, +function timestep_gravity_3Sstar!(cache, u_euler, tau, dtau, gravity_parameters, semi_gravity, gamma1, gamma2, gamma3, beta, delta, c) G = gravity_parameters.gravitational_constant rho0 = gravity_parameters.background_density grav_scale = -4 * G * pi + # Note that `u_ode` is `u_gravity` in `rhs!` above @unpack u_ode, du_ode, u_tmp1_ode, u_tmp2_ode = cache u_tmp1_ode .= zero(eltype(u_tmp1_ode)) u_tmp2_ode .= u_ode du_gravity = wrap_array(du_ode, semi_gravity) for stage in eachindex(c) - t_stage = t + dt * c[stage] + tau_stage = tau + dtau * c[stage] # rhs! has the source term for the harmonic problem # We don't need a `@trixi_timeit timer() "rhs!"` here since that's already # included in the `rhs!` call. - rhs!(du_ode, u_ode, semi_gravity, t_stage) + rhs!(du_ode, u_ode, semi_gravity, tau_stage) # Source term: Jeans instability OR coupling convergence test OR blast wave # put in gravity source term proportional to Euler density # OBS! subtract off the background density ρ_0 around which the Jeans instability is perturbed + # Note: Adding to `du_gravity` is essentially adding to `du_ode`! @views @. du_gravity[1, .., :] += grav_scale * (u_euler[1, .., :] - rho0) delta_stage = delta[stage] gamma1_stage = gamma1[stage] gamma2_stage = gamma2[stage] gamma3_stage = gamma3[stage] - beta_stage_dt = beta[stage] * dt + beta_stage_dtau = beta[stage] * dtau @trixi_timeit timer() "Runge-Kutta step" begin @threaded for idx in eachindex(u_ode) + # See Algorithm 1 (3S* method) in Schlottke-Lakemper et al. 
(2020) u_tmp1_ode[idx] += delta_stage * u_ode[idx] u_ode[idx] = (gamma1_stage * u_ode[idx] + gamma2_stage * u_tmp1_ode[idx] + gamma3_stage * u_tmp2_ode[idx] + - beta_stage_dt * du_ode[idx]) + beta_stage_dtau * du_ode[idx]) end end end @@ -411,7 +421,8 @@ function timestep_gravity_3Sstar!(cache, u_euler, t, dt, gravity_parameters, return nothing end -function timestep_gravity_erk51_3Sstar!(cache, u_euler, t, dt, gravity_parameters, +# First-order, 5-stage, 3S*-storage optimized method +function timestep_gravity_erk51_3Sstar!(cache, u_euler, tau, dtau, gravity_parameters, semi_gravity) # New 3Sstar coefficients optimized for polynomials of degree polydeg=3 # and examples/parameters_hypdiff_lax_friedrichs.toml @@ -434,11 +445,13 @@ function timestep_gravity_erk51_3Sstar!(cache, u_euler, t, dt, gravity_parameter c = SVector(0.0000000000000000E+00, 1.9189497208340553E-01, 1.9580448818599061E-01, 2.4241635859769023E-01, 5.0728347557552977E-01) - timestep_gravity_3Sstar!(cache, u_euler, t, dt, gravity_parameters, semi_gravity, + timestep_gravity_3Sstar!(cache, u_euler, tau, dtau, gravity_parameters, + semi_gravity, gamma1, gamma2, gamma3, beta, delta, c) end -function timestep_gravity_erk52_3Sstar!(cache, u_euler, t, dt, gravity_parameters, +# Second-order, 5-stage, 3S*-storage optimized method +function timestep_gravity_erk52_3Sstar!(cache, u_euler, tau, dtau, gravity_parameters, semi_gravity) # New 3Sstar coefficients optimized for polynomials of degree polydeg=3 # and examples/parameters_hypdiff_lax_friedrichs.toml @@ -461,11 +474,13 @@ function timestep_gravity_erk52_3Sstar!(cache, u_euler, t, dt, gravity_parameter c = SVector(0.0000000000000000E+00, 4.5158640252832094E-01, 1.0221535725056414E+00, 1.4280257701954349E+00, 7.1581334196229851E-01) - timestep_gravity_3Sstar!(cache, u_euler, t, dt, gravity_parameters, semi_gravity, + timestep_gravity_3Sstar!(cache, u_euler, tau, dtau, gravity_parameters, + semi_gravity, gamma1, gamma2, gamma3, beta, delta, c) end -function timestep_gravity_erk53_3Sstar!(cache, u_euler, t, dt, gravity_parameters, +# Third-order, 5-stage, 3S*-storage optimized method +function timestep_gravity_erk53_3Sstar!(cache, u_euler, tau, dtau, gravity_parameters, semi_gravity) # New 3Sstar coefficients optimized for polynomials of degree polydeg=3 # and examples/parameters_hypdiff_lax_friedrichs.toml @@ -488,7 +503,8 @@ function timestep_gravity_erk53_3Sstar!(cache, u_euler, t, dt, gravity_parameter c = SVector(0.0000000000000000E+00, 8.4476964977404881E-02, 2.8110631488732202E-01, 5.7093842145029405E-01, 7.2999896418559662E-01) - timestep_gravity_3Sstar!(cache, u_euler, t, dt, gravity_parameters, semi_gravity, + timestep_gravity_3Sstar!(cache, u_euler, tau, dtau, gravity_parameters, + semi_gravity, gamma1, gamma2, gamma3, beta, delta, c) end From 987fa7e1b81d5a389398bcb23d8996d84acf920a Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Wed, 27 Nov 2024 09:47:12 +0100 Subject: [PATCH 10/41] Correct name (#2179) --- src/equations/lattice_boltzmann_2d.jl | 2 +- src/equations/lattice_boltzmann_3d.jl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/equations/lattice_boltzmann_2d.jl b/src/equations/lattice_boltzmann_2d.jl index bad417d9c84..920f862f436 100644 --- a/src/equations/lattice_boltzmann_2d.jl +++ b/src/equations/lattice_boltzmann_2d.jl @@ -58,7 +58,7 @@ The main sources for the base implementation were Raum**, Master Thesis, University of Cologne, 2018. 3. 
Dieter Hänel, **Molekulare Gasdynamik**, Springer-Verlag Berlin Heidelberg, 2004 [doi:10.1007/3-540-35047-0](https://doi.org/10.1007/3-540-35047-0) -4. Dieter Krüger et al., **The Lattice Boltzmann Method**, Springer International Publishing, 2017 +4. Timm Krüger et al., **The Lattice Boltzmann Method**, Springer International Publishing, 2017 [doi:10.1007/978-3-319-44649-3](https://doi.org/10.1007/978-3-319-44649-3) """ struct LatticeBoltzmannEquations2D{RealT <: Real, CollisionOp} <: diff --git a/src/equations/lattice_boltzmann_3d.jl b/src/equations/lattice_boltzmann_3d.jl index bf4c365e0fe..1451eb7cbc1 100644 --- a/src/equations/lattice_boltzmann_3d.jl +++ b/src/equations/lattice_boltzmann_3d.jl @@ -97,7 +97,7 @@ The main sources for the base implementation were Raum**, Master Thesis, University of Cologne, 2018. 3. Dieter Hänel, **Molekulare Gasdynamik**, Springer-Verlag Berlin Heidelberg, 2004 [doi:10.1007/3-540-35047-0](https://doi.org/10.1007/3-540-35047-0) -4. Dieter Krüger et al., **The Lattice Boltzmann Method**, Springer International Publishing, 2017 +4. Timm Krüger et al., **The Lattice Boltzmann Method**, Springer International Publishing, 2017 [doi:10.1007/978-3-319-44649-3](https://doi.org/10.1007/978-3-319-44649-3) """ struct LatticeBoltzmannEquations3D{RealT <: Real, CollisionOp} <: From 897c1cdf693321aa4b52e9737381661223d65645 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Wed, 27 Nov 2024 17:35:38 +0100 Subject: [PATCH 11/41] Navier-Stokes Test Case: Viscous Shock propagation (#2173) * 1D version viscous shock * style * Viscous Shock 2D, 3D * Apply suggestions from code review * update test vals * eliminate allocs LWR Greenlight * work on allocs * cut allocs * test vals * Update examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl Co-authored-by: Hendrik Ranocha * gamma func * elaborate mu, mu_bar --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Michael Schlottke-Lakemper --- .../elixir_navierstokes_viscous_shock.jl | 183 ++++++++++++++++++ .../elixir_navierstokes_viscous_shock.jl | 183 ++++++++++++++++++ .../elixir_traffic_flow_lwr_greenlight.jl | 6 +- .../elixir_navierstokes_viscous_shock.jl | 176 +++++++++++++++++ .../compressible_navier_stokes_1d.jl | 8 +- .../compressible_navier_stokes_2d.jl | 9 +- .../compressible_navier_stokes_3d.jl | 10 +- test/test_parabolic_1d.jl | 23 +++ test/test_parabolic_2d.jl | 25 +++ test/test_parabolic_3d.jl | 27 +++ 10 files changed, 645 insertions(+), 5 deletions(-) create mode 100644 examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl create mode 100644 examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl create mode 100644 examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl new file mode 100644 index 00000000000..feec3d534d0 --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_viscous_shock.jl @@ -0,0 +1,183 @@ +using OrdinaryDiffEq +using Trixi + +# This is the classic 1D viscous shock wave problem with analytical solution +# for a special value of the Prandtl number. +# The original references are: +# +# - R. Becker (1922) +# Stoßwelle und Detonation. +# [DOI: 10.1007/BF01329605](https://doi.org/10.1007/BF01329605) +# +# English translations: +# Impact waves and detonation. Part I. +# https://ntrs.nasa.gov/api/citations/19930090862/downloads/19930090862.pdf +# Impact waves and detonation. Part II. 
+# https://ntrs.nasa.gov/api/citations/19930090863/downloads/19930090863.pdf +# +# - M. Morduchow, P. A. Libby (1949) +# On a Complete Solution of the One-Dimensional Flow Equations +# of a Viscous, Head-Conducting, Compressible Gas +# [DOI: 10.2514/8.11882](https://doi.org/10.2514/8.11882) +# +# +# The particular problem considered here is described in +# - L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) +# Entropy in self-similar shock profiles +# [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) + +### Fixed parameters ### + +# Special value for which nonlinear solver can be omitted +# Corresponds essentially to fixing the Mach number +alpha = 0.5 +# We want kappa = cp * mu = mu_bar to ensure constant enthalpy +prandtl_number() = 3 / 4 + +### Free choices: ### +gamma() = 5 / 3 + +# In Margolin et al., the Navier-Stokes equations are given for an +# isotropic stress tensor τ, i.e., ∇ ⋅ τ = μ Δu +mu_isotropic() = 0.15 +mu_bar() = mu_isotropic() / (gamma() - 1) # Re-scaled viscosity + +rho_0() = 1 +v() = 1 # Shock speed + +domain_length = 4.0 + +### Derived quantities ### + +Ma() = 2 / sqrt(3 - gamma()) # Mach number for alpha = 0.5 +c_0() = v() / Ma() # Speed of sound ahead of the shock + +# From constant enthalpy condition +p_0() = c_0()^2 * rho_0() / gamma() + +l() = mu_bar() / (rho_0() * v()) * 2 * gamma() / (gamma() + 1) # Appropriate length scale + +""" + initial_condition_viscous_shock(x, t, equations) + +Classic 1D viscous shock wave problem with analytical solution +for a special value of the Prandtl number. +The version implemented here is described in +- L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) + Entropy in self-similar shock profiles + [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) +""" +function initial_condition_viscous_shock(x, t, equations) + y = x[1] - v() * t # Translated coordinate + + # Coordinate transformation. See eq. (33) in Margolin et al. 
(2017) + chi = 2 * exp(y / (2 * l())) + + w = 1 + 1 / (2 * chi^2) * (1 - sqrt(1 + 2 * chi^2)) + + rho = rho_0() / w + u = v() * (1 - w) + p = p_0() * 1 / w * (1 + (gamma() - 1) / 2 * Ma()^2 * (1 - w^2)) + + return prim2cons(SVector(rho, u, 0, p), equations) +end +initial_condition = initial_condition_viscous_shock + +############################################################################### +# semidiscretization of the ideal compressible Navier-Stokes equations + +equations = CompressibleEulerEquations2D(gamma()) + +# Trixi implements the stress tensor in deviatoric form, thus we need to +# convert the "isotropic viscosity" to the "deviatoric viscosity" +mu_deviatoric() = mu_bar() * 3 / 4 +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu_deviatoric(), + Prandtl = prandtl_number(), + gradient_variables = GradientVariablesPrimitive()) + +solver = DGSEM(polydeg = 3, surface_flux = flux_hlle) + +coordinates_min = (-domain_length / 2, -domain_length / 2) +coordinates_max = (domain_length / 2, domain_length / 2) + +trees_per_dimension = (8, 2) +mesh = P4estMesh(trees_per_dimension, + polydeg = 3, initial_refinement_level = 0, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (false, true)) + +### Inviscid boundary conditions ### + +# Prescribe pure influx based on initial conditions +function boundary_condition_inflow(u_inner, normal_direction::AbstractVector, x, t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + u_cons = initial_condition_viscous_shock(x, t, equations) + flux = Trixi.flux(u_cons, normal_direction, equations) + + return flux +end + +# Completely free outflow +function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x, t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, normal_direction, equations) + + return flux +end + +boundary_conditions = Dict(:x_neg => boundary_condition_inflow, + :x_pos => boundary_condition_outflow) + +### Viscous boundary conditions ### +# For the viscous BCs, we use the known analytical solution +velocity_bc = NoSlip() do x, t, equations_parabolic + Trixi.velocity(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +heat_bc = Isothermal() do x, t, equations_parabolic + Trixi.temperature(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +boundary_condition_parabolic = BoundaryConditionNavierStokesWall(velocity_bc, heat_bc) + +boundary_conditions_parabolic = Dict(:x_neg => boundary_condition_parabolic, + :x_pos => boundary_condition_parabolic) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver; + boundary_conditions = (boundary_conditions, + boundary_conditions_parabolic)) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# Create ODE problem with time span `tspan` +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +alive_callback = AliveCallback(alive_interval = 10) + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + dt = 1e-3, ode_default_options()..., callback = callbacks) + +summary_callback() # print the timer summary diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl new file mode 100644 index 00000000000..43cdafa3753 --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_viscous_shock.jl @@ -0,0 +1,183 @@ +using OrdinaryDiffEq +using Trixi + +# This is the classic 1D viscous shock wave problem with analytical solution +# for a special value of the Prandtl number. +# The original references are: +# +# - R. Becker (1922) +# Stoßwelle und Detonation. +# [DOI: 10.1007/BF01329605](https://doi.org/10.1007/BF01329605) +# +# English translations: +# Impact waves and detonation. Part I. +# https://ntrs.nasa.gov/api/citations/19930090862/downloads/19930090862.pdf +# Impact waves and detonation. Part II. +# https://ntrs.nasa.gov/api/citations/19930090863/downloads/19930090863.pdf +# +# - M. Morduchow, P. A. Libby (1949) +# On a Complete Solution of the One-Dimensional Flow Equations +# of a Viscous, Head-Conducting, Compressible Gas +# [DOI: 10.2514/8.11882](https://doi.org/10.2514/8.11882) +# +# +# The particular problem considered here is described in +# - L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) +# Entropy in self-similar shock profiles +# [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) + +### Fixed parameters ### + +# Special value for which nonlinear solver can be omitted +# Corresponds essentially to fixing the Mach number +alpha = 0.5 +# We want kappa = cp * mu = mu_bar to ensure constant enthalpy +prandtl_number() = 3 / 4 + +### Free choices: ### +gamma() = 5 / 3 + +# In Margolin et al., the Navier-Stokes equations are given for an +# isotropic stress tensor τ, i.e., ∇ ⋅ τ = μ Δu +mu_isotropic() = 0.15 +mu_bar() = mu_isotropic() / (gamma() - 1) # Re-scaled viscosity + +rho_0() = 1 +v() = 1 # Shock speed + +domain_length = 4.0 + +### Derived quantities ### + +Ma() = 2 / sqrt(3 - gamma()) # Mach number for alpha = 0.5 +c_0() = v() / Ma() # Speed of sound ahead of the shock + +# From constant enthalpy condition +p_0() = c_0()^2 * rho_0() / gamma() + +l() = mu_bar() / (rho_0() * v()) * 2 * gamma() / (gamma() + 1) # Appropriate length scale + +""" + initial_condition_viscous_shock(x, t, equations) + +Classic 1D viscous shock wave problem with analytical solution +for a special value of the Prandtl number. +The version implemented here is described in +- L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) + Entropy in self-similar shock profiles + [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) +""" +function initial_condition_viscous_shock(x, t, equations) + y = x[1] - v() * t # Translated coordinate + + # Coordinate transformation. See eq. (33) in Margolin et al. 
(2017) + chi = 2 * exp(y / (2 * l())) + + w = 1 + 1 / (2 * chi^2) * (1 - sqrt(1 + 2 * chi^2)) + + rho = rho_0() / w + u = v() * (1 - w) + p = p_0() * 1 / w * (1 + (gamma() - 1) / 2 * Ma()^2 * (1 - w^2)) + + return prim2cons(SVector(rho, u, 0, 0, p), equations) +end +initial_condition = initial_condition_viscous_shock + +############################################################################### +# semidiscretization of the ideal compressible Navier-Stokes equations + +equations = CompressibleEulerEquations3D(gamma()) + +# Trixi implements the stress tensor in deviatoric form, thus we need to +# convert the "isotropic viscosity" to the "deviatoric viscosity" +mu_deviatoric() = mu_bar() * 3 / 4 +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu_deviatoric(), + Prandtl = prandtl_number(), + gradient_variables = GradientVariablesPrimitive()) + +solver = DGSEM(polydeg = 3, surface_flux = flux_hlle) + +coordinates_min = (-domain_length / 2, -domain_length / 2, -domain_length / 2) +coordinates_max = (domain_length / 2, domain_length / 2, domain_length / 2) + +trees_per_dimension = (8, 2, 2) +mesh = P4estMesh(trees_per_dimension, + polydeg = 3, initial_refinement_level = 0, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (false, true, true)) + +### Inviscid boundary conditions ### + +# Prescribe pure influx based on initial conditions +function boundary_condition_inflow(u_inner, normal_direction::AbstractVector, x, t, + surface_flux_function, + equations::CompressibleEulerEquations3D) + u_cons = initial_condition_viscous_shock(x, t, equations) + flux = Trixi.flux(u_cons, normal_direction, equations) + + return flux +end + +# Completely free outflow +function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x, t, + surface_flux_function, + equations::CompressibleEulerEquations3D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, normal_direction, equations) + + return flux +end + +boundary_conditions = Dict(:x_neg => boundary_condition_inflow, + :x_pos => boundary_condition_outflow) + +### Viscous boundary conditions ### +# For the viscous BCs, we use the known analytical solution +velocity_bc = NoSlip() do x, t, equations_parabolic + Trixi.velocity(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +heat_bc = Isothermal() do x, t, equations_parabolic + Trixi.temperature(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +boundary_condition_parabolic = BoundaryConditionNavierStokesWall(velocity_bc, heat_bc) + +boundary_conditions_parabolic = Dict(:x_neg => boundary_condition_parabolic, + :x_pos => boundary_condition_parabolic) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver; + boundary_conditions = (boundary_conditions, + boundary_conditions_parabolic)) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# Create ODE problem with time span `tspan` +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +alive_callback = AliveCallback(alive_interval = 10) + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + dt = 1e-3, ode_default_options()..., callback = callbacks) + +summary_callback() # print the timer summary diff --git a/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl index 3cb4735e132..6b7dd007128 100644 --- a/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl +++ b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl @@ -19,7 +19,8 @@ mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max, # Green light that at x = 0 which switches at t = 0 from red to green. # To the left there are cars bumper to bumper, to the right there are no cars. function initial_condition_greenlight(x, t, equation::TrafficFlowLWREquations1D) - scalar = x[1] < 0.0 ? 1.0 : 0.0 + RealT = eltype(x) + scalar = x[1] < 0 ? one(RealT) : zero(RealT) return SVector(scalar) end @@ -29,7 +30,8 @@ end # Assume that there are always cars waiting at the left function inflow(x, t, equations::TrafficFlowLWREquations1D) - return initial_condition_greenlight(coordinates_min, t, equations) + # -1.0 = coordinates_min + return initial_condition_greenlight(-1.0, t, equations) end boundary_condition_inflow = BoundaryConditionDirichlet(inflow) diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl b/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl new file mode 100644 index 00000000000..df6a453ac83 --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_navierstokes_viscous_shock.jl @@ -0,0 +1,176 @@ +using OrdinaryDiffEq +using Trixi + +# This is the classic 1D viscous shock wave problem with analytical solution +# for a special value of the Prandtl number. +# The original references are: +# +# - R. Becker (1922) +# Stoßwelle und Detonation. +# [DOI: 10.1007/BF01329605](https://doi.org/10.1007/BF01329605) +# +# English translations: +# Impact waves and detonation. Part I. +# https://ntrs.nasa.gov/api/citations/19930090862/downloads/19930090862.pdf +# Impact waves and detonation. Part II. +# https://ntrs.nasa.gov/api/citations/19930090863/downloads/19930090863.pdf +# +# - M. Morduchow, P. A. Libby (1949) +# On a Complete Solution of the One-Dimensional Flow Equations +# of a Viscous, Head-Conducting, Compressible Gas +# [DOI: 10.2514/8.11882](https://doi.org/10.2514/8.11882) +# +# +# The particular problem considered here is described in +# - L. G. Margolin, J. M. Reisner, P. M. 
Jordan (2017) +# Entropy in self-similar shock profiles +# [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) + +### Fixed parameters ### + +# Special value for which nonlinear solver can be omitted +# Corresponds essentially to fixing the Mach number +alpha = 0.5 +# We want kappa = cp * mu = mu_bar to ensure constant enthalpy +prandtl_number() = 1 + +### Free choices: ### +gamma() = 5 / 3 + +mu() = 0.15 +mu_bar() = mu() / (gamma() - 1) # Re-scaled viscosity + +rho_0() = 1 +v() = 1 # Shock speed + +domain_length = 4.0 + +### Derived quantities ### + +Ma() = 2 / sqrt(3 - gamma()) # Mach number for alpha = 0.5 +c_0() = v() / Ma() # Speed of sound ahead of the shock + +# From constant enthalpy condition +p_0() = c_0()^2 * rho_0() / gamma() + +l() = mu_bar() / (rho_0() * v()) * 2 * gamma() / (gamma() + 1) # Appropriate length scale + +""" + initial_condition_viscous_shock(x, t, equations) + +Classic 1D viscous shock wave problem with analytical solution +for a special value of the Prandtl number. +The version implemented here is described in +- L. G. Margolin, J. M. Reisner, P. M. Jordan (2017) + Entropy in self-similar shock profiles + [DOI: 10.1016/j.ijnonlinmec.2017.07.003](https://doi.org/10.1016/j.ijnonlinmec.2017.07.003) +""" +function initial_condition_viscous_shock(x, t, equations) + y = x[1] - v() * t # Translated coordinate + + # Coordinate transformation. See eq. (33) in Margolin et al. (2017) + chi = 2 * exp(y / (2 * l())) + + w = 1 + 1 / (2 * chi^2) * (1 - sqrt(1 + 2 * chi^2)) + + rho = rho_0() / w + u = v() * (1 - w) + p = p_0() * 1 / w * (1 + (gamma() - 1) / 2 * Ma()^2 * (1 - w^2)) + + return prim2cons(SVector(rho, u, p), equations) +end +initial_condition = initial_condition_viscous_shock + +############################################################################### +# semidiscretization of the ideal compressible Navier-Stokes equations + +equations = CompressibleEulerEquations1D(gamma()) +equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu = mu_bar(), + Prandtl = prandtl_number(), + gradient_variables = GradientVariablesPrimitive()) + +solver = DGSEM(polydeg = 3, surface_flux = flux_hlle) + +coordinates_min = -domain_length / 2 +coordinates_max = domain_length / 2 + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + periodicity = false, + n_cells_max = 30_000) + +### Inviscid boundary conditions ### + +# Prescribe pure influx based on initial conditions +function boundary_condition_inflow(u_inner, orientation::Integer, normal_direction, x, t, + surface_flux_function, + equations::CompressibleEulerEquations1D) + u_cons = initial_condition_viscous_shock(x, t, equations) + flux = Trixi.flux(u_cons, orientation, equations) + + return flux +end + +# Completely free outflow +function boundary_condition_outflow(u_inner, orientation::Integer, normal_direction, x, t, + surface_flux_function, + equations::CompressibleEulerEquations1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, orientation, equations) + + return flux +end + +boundary_conditions = (; x_neg = boundary_condition_inflow, + x_pos = boundary_condition_outflow) + +### Viscous boundary conditions ### +# For the viscous BCs, we use the known analytical solution +velocity_bc = NoSlip() do x, t, equations_parabolic + Trixi.velocity(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +heat_bc = Isothermal() do x, t, equations_parabolic 
+ Trixi.temperature(initial_condition_viscous_shock(x, + t, + equations_parabolic), + equations_parabolic) +end + +boundary_condition_parabolic = BoundaryConditionNavierStokesWall(velocity_bc, heat_bc) + +boundary_conditions_parabolic = (; x_neg = boundary_condition_parabolic, + x_pos = boundary_condition_parabolic) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver; + boundary_conditions = (boundary_conditions, + boundary_conditions_parabolic)) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span `tspan` +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +alive_callback = AliveCallback(alive_interval = 100) + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + dt = 1e-3, ode_default_options()..., callback = callbacks) + +summary_callback() # print the timer summary diff --git a/src/equations/compressible_navier_stokes_1d.jl b/src/equations/compressible_navier_stokes_1d.jl index 024ce2d7e87..fe79a7e0590 100644 --- a/src/equations/compressible_navier_stokes_1d.jl +++ b/src/equations/compressible_navier_stokes_1d.jl @@ -148,7 +148,7 @@ end function flux(u, gradients, orientation::Integer, equations::CompressibleNavierStokesDiffusion1D) # Here, `u` is assumed to be the "transformed" variables specified by `gradient_variable_transformation`. - rho, v1, _ = convert_transformed_to_primitive(u, equations) + _, v1, _ = convert_transformed_to_primitive(u, equations) # Here `gradients` is assumed to contain the gradients of the primitive variables (rho, v1, v2, T) # either computed directly or reverse engineered from the gradient of the entropy variables # by way of the `convert_gradient_variables` function. @@ -257,6 +257,12 @@ end return T end +@inline function velocity(u, equations::CompressibleNavierStokesDiffusion1D) + rho = u[1] + v1 = u[2] / rho + return v1 +end + @inline function (boundary_condition::BoundaryConditionNavierStokesWall{<:NoSlip, <:Adiabatic})(flux_inner, u_inner, diff --git a/src/equations/compressible_navier_stokes_2d.jl b/src/equations/compressible_navier_stokes_2d.jl index 5708abb4e00..977dae18a43 100644 --- a/src/equations/compressible_navier_stokes_2d.jl +++ b/src/equations/compressible_navier_stokes_2d.jl @@ -148,7 +148,7 @@ end function flux(u, gradients, orientation::Integer, equations::CompressibleNavierStokesDiffusion2D) # Here, `u` is assumed to be the "transformed" variables specified by `gradient_variable_transformation`. - rho, v1, v2, _ = convert_transformed_to_primitive(u, equations) + _, v1, v2, _ = convert_transformed_to_primitive(u, equations) # Here `gradients` is assumed to contain the gradients of the primitive variables (rho, v1, v2, T) # either computed directly or reverse engineered from the gradient of the entropy variables # by way of the `convert_gradient_variables` function. 
@@ -281,6 +281,13 @@ end return T end +@inline function velocity(u, equations::CompressibleNavierStokesDiffusion2D) + rho = u[1] + v1 = u[2] / rho + v2 = u[3] / rho + return SVector(v1, v2) +end + @inline function enstrophy(u, gradients, equations::CompressibleNavierStokesDiffusion2D) # Enstrophy is 0.5 rho ω⋅ω where ω = ∇ × v diff --git a/src/equations/compressible_navier_stokes_3d.jl b/src/equations/compressible_navier_stokes_3d.jl index 9622882fc1a..551462e7595 100644 --- a/src/equations/compressible_navier_stokes_3d.jl +++ b/src/equations/compressible_navier_stokes_3d.jl @@ -148,7 +148,7 @@ end function flux(u, gradients, orientation::Integer, equations::CompressibleNavierStokesDiffusion3D) # Here, `u` is assumed to be the "transformed" variables specified by `gradient_variable_transformation`. - rho, v1, v2, v3, _ = convert_transformed_to_primitive(u, equations) + _, v1, v2, v3, _ = convert_transformed_to_primitive(u, equations) # Here `gradients` is assumed to contain the gradients of the primitive variables (rho, v1, v2, v3, T) # either computed directly or reverse engineered from the gradient of the entropy variables # by way of the `convert_gradient_variables` function. @@ -309,6 +309,14 @@ end return T end +@inline function velocity(u, equations::CompressibleNavierStokesDiffusion3D) + rho = u[1] + v1 = u[2] / rho + v2 = u[3] / rho + v3 = u[4] / rho + return SVector(v1, v2, v3) +end + @inline function enstrophy(u, gradients, equations::CompressibleNavierStokesDiffusion3D) # Enstrophy is 0.5 rho ω⋅ω where ω = ∇ × v diff --git a/test/test_parabolic_1d.jl b/test/test_parabolic_1d.jl index 7de57a45178..c640be3c8b8 100644 --- a/test/test_parabolic_1d.jl +++ b/test/test_parabolic_1d.jl @@ -228,6 +228,29 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "TreeMesh1D: elixir_navierstokes_viscous_shock.jl" begin + @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", + "elixir_navierstokes_viscous_shock.jl"), + l2=[ + 0.00025762354103445303, + 0.0001433692781569829, + 0.00017369861968287976 + ], + linf=[ + 0.0016731940030498826, + 0.0010638575921477766, + 0.0011495207677434394 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi output directory diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index dc3e776154f..62142e1fc3b 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -760,6 +760,31 @@ end @test isapprox(drag_f, 1.5427441885921553, atol = 1e-13) @test isapprox(lift_f, 0.005621910087395724, atol = 1e-13) end + +@trixi_testset "P4estMesh2D: elixir_navierstokes_viscous_shock.jl" begin + @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + "elixir_navierstokes_viscous_shock.jl"), + l2=[ + 0.0002576236264053728, + 0.00014336949098706463, + 7.189100338239794e-17, + 0.00017369905124642074 + ], + linf=[ + 0.0016731940983241156, + 0.0010638640749656147, + 5.59044079947959e-16, + 0.001149532023891009 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory diff --git a/test/test_parabolic_3d.jl 
b/test/test_parabolic_3d.jl index 99ad05c7f70..9b7cb8a75ce 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -503,6 +503,33 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "P4estMesh3D: elixir_navierstokes_viscous_shock.jl" begin + @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", + "elixir_navierstokes_viscous_shock.jl"), + l2=[ + 0.0002576235461250765, + 0.0001433693418567713, + 1.5583069105517042e-16, + 1.257551423107977e-16, + 0.00017369872990116004 + ], + linf=[ + 0.0016731930282756213, + 0.0010638586882356638, + 2.738015991633e-15, + 3.281831854493919e-15, + 0.0011495231318404686 + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory From 824f7a8a5e433c8deb29245bf9a79c3558bce397 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 28 Nov 2024 18:20:38 +0100 Subject: [PATCH 12/41] Time StepCallbacks for custom integrators (#2182) --- src/time_integration/methods_2N.jl | 14 ++++++++------ src/time_integration/methods_3Sstar.jl | 14 ++++++++------ src/time_integration/methods_SSP.jl | 15 +++++++++------ 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/src/time_integration/methods_2N.jl b/src/time_integration/methods_2N.jl index 1143e4ecb93..fb2847e1c91 100644 --- a/src/time_integration/methods_2N.jl +++ b/src/time_integration/methods_2N.jl @@ -189,13 +189,15 @@ function step!(integrator::SimpleIntegrator2N) integrator.iter += 1 integrator.t += integrator.dt - # handle callbacks - if callbacks isa CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) + @trixi_timeit timer() "Step-Callbacks" begin + # handle callbacks + if callbacks isa CallbackSet + foreach(callbacks.discrete_callbacks) do cb + if cb.condition(integrator.u, integrator.t, integrator) + cb.affect!(integrator) + end + return nothing end - return nothing end end diff --git a/src/time_integration/methods_3Sstar.jl b/src/time_integration/methods_3Sstar.jl index a3ab023b64b..f197ef60902 100644 --- a/src/time_integration/methods_3Sstar.jl +++ b/src/time_integration/methods_3Sstar.jl @@ -265,13 +265,15 @@ function step!(integrator::SimpleIntegrator3Sstar) integrator.iter += 1 integrator.t += integrator.dt - # handle callbacks - if callbacks isa CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) + @trixi_timeit timer() "Step-Callbacks" begin + # handle callbacks + if callbacks isa CallbackSet + foreach(callbacks.discrete_callbacks) do cb + if cb.condition(integrator.u, integrator.t, integrator) + cb.affect!(integrator) + end + return nothing end - return nothing end end diff --git a/src/time_integration/methods_SSP.jl b/src/time_integration/methods_SSP.jl index ffbc325c82a..33d1f164138 100644 --- a/src/time_integration/methods_SSP.jl +++ b/src/time_integration/methods_SSP.jl @@ -39,7 +39,7 @@ struct SimpleSSPRK33{StageCallbacks} <: SimpleAlgorithmSSP c = SVector(0.0, 1.0, 1 / 2) # Butcher tableau - # c | a + # c | A # 0 | # 1 | 1 # 1/2 | 1/4 1/4 @@ -208,11 +208,14 @@ function solve!(integrator::SimpleIntegratorSSP) integrator.iter += 1 integrator.t += integrator.dt - # handle callbacks - if callbacks isa 
CallbackSet - foreach(callbacks.discrete_callbacks) do cb - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) + @trixi_timeit timer() "Step-Callbacks" begin + # handle callbacks + if callbacks isa CallbackSet + foreach(callbacks.discrete_callbacks) do cb + if cb.condition(integrator.u, integrator.t, integrator) + cb.affect!(integrator) + end + return nothing end end end From 863795efc58dd9dd4d87c462565bad7a06d860cb Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Sun, 1 Dec 2024 17:10:20 +0100 Subject: [PATCH 13/41] More efficient PERK implementation (#2180) * More efficient PERK implementation * fmt * test vals * minor changes * consistency * change memory layout * Time also PERK3 * remove unnecessary stuff * test resize perk3 * make algorithms immutable --- .../elixir_burgers_perk3.jl | 4 +- .../tree_1d_dgsem/elixir_advection_perk2.jl | 2 +- ext/TrixiConvexECOSExt.jl | 2 +- ext/TrixiNLsolveExt.jl | 2 +- .../methods_PERK2.jl | 75 ++++------- .../methods_PERK3.jl | 122 ++++++++---------- .../paired_explicit_runge_kutta.jl | 32 ++++- test/test_structured_1d.jl | 13 +- test/test_unit.jl | 8 +- 9 files changed, 124 insertions(+), 136 deletions(-) diff --git a/examples/structured_1d_dgsem/elixir_burgers_perk3.jl b/examples/structured_1d_dgsem/elixir_burgers_perk3.jl index bf91fde74ea..5ff3aff3678 100644 --- a/examples/structured_1d_dgsem/elixir_burgers_perk3.jl +++ b/examples/structured_1d_dgsem/elixir_burgers_perk3.jl @@ -1,9 +1,9 @@ # Convex and ECOS are imported because they are used for finding the optimal time step and optimal -# monomial coefficients in the stability polynomial of P-ERK time integrators. +# monomial coefficients in the stability polynomial of PERK time integrators. using Convex, ECOS # NLsolve is imported to solve the system of nonlinear equations to find the coefficients -# in the Butcher tableau in the third order P-ERK time integrator. +# in the Butcher tableau in the third order PERK time integrator. using NLsolve using OrdinaryDiffEq diff --git a/examples/tree_1d_dgsem/elixir_advection_perk2.jl b/examples/tree_1d_dgsem/elixir_advection_perk2.jl index 69b587b42ec..86b173d6564 100644 --- a/examples/tree_1d_dgsem/elixir_advection_perk2.jl +++ b/examples/tree_1d_dgsem/elixir_advection_perk2.jl @@ -1,6 +1,6 @@ # Convex and ECOS are imported because they are used for finding the optimal time step and optimal -# monomial coefficients in the stability polynomial of P-ERK time integrators. +# monomial coefficients in the stability polynomial of PERK time integrators. 
using Convex, ECOS using OrdinaryDiffEq diff --git a/ext/TrixiConvexECOSExt.jl b/ext/TrixiConvexECOSExt.jl index 9b897436dbb..a83ac0a524f 100644 --- a/ext/TrixiConvexECOSExt.jl +++ b/ext/TrixiConvexECOSExt.jl @@ -1,7 +1,7 @@ # Package extension for adding Convex-based features to Trixi.jl module TrixiConvexECOSExt -# Required for coefficient optimization in P-ERK scheme integrators +# Required for coefficient optimization in PERK scheme integrators if isdefined(Base, :get_extension) using Convex: MOI, solve!, Variable, minimize, evaluate using ECOS: Optimizer diff --git a/ext/TrixiNLsolveExt.jl b/ext/TrixiNLsolveExt.jl index fa188d04c71..2e6bca0ba7b 100644 --- a/ext/TrixiNLsolveExt.jl +++ b/ext/TrixiNLsolveExt.jl @@ -2,7 +2,7 @@ module TrixiNLsolveExt # Required for finding coefficients in Butcher tableau in the third order of -# P-ERK scheme integrators +# PERK scheme integrators if isdefined(Base, :get_extension) using NLsolve: nlsolve else diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl index 2451680a505..37d994dc9bf 100644 --- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl +++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK2.jl @@ -34,8 +34,8 @@ function compute_PairedExplicitRK2_butcher_tableau(num_stages, eig_vals, tspan, # - 2 Since first entry of A is always zero (explicit method) and second is given by c_2 (consistency) coeffs_max = num_stages - 2 - a_matrix = zeros(coeffs_max, 2) - a_matrix[:, 1] = c[3:end] + a_matrix = zeros(2, coeffs_max) + a_matrix[1, :] = c[3:end] consistency_order = 2 @@ -56,8 +56,8 @@ function compute_PairedExplicitRK2_butcher_tableau(num_stages, eig_vals, tspan, num_monomial_coeffs = length(monomial_coeffs) @assert num_monomial_coeffs == coeffs_max A = compute_a_coeffs(num_stages, stage_scaling_factors, monomial_coeffs) - a_matrix[:, 1] -= A - a_matrix[:, 2] = A + a_matrix[1, :] -= A + a_matrix[2, :] = A end return a_matrix, c, dt_opt @@ -78,8 +78,8 @@ function compute_PairedExplicitRK2_butcher_tableau(num_stages, # - 2 Since first entry of A is always zero (explicit method) and second is given by c_2 (consistency) coeffs_max = num_stages - 2 - a_matrix = zeros(coeffs_max, 2) - a_matrix[:, 1] = c[3:end] + a_matrix = zeros(2, coeffs_max) + a_matrix[1, :] = c[3:end] path_monomial_coeffs = joinpath(base_path_monomial_coeffs, "gamma_" * string(num_stages) * ".txt") @@ -91,8 +91,8 @@ function compute_PairedExplicitRK2_butcher_tableau(num_stages, @assert num_monomial_coeffs == coeffs_max A = compute_a_coeffs(num_stages, stage_scaling_factors, monomial_coeffs) - a_matrix[:, 1] -= A - a_matrix[:, 2] = A + a_matrix[1, :] -= A + a_matrix[2, :] = A return a_matrix, c end @@ -131,16 +131,17 @@ optimized for a certain simulation setup (PDE, IC & BC, Riemann Solver, DG Solve Note: To use this integrator, the user must import the `Convex` and `ECOS` packages. 
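A minimal usage sketch, assuming that `tspan`, `eig_vals` (the eigenvalue spectrum
of the semidiscretization), `ode`, and `callbacks` are provided by the surrounding
elixir (compare `examples/tree_1d_dgsem/elixir_advection_perk2.jl`); the coefficient
directory in the second constructor call is a placeholder:

```julia
using Convex, ECOS # required for the optimization-based constructor
using Trixi

# Optimize the free monomial coefficients of the stability polynomial of a
# 12-stage, second-order PERK scheme for the spectrum `eig_vals` and `tspan` ...
ode_algorithm = Trixi.PairedExplicitRK2(12, tspan, eig_vals)
# ... or read previously optimized coefficients from `gamma_6.txt` in a directory.
ode_algorithm = Trixi.PairedExplicitRK2(6, "path/to/monomial_coeffs")

# PERK schemes are run with Trixi.jl's own integrator loop:
sol = Trixi.solve(ode, ode_algorithm,
                  dt = 1.0, # overwritten by a StepsizeCallback, if one is used
                  save_everystep = false, callback = callbacks)
```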
""" -mutable struct PairedExplicitRK2 <: AbstractPairedExplicitRKSingle - const num_stages::Int +struct PairedExplicitRK2 <: AbstractPairedExplicitRKSingle + num_stages::Int a_matrix::Matrix{Float64} c::Vector{Float64} b1::Float64 bS::Float64 cS::Float64 + dt_opt::Union{Float64, Nothing} -end # struct PairedExplicitRK2 +end # Constructor that reads the coefficients from a file function PairedExplicitRK2(num_stages, base_path_monomial_coeffs::AbstractString, @@ -200,9 +201,8 @@ mutable struct PairedExplicitRK2Integrator{RealT <: Real, uType, Params, Sol, F, finalstep::Bool # added for convenience dtchangeable::Bool force_stepfail::Bool - # PairedExplicitRK2 stages: + # Additional PERK register k1::uType - k_higher::uType end function init(ode::ODEProblem, alg::PairedExplicitRK2; @@ -211,9 +211,7 @@ function init(ode::ODEProblem, alg::PairedExplicitRK2; du = zero(u0) u_tmp = zero(u0) - # PairedExplicitRK2 stages - k1 = zero(u0) - k_higher = zero(u0) + k1 = zero(u0) # Additional PERK register t0 = first(ode.tspan) tdir = sign(ode.tspan[end] - ode.tspan[1]) @@ -226,7 +224,7 @@ function init(ode::ODEProblem, alg::PairedExplicitRK2; ode.tspan; kwargs...), false, true, false, - k1, k_higher) + k1) # initialize callbacks if callback isa CallbackSet @@ -262,48 +260,21 @@ function step!(integrator::PairedExplicitRK2Integrator) end @trixi_timeit timer() "Paired Explicit Runge-Kutta ODE integration step" begin - # k1 - integrator.f(integrator.du, integrator.u, prob.p, integrator.t) - @threaded for i in eachindex(integrator.du) - integrator.k1[i] = integrator.du[i] * integrator.dt - end - - # Construct current state - @threaded for i in eachindex(integrator.u) - integrator.u_tmp[i] = integrator.u[i] + alg.c[2] * integrator.k1[i] - end - # k2 - integrator.f(integrator.du, integrator.u_tmp, prob.p, - integrator.t + alg.c[2] * integrator.dt) - - @threaded for i in eachindex(integrator.du) - integrator.k_higher[i] = integrator.du[i] * integrator.dt - end + # First and second stage are identical across all single/standalone PERK methods + PERK_k1!(integrator, prob.p) + PERK_k2!(integrator, prob.p, alg) # Higher stages for stage in 3:(alg.num_stages) - # Construct current state - @threaded for i in eachindex(integrator.u) - integrator.u_tmp[i] = integrator.u[i] + - alg.a_matrix[stage - 2, 1] * - integrator.k1[i] + - alg.a_matrix[stage - 2, 2] * - integrator.k_higher[i] - end - - integrator.f(integrator.du, integrator.u_tmp, prob.p, - integrator.t + alg.c[stage] * integrator.dt) - - @threaded for i in eachindex(integrator.du) - integrator.k_higher[i] = integrator.du[i] * integrator.dt - end + PERK_ki!(integrator, prob.p, alg, stage) end @threaded for i in eachindex(integrator.u) - integrator.u[i] += alg.b1 * integrator.k1[i] + - alg.bS * integrator.k_higher[i] + integrator.u[i] += integrator.dt * + (alg.b1 * integrator.k1[i] + + alg.bS * integrator.du[i]) end - end # PairedExplicitRK2 step + end integrator.iter += 1 integrator.t += integrator.dt diff --git a/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl b/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl index 02bc3eeba36..5be1d498d59 100644 --- a/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl +++ b/src/time_integration/paired_explicit_runge_kutta/methods_PERK3.jl @@ -59,11 +59,11 @@ function compute_PairedExplicitRK3_butcher_tableau(num_stages, tspan, monomial_coeffs, c; verbose) end - # Fill A-matrix in P-ERK style - a_matrix = zeros(num_stages - 2, 2) - a_matrix[:, 1] = c[3:end] - a_matrix[:, 1] -= 
a_unknown - a_matrix[:, 2] = a_unknown + # Fill A-matrix in PERK style + a_matrix = zeros(2, num_stages - 2) + a_matrix[1, :] = c[3:end] + a_matrix[1, :] -= a_unknown + a_matrix[2, :] = a_unknown return a_matrix, c, dt_opt end @@ -80,8 +80,8 @@ function compute_PairedExplicitRK3_butcher_tableau(num_stages, # - 2 Since First entry of A is always zero (explicit method) and second is given by c_2 (consistency) a_coeffs_max = num_stages - 2 - a_matrix = zeros(a_coeffs_max, 2) - a_matrix[:, 1] = c[3:end] + a_matrix = zeros(2, a_coeffs_max) + a_matrix[1, :] = c[3:end] path_a_coeffs = joinpath(base_path_a_coeffs, "a_" * string(num_stages) * ".txt") @@ -91,9 +91,9 @@ function compute_PairedExplicitRK3_butcher_tableau(num_stages, num_a_coeffs = size(a_coeffs, 1) @assert num_a_coeffs == a_coeffs_max - # Fill A-matrix in P-ERK style - a_matrix[:, 1] -= a_coeffs - a_matrix[:, 2] = a_coeffs + # Fill A-matrix in PERK style + a_matrix[1, :] -= a_coeffs + a_matrix[2, :] = a_coeffs return a_matrix, c end @@ -107,7 +107,7 @@ end verbose = false, cS2 = 1.0f0) Parameters: - - `num_stages` (`Int`): Number of stages in the paired explicit Runge-Kutta (P-ERK) method. + - `num_stages` (`Int`): Number of stages in the paired explicit Runge-Kutta (PERK) method. - `base_path_a_coeffs` (`AbstractString`): Path to a file containing some coefficients in the A-matrix in the Butcher tableau of the Runge Kutta method. The matrix should be stored in a text file at `joinpath(base_path_a_coeffs, "a_$(num_stages).txt")` and separated by line breaks. @@ -122,7 +122,7 @@ end s is the number of stages, default is 1.0f0. The following structures and methods provide an implementation of -the third-order paired explicit Runge-Kutta (P-ERK) method +the third-order paired explicit Runge-Kutta (PERK) method optimized for a certain simulation setup (PDE, IC & BC, Riemann Solver, DG Solver). The original paper is - Nasab, Vermeire (2022) @@ -135,13 +135,14 @@ Multirate Time-Integration based on Dynamic ODE Partitioning through Adaptively Note: To use this integrator, the user must import the `Convex`, `ECOS`, and `NLsolve` packages. 
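A minimal usage sketch, assuming that `base_path_a_coeffs`, `tspan`, `eig_vals`,
`ode`, and `callbacks` are provided by the surrounding elixir (compare
`examples/structured_1d_dgsem/elixir_burgers_perk3.jl`):

```julia
using Convex, ECOS, NLsolve # required for the optimization-based constructor
using Trixi

# Read previously computed A-matrix coefficients from `a_8.txt` in the given directory ...
ode_algorithm = Trixi.PairedExplicitRK3(8, base_path_a_coeffs)
# ... or determine them from the spectrum `eig_vals` of the semidiscretization.
ode_algorithm = Trixi.PairedExplicitRK3(13, tspan, eig_vals)

sol = Trixi.solve(ode, ode_algorithm,
                  dt = 1.0, # overwritten by a StepsizeCallback, if one is used
                  save_everystep = false, callback = callbacks)
```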
""" -mutable struct PairedExplicitRK3 <: AbstractPairedExplicitRKSingle - const num_stages::Int # S +struct PairedExplicitRK3 <: AbstractPairedExplicitRKSingle + num_stages::Int # S a_matrix::Matrix{Float64} c::Vector{Float64} + dt_opt::Union{Float64, Nothing} -end # struct PairedExplicitRK3 +end # Constructor for previously computed A Coeffs function PairedExplicitRK3(num_stages, base_path_a_coeffs::AbstractString, @@ -195,9 +196,9 @@ mutable struct PairedExplicitRK3Integrator{RealT <: Real, uType, Params, Sol, F, finalstep::Bool # added for convenience dtchangeable::Bool force_stepfail::Bool - # PairedExplicitRK stages: + # Additional PERK3 registers k1::uType - k_higher::uType + kS1::uType end function init(ode::ODEProblem, alg::PairedExplicitRK3; @@ -206,9 +207,9 @@ function init(ode::ODEProblem, alg::PairedExplicitRK3; du = zero(u0) u_tmp = zero(u0) - # PairedExplicitRK stages + # Additional PERK3 registers k1 = zero(u0) - k_higher = zero(u0) + kS1 = zero(u0) t0 = first(ode.tspan) tdir = sign(ode.tspan[end] - ode.tspan[1]) @@ -221,7 +222,7 @@ function init(ode::ODEProblem, alg::PairedExplicitRK3; ode.tspan; kwargs...), false, true, false, - k1, k_higher) + k1, kS1) # initialize callbacks if callback isa CallbackSet @@ -258,72 +259,42 @@ function step!(integrator::PairedExplicitRK3Integrator) end @trixi_timeit timer() "Paired Explicit Runge-Kutta ODE integration step" begin - # k1 - integrator.f(integrator.du, integrator.u, prob.p, integrator.t) - @threaded for i in eachindex(integrator.du) - integrator.k1[i] = integrator.du[i] * integrator.dt - end - - # Construct current state - @threaded for i in eachindex(integrator.du) - integrator.u_tmp[i] = integrator.u[i] + alg.c[2] * integrator.k1[i] - end - # k2 - integrator.f(integrator.du, integrator.u_tmp, prob.p, - integrator.t + alg.c[2] * integrator.dt) - - @threaded for i in eachindex(integrator.du) - integrator.k_higher[i] = integrator.du[i] * integrator.dt - end + # First and second stage are identical across all single/standalone PERK methods + PERK_k1!(integrator, prob.p) + PERK_k2!(integrator, prob.p, alg) - # Higher stages for stage in 3:(alg.num_stages - 1) - # Construct current state - @threaded for i in eachindex(integrator.du) - integrator.u_tmp[i] = integrator.u[i] + - alg.a_matrix[stage - 2, 1] * - integrator.k1[i] + - alg.a_matrix[stage - 2, 2] * - integrator.k_higher[i] - end - - integrator.f(integrator.du, integrator.u_tmp, prob.p, - integrator.t + alg.c[stage] * integrator.dt) - - @threaded for i in eachindex(integrator.du) - integrator.k_higher[i] = integrator.du[i] * integrator.dt - end + PERK_ki!(integrator, prob.p, alg, stage) end - # Last stage - @threaded for i in eachindex(integrator.du) - integrator.u_tmp[i] = integrator.u[i] + - alg.a_matrix[alg.num_stages - 2, 1] * - integrator.k1[i] + - alg.a_matrix[alg.num_stages - 2, 2] * - integrator.k_higher[i] + # We need to store `du` of the S-1 stage in `kS1` for the final update: + @threaded for i in eachindex(integrator.u) + integrator.kS1[i] = integrator.du[i] end - integrator.f(integrator.du, integrator.u_tmp, prob.p, - integrator.t + alg.c[alg.num_stages] * integrator.dt) + PERK_ki!(integrator, prob.p, alg, alg.num_stages) @threaded for i in eachindex(integrator.u) # "Own" PairedExplicitRK based on SSPRK33. 
- # Note that 'k_higher' carries the values of K_{S-1} + # Note that 'kS1' carries the values of K_{S-1} # and that we construct 'K_S' "in-place" from 'integrator.du' - integrator.u[i] += (integrator.k1[i] + integrator.k_higher[i] + - 4.0 * integrator.du[i] * integrator.dt) / 6.0 + integrator.u[i] += integrator.dt * + (integrator.k1[i] + integrator.kS1[i] + + 4.0 * integrator.du[i]) / 6.0 end - end # PairedExplicitRK step timer + end integrator.iter += 1 integrator.t += integrator.dt - # handle callbacks - if callbacks isa CallbackSet - for cb in callbacks.discrete_callbacks - if cb.condition(integrator.u, integrator.t, integrator) - cb.affect!(integrator) + @trixi_timeit timer() "Step-Callbacks" begin + # handle callbacks + if callbacks isa CallbackSet + foreach(callbacks.discrete_callbacks) do cb + if cb.condition(integrator.u, integrator.t, integrator) + cb.affect!(integrator) + end + return nothing end end end @@ -334,4 +305,13 @@ function step!(integrator::PairedExplicitRK3Integrator) terminate!(integrator) end end + +function Base.resize!(integrator::PairedExplicitRK3Integrator, new_size) + resize!(integrator.u, new_size) + resize!(integrator.du, new_size) + resize!(integrator.u_tmp, new_size) + + resize!(integrator.k1, new_size) + resize!(integrator.kS1, new_size) +end end # @muladd diff --git a/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl b/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl index c606326738f..87f067fc39d 100644 --- a/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl +++ b/src/time_integration/paired_explicit_runge_kutta/paired_explicit_runge_kutta.jl @@ -108,13 +108,42 @@ function solve!(integrator::AbstractPairedExplicitRKIntegrator) @trixi_timeit timer() "main loop" while !integrator.finalstep step!(integrator) - end # "main loop" timer + end return TimeIntegratorSolution((first(prob.tspan), integrator.t), (prob.u0, integrator.u), integrator.sol.prob) end +# Function that computes the first stage of a general PERK method +@inline function PERK_k1!(integrator::AbstractPairedExplicitRKIntegrator, p) + integrator.f(integrator.k1, integrator.u, p, integrator.t) +end + +@inline function PERK_k2!(integrator::AbstractPairedExplicitRKSingleIntegrator, p, alg) + @threaded for i in eachindex(integrator.du) + integrator.u_tmp[i] = integrator.u[i] + + alg.c[2] * integrator.dt * integrator.k1[i] + end + + integrator.f(integrator.du, integrator.u_tmp, p, + integrator.t + alg.c[2] * integrator.dt) +end + +@inline function PERK_ki!(integrator::AbstractPairedExplicitRKSingleIntegrator, p, alg, + stage) + # Construct current state + @threaded for i in eachindex(integrator.u) + integrator.u_tmp[i] = integrator.u[i] + + integrator.dt * + (alg.a_matrix[1, stage - 2] * integrator.k1[i] + + alg.a_matrix[2, stage - 2] * integrator.du[i]) + end + + integrator.f(integrator.du, integrator.u_tmp, p, + integrator.t + alg.c[stage] * integrator.dt) +end + # used for AMR (Adaptive Mesh Refinement) function Base.resize!(integrator::AbstractPairedExplicitRKIntegrator, new_size) resize!(integrator.u, new_size) @@ -122,7 +151,6 @@ function Base.resize!(integrator::AbstractPairedExplicitRKIntegrator, new_size) resize!(integrator.u_tmp, new_size) resize!(integrator.k1, new_size) - resize!(integrator.k_higher, new_size) end # get a cache where the RHS can be stored diff --git a/test/test_structured_1d.jl b/test/test_structured_1d.jl index 3061257d508..ccb0c8cacac 100644 --- a/test/test_structured_1d.jl +++ 
b/test/test_structured_1d.jl @@ -72,6 +72,15 @@ end du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 8000 end + + # Test `resize!` + integrator = init(ode, ode_algorithm, dt = 42.0, callback = callbacks) + resize!(integrator, 42) + @test length(integrator.u) == 42 + @test length(integrator.du) == 42 + @test length(integrator.u_tmp) == 42 + @test length(integrator.k1) == 42 + @test length(integrator.kS1) == 42 end # Testing the third-order paired explicit Runge-Kutta (PERK) method without stepsize callback @@ -82,8 +91,8 @@ end save_solution=SaveSolutionCallback(dt = 0.1 + 1.0e-8), # Adding a small epsilon to avoid floating-point precision issues callbacks=CallbackSet(summary_callback, save_solution, analysis_callback, alive_callback), - l2=[5.726144786001842e-7], - linf=[3.430730019182704e-6]) + l2=[5.726144824784944e-7], + linf=[3.43073006914274e-6]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_unit.jl b/test/test_unit.jl index 641c7244efe..cd219050555 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -1700,7 +1700,7 @@ end ode_algorithm = Trixi.PairedExplicitRK2(6, path_coeff_file) - @test isapprox(ode_algorithm.a_matrix, + @test isapprox(transpose(ode_algorithm.a_matrix), [0.12405417889682908 0.07594582110317093 0.16178873711001726 0.13821126288998273 0.16692313960864164 0.2330768603913584 @@ -1713,7 +1713,7 @@ end tspan = (0.0, 1.0) ode_algorithm = Trixi.PairedExplicitRK2(12, tspan, vec(eig_vals)) - @test isapprox(ode_algorithm.a_matrix, + @test isapprox(transpose(ode_algorithm.a_matrix), [0.06453812656711647 0.02637096434197444 0.09470601372274887 0.041657622640887494 0.12332877820069793 0.058489403617483886 @@ -1733,7 +1733,7 @@ end ode_algorithm = Trixi.PairedExplicitRK3(8, path_coeff_file) - @test isapprox(ode_algorithm.a_matrix, + @test isapprox(transpose(ode_algorithm.a_matrix), [0.33551678438002486 0.06448322158043965 0.49653494442225443 0.10346507941960345 0.6496890912144586 0.15031092070647037 @@ -1748,7 +1748,7 @@ end tspan = (0.0, 1.0) ode_algorithm = Trixi.PairedExplicitRK3(13, tspan, vec(eig_vals)) - @test isapprox(ode_algorithm.a_matrix, + @test isapprox(transpose(ode_algorithm.a_matrix), [0.19121164778938382 0.008788355190848427 0.28723462747227385 0.012765384448655121 0.38017717196008227 0.019822834000382223 From 7580e9ae3b3538b8535344ec54c5229f7af31f46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 06:20:46 +0100 Subject: [PATCH 14/41] Bump crate-ci/typos from 1.27.0 to 1.28.1 (#2186) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.27.0 to 1.28.1. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.27.0...v1.28.1) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/SpellCheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index a40e187e82a..3d557915755 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.27.0 + uses: crate-ci/typos@v1.28.1 From 50d30e9ef818052518fd48a065e60d509482e49f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 12:38:11 +0100 Subject: [PATCH 15/41] Bump codecov/codecov-action from 4 to 5 (#2185) * Bump codecov/codecov-action from 4 to 5 Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4 to 5. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v4...v5) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update .github/workflows/ci.yml Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0c636ee8b0b..020dd72d0fe 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -133,9 +133,9 @@ jobs: - uses: julia-actions/julia-processcoverage@v1 with: directories: src,examples,ext - - uses: codecov/codecov-action@v4 + - uses: codecov/codecov-action@v5 with: - file: ./lcov.info + files: ./lcov.info flags: unittests name: codecov-umbrella fail_ci_if_error: true From 5db379bb21f2603391e6a68332c84d051ab09e9a Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Mon, 2 Dec 2024 20:31:31 +0100 Subject: [PATCH 16/41] Arbitrary Precision LGL Basis (#2128) * test higher precision * try to get higher order convergence * type stability * fmt * conservation * correct * working version * changes * rm ODE * rm unused * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * RealT for TreeMesh * undo tree * Revert "undo tree" This reverts commit 8103d0a333c7e78339d4bbbdf88e35eedaec1491. 
* ones, zeros * Apply suggestions from code review Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * reprod test vals f32 * LV suggestion * typo * shorten * data safety * typesafety * fix * docstring * comments * Type general projection * error * syntax * bring back conv * tolerance always dynamic * convert pi * tighter tolerances * warning only * update some float test vals * remove unused/NON_TESTED * restructure arguments * docstrings * remove random 1000 * old behaviour * compliance with deprecated dgsem * Avoid breaking change * avoid ambiguity * pass datatype through * do not cast to float * remvoe comment * more type generality * do not mix up things * move example * tests * News etc * fmt * update test vals to different basis * try to disable warnings * fmt * CI hazzle * fun with Ci * more warnings * Compat entries * Tolerances for quad precision types * Comment * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * comment * shorter code for analysis * Apply suggestions from code review Co-authored-by: Michael Schlottke-Lakemper --------- Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Hendrik Ranocha --- NEWS.md | 2 + README.md | 1 + docs/src/index.md | 1 + .../elixir_advection_float128.jl | 60 +++++++ .../elixir_advection_doublefloat.jl | 63 ++++++++ src/auxiliary/precompile.jl | 6 +- src/auxiliary/special_elixirs.jl | 16 +- src/callbacks_step/analysis_dg1d.jl | 9 +- src/callbacks_step/analysis_dg2d.jl | 9 +- src/callbacks_step/analysis_dg2d_parallel.jl | 9 +- src/callbacks_step/analysis_dg3d.jl | 11 +- src/callbacks_step/analysis_dg3d_parallel.jl | 9 +- src/solvers/dgsem/basis_lobatto_legendre.jl | 153 +++++++++--------- src/solvers/dgsem/l2projection.jl | 28 ++-- test/Project.toml | 4 + test/test_p4est_2d.jl | 4 +- test/test_structured_1d.jl | 14 ++ test/test_tree_1d_advection.jl | 14 ++ test/test_tree_2d_mhd.jl | 34 ++-- test/test_trixi.jl | 12 +- test/test_unit.jl | 6 + test/test_unstructured_2d.jl | 4 +- 22 files changed, 330 insertions(+), 139 deletions(-) create mode 100644 examples/structured_1d_dgsem/elixir_advection_float128.jl create mode 100644 examples/tree_1d_dgsem/elixir_advection_doublefloat.jl diff --git a/NEWS.md b/NEWS.md index 2f71c45e57e..c187f229519 100644 --- a/NEWS.md +++ b/NEWS.md @@ -11,6 +11,8 @@ for human readability. - New time integrator `PairedExplicitRK3`, implementing the third-order paired explicit Runge-Kutta method with [Convex.jl](https://github.com/jump-dev/Convex.jl), [ECOS.jl](https://github.com/jump-dev/ECOS.jl), and [NLsolve.jl](https://github.com/JuliaNLSolvers/NLsolve.jl) ([#2008]) +- `LobattoLegendreBasis` and related datastructures made fully floating-type general, + enabling calculations with higher than double (`Float64`) precision ([#2128]) ## Changes when updating to v0.9 from v0.8.x diff --git a/README.md b/README.md index 70c28799f31..5b58612bb09 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,7 @@ installation and postprocessing procedures. 
Its features include: * Hierarchical quadtree/octree grid with adaptive mesh refinement * Forests of quadtrees/octrees with [p4est](https://github.com/cburstedde/p4est) via [P4est.jl](https://github.com/trixi-framework/P4est.jl) * High-order accuracy in space and time + * Arbitrary floating-point precision * Discontinuous Galerkin methods * Kinetic energy-preserving and entropy-stable methods based on flux differencing * Entropy-stable shock capturing diff --git a/docs/src/index.md b/docs/src/index.md index 88752cc7f11..40191e97b9a 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -26,6 +26,7 @@ installation and postprocessing procedures. Its features include: * Hierarchical quadtree/octree grid with adaptive mesh refinement * Forests of quadtrees/octrees with [p4est](https://github.com/cburstedde/p4est) via [P4est.jl](https://github.com/trixi-framework/P4est.jl) * High-order accuracy in space and time + * Arbitrary floating-point precision * Discontinuous Galerkin methods * Kinetic energy-preserving and entropy-stable methods based on flux differencing * Entropy-stable shock capturing diff --git a/examples/structured_1d_dgsem/elixir_advection_float128.jl b/examples/structured_1d_dgsem/elixir_advection_float128.jl new file mode 100644 index 00000000000..7f72eba58e3 --- /dev/null +++ b/examples/structured_1d_dgsem/elixir_advection_float128.jl @@ -0,0 +1,60 @@ + +using OrdinaryDiffEq +using Trixi + +using Quadmath + +############################################################################### +# semidiscretization of the linear advection equation + +# See https://github.com/JuliaMath/Quadmath.jl +RealT = Float128 + +advection_velocity = 4 / 3 # Does not need to be in higher precision +equations = LinearScalarAdvectionEquation1D(advection_velocity) + +solver = DGSEM(RealT = RealT, polydeg = 13, surface_flux = flux_lax_friedrichs) + +# CARE: Important to use higher precision datatype for coordinates +# as these are used for type promotion of the mesh (points etc.) +coordinates_min = (-one(RealT),) +coordinates_max = (one(RealT),) +cells_per_dimension = (1,) + +# `StructuredMesh` infers datatype from coordinates +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# CARE: Important to use higher precision datatype in specification of final time +tspan = (zero(RealT), one(RealT)) + +ode = semidiscretize(semi, tspan); + +summary_callback = SummaryCallback() + +analysis_callback = AnalysisCallback(semi, interval = 100, + extra_analysis_errors = (:conservation_error,)) + +# cfl does not need to be in higher precision +stepsize_callback = StepsizeCallback(cfl = 0.25) + +callbacks = CallbackSet(summary_callback, + stepsize_callback, + analysis_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, Feagin14(), + # Turn off adaptivity to avoid setting very small tolerances + adaptive = false, + dt = 42, # `dt` does not need to be in higher precision + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/tree_1d_dgsem/elixir_advection_doublefloat.jl b/examples/tree_1d_dgsem/elixir_advection_doublefloat.jl new file mode 100644 index 00000000000..8405d0125d0 --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_advection_doublefloat.jl @@ -0,0 +1,63 @@ + +using OrdinaryDiffEq +using Trixi + +using DoubleFloats + +############################################################################### +# semidiscretization of the linear advection equation + +# See https://github.com/JuliaMath/DoubleFloats.jl +RealT = Double64 + +advection_velocity = 4 / 3 # Does not need to be in higher precision +equations = LinearScalarAdvectionEquation1D(advection_velocity) + +solver = DGSEM(RealT = RealT, polydeg = 7, surface_flux = flux_lax_friedrichs) + +# CARE: Important to use higher precision datatype for coordinates +# as these are used for type promotion of the mesh (points etc.) +coordinates_min = -one(RealT) # minimum coordinate +coordinates_max = one(RealT) # maximum coordinate + +# For `TreeMesh` the datatype has to be specified explicitly, +# i.e., is not inferred from the coordinates. +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 30_000, + RealT = RealT) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# CARE: Important to use higher precision datatype in specification of final time +tspan = (zero(RealT), one(RealT)) + +ode = semidiscretize(semi, tspan); + +summary_callback = SummaryCallback() + +analysis_callback = AnalysisCallback(semi, interval = 100, + extra_analysis_errors = (:conservation_error,)) + +# cfl does not need to be in higher precision +stepsize_callback = StepsizeCallback(cfl = 1.4) + +callbacks = CallbackSet(summary_callback, + stepsize_callback, + analysis_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, DP8(), + # Turn off adaptivity to avoid setting very small tolerances + adaptive = false, + dt = 42, # `dt` does not need to be in higher precision + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/src/auxiliary/precompile.jl b/src/auxiliary/precompile.jl index dfda8ece687..5a1de036808 100644 --- a/src/auxiliary/precompile.jl +++ b/src/auxiliary/precompile.jl @@ -329,8 +329,10 @@ function _precompile_manual_() @assert Base.precompile(Tuple{typeof(Trixi.gauss_nodes_weights), Int}) @assert Base.precompile(Tuple{typeof(Trixi.calc_forward_upper), Int}) @assert Base.precompile(Tuple{typeof(Trixi.calc_forward_lower), Int}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, Val{:gauss}}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, Val{:gauss}}) + @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, + Val{:gauss}}) + @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, + Val{:gauss}}) @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, Val{:gauss_lobatto}}) @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, diff --git a/src/auxiliary/special_elixirs.jl b/src/auxiliary/special_elixirs.jl index d71a27aa96a..3e960945e7a 100644 --- a/src/auxiliary/special_elixirs.jl +++ b/src/auxiliary/special_elixirs.jl @@ -6,10 +6,11 @@ #! format: noindent """ - convergence_test([mod::Module=Main,] elixir::AbstractString, iterations; kwargs...) + convergence_test([mod::Module=Main,] elixir::AbstractString, iterations, RealT = Float64; kwargs...) Run `iterations` Trixi.jl simulations using the setup given in `elixir` and compute the experimental order of convergence (EOC) in the ``L^2`` and ``L^\\infty`` norm. +Use `RealT` as the data type to represent the errors. In each iteration, the resolution of the respective mesh will be doubled. Additional keyword arguments `kwargs...` and the optional module `mod` are passed directly to [`trixi_include`](@ref). @@ -18,12 +19,14 @@ This function assumes that the spatial resolution is set via the keywords `initial_refinement_level` (an integer) or `cells_per_dimension` (a tuple of integers, one per spatial dimension). """ -function convergence_test(mod::Module, elixir::AbstractString, iterations; kwargs...) +function convergence_test(mod::Module, elixir::AbstractString, iterations, + RealT = Float64; + kwargs...) 
@assert(iterations>1, "Number of iterations must be bigger than 1 for a convergence analysis") # Types of errors to be calculated - errors = Dict(:l2 => Float64[], :linf => Float64[]) + errors = Dict(:l2 => RealT[], :linf => RealT[]) initial_resolution = extract_initial_resolution(elixir, kwargs) @@ -105,7 +108,7 @@ function analyze_convergence(errors, iterations, println("") # Print mean EOCs - mean_values = zeros(nvariables) + mean_values = zeros(eltype(errors[:l2]), nvariables) for v in 1:nvariables mean_values[v] = sum(eocs[kind][:, v]) ./ length(eocs[kind][:, v]) @printf("%-10s", "mean") @@ -119,8 +122,9 @@ function analyze_convergence(errors, iterations, return eoc_mean_values end -function convergence_test(elixir::AbstractString, iterations; kwargs...) - convergence_test(Main, elixir::AbstractString, iterations; kwargs...) +function convergence_test(elixir::AbstractString, iterations, RealT = Float64; + kwargs...) + convergence_test(Main, elixir::AbstractString, iterations, RealT; kwargs...) end # Helper methods used in the functions defined above diff --git a/src/callbacks_step/analysis_dg1d.jl b/src/callbacks_step/analysis_dg1d.jl index d2613c325be..b4d49a39d5a 100644 --- a/src/callbacks_step/analysis_dg1d.jl +++ b/src/callbacks_step/analysis_dg1d.jl @@ -61,16 +61,17 @@ function calc_error_norms(func, u, t, analyzer, inv.(view(inverse_jacobian, :, element))) # Calculate errors at each analysis node - @. jacobian_local = abs(jacobian_local) - for i in eachnode(analyzer) u_exact = initial_condition(get_node_coords(x_local, equations, dg, i), t, equations) diff = func(u_exact, equations) - func(get_node_vars(u_local, equations, dg, i), equations) - l2_error += diff .^ 2 * (weights[i] * jacobian_local[i]) + # We take absolute value as we need the Jacobian here for the volume calculation + abs_jacobian_local_i = abs(jacobian_local[i]) + + l2_error += diff .^ 2 * (weights[i] * abs_jacobian_local_i) linf_error = @. max(linf_error, abs(diff)) - total_volume += weights[i] * jacobian_local[i] + total_volume += weights[i] * abs_jacobian_local_i end end diff --git a/src/callbacks_step/analysis_dg2d.jl b/src/callbacks_step/analysis_dg2d.jl index e089133fa17..e33533738ec 100644 --- a/src/callbacks_step/analysis_dg2d.jl +++ b/src/callbacks_step/analysis_dg2d.jl @@ -161,16 +161,17 @@ function calc_error_norms(func, u, t, analyzer, jacobian_tmp1) # Calculate errors at each analysis node - @. jacobian_local = abs(jacobian_local) - for j in eachnode(analyzer), i in eachnode(analyzer) u_exact = initial_condition(get_node_coords(x_local, equations, dg, i, j), t, equations) diff = func(u_exact, equations) - func(get_node_vars(u_local, equations, dg, i, j), equations) - l2_error += diff .^ 2 * (weights[i] * weights[j] * jacobian_local[i, j]) + # We take absolute value as we need the Jacobian here for the volume calculation + abs_jacobian_local_ij = abs(jacobian_local[i, j]) + + l2_error += diff .^ 2 * (weights[i] * weights[j] * abs_jacobian_local_ij) linf_error = @. max(linf_error, abs(diff)) - total_volume += weights[i] * weights[j] * jacobian_local[i, j] + total_volume += weights[i] * weights[j] * abs_jacobian_local_ij end end diff --git a/src/callbacks_step/analysis_dg2d_parallel.jl b/src/callbacks_step/analysis_dg2d_parallel.jl index 5b3ae858ab7..10b6e30f95c 100644 --- a/src/callbacks_step/analysis_dg2d_parallel.jl +++ b/src/callbacks_step/analysis_dg2d_parallel.jl @@ -114,16 +114,17 @@ function calc_error_norms(func, u, t, analyzer, jacobian_tmp1) # Calculate errors at each analysis node - @. 
jacobian_local = abs(jacobian_local) - for j in eachnode(analyzer), i in eachnode(analyzer) u_exact = initial_condition(get_node_coords(x_local, equations, dg, i, j), t, equations) diff = func(u_exact, equations) - func(get_node_vars(u_local, equations, dg, i, j), equations) - l2_error += diff .^ 2 * (weights[i] * weights[j] * jacobian_local[i, j]) + # We take absolute value as we need the Jacobian here for the volume calculation + abs_jacobian_local_ij = abs(jacobian_local[i, j]) + + l2_error += diff .^ 2 * (weights[i] * weights[j] * abs_jacobian_local_ij) linf_error = @. max(linf_error, abs(diff)) - volume += weights[i] * weights[j] * jacobian_local[i, j] + volume += weights[i] * weights[j] * abs_jacobian_local_ij end end diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl index 3c9c3a69f5e..efc1dc3bb3c 100644 --- a/src/callbacks_step/analysis_dg3d.jl +++ b/src/callbacks_step/analysis_dg3d.jl @@ -187,18 +187,19 @@ function calc_error_norms(func, u, t, analyzer, jacobian_tmp1, jacobian_tmp2) # Calculate errors at each analysis node - @. jacobian_local = abs(jacobian_local) - for k in eachnode(analyzer), j in eachnode(analyzer), i in eachnode(analyzer) u_exact = initial_condition(get_node_coords(x_local, equations, dg, i, j, k), t, equations) diff = func(u_exact, equations) - func(get_node_vars(u_local, equations, dg, i, j, k), equations) + # We take absolute value as we need the Jacobian here for the volume calculation + abs_jacobian_local_ijk = abs(jacobian_local[i, j, k]) + l2_error += diff .^ 2 * - (weights[i] * weights[j] * weights[k] * jacobian_local[i, j, k]) + (weights[i] * weights[j] * weights[k] * abs_jacobian_local_ijk) linf_error = @. max(linf_error, abs(diff)) - total_volume += weights[i] * weights[j] * weights[k] * - jacobian_local[i, j, k] + total_volume += (weights[i] * weights[j] * weights[k] * + abs_jacobian_local_ijk) end end diff --git a/src/callbacks_step/analysis_dg3d_parallel.jl b/src/callbacks_step/analysis_dg3d_parallel.jl index 285f0f62de6..ab9ba6a0055 100644 --- a/src/callbacks_step/analysis_dg3d_parallel.jl +++ b/src/callbacks_step/analysis_dg3d_parallel.jl @@ -31,17 +31,18 @@ function calc_error_norms(func, u, t, analyzer, jacobian_tmp1, jacobian_tmp2) # Calculate errors at each analysis node - @. jacobian_local = abs(jacobian_local) - for k in eachnode(analyzer), j in eachnode(analyzer), i in eachnode(analyzer) u_exact = initial_condition(get_node_coords(x_local, equations, dg, i, j, k), t, equations) diff = func(u_exact, equations) - func(get_node_vars(u_local, equations, dg, i, j, k), equations) + # We take absolute value as we need the Jacobian here for the volume calculation + abs_jacobian_local_ijk = abs(jacobian_local[i, j, k]) + l2_error += diff .^ 2 * - (weights[i] * weights[j] * weights[k] * jacobian_local[i, j, k]) + (weights[i] * weights[j] * weights[k] * abs_jacobian_local_ijk) linf_error = @. max(linf_error, abs(diff)) - volume += weights[i] * weights[j] * weights[k] * jacobian_local[i, j, k] + volume += weights[i] * weights[j] * weights[k] * abs_jacobian_local_ijk end end diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index cac1dba9c74..3ea668c0264 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -6,7 +6,7 @@ #! 
format: noindent """ - LobattoLegendreBasis([RealT=Float64,] polydeg::Integer) + LobattoLegendreBasis([RealT = Float64,] polydeg::Integer) Create a nodal Lobatto-Legendre basis for polynomials of degree `polydeg`. @@ -37,15 +37,14 @@ end function LobattoLegendreBasis(RealT, polydeg::Integer) nnodes_ = polydeg + 1 - # compute everything using `Float64` by default - nodes_, weights_ = gauss_lobatto_nodes_weights(nnodes_) + nodes_, weights_ = gauss_lobatto_nodes_weights(nnodes_, RealT) inverse_weights_ = inv.(weights_) - _, inverse_vandermonde_legendre_ = vandermonde_legendre(nodes_) + _, inverse_vandermonde_legendre_ = vandermonde_legendre(nodes_, RealT) - boundary_interpolation_ = zeros(nnodes_, 2) - boundary_interpolation_[:, 1] = calc_lhat(-1.0, nodes_, weights_) - boundary_interpolation_[:, 2] = calc_lhat(1.0, nodes_, weights_) + boundary_interpolation_ = zeros(RealT, nnodes_, 2) + boundary_interpolation_[:, 1] = calc_lhat(-one(RealT), nodes_, weights_) + boundary_interpolation_[:, 2] = calc_lhat(one(RealT), nodes_, weights_) derivative_matrix_ = polynomial_derivative_matrix(nodes_) derivative_split_ = calc_dsplit(nodes_, weights_) @@ -79,7 +78,6 @@ function LobattoLegendreBasis(RealT, polydeg::Integer) derivative_split_transpose, derivative_dhat) end - LobattoLegendreBasis(polydeg::Integer) = LobattoLegendreBasis(Float64, polydeg) function Base.show(io::IO, basis::LobattoLegendreBasis) @@ -165,11 +163,10 @@ function MortarL2(basis::LobattoLegendreBasis) RealT = real(basis) nnodes_ = nnodes(basis) - # compute everything using `Float64` by default - forward_upper_ = calc_forward_upper(nnodes_) - forward_lower_ = calc_forward_lower(nnodes_) - reverse_upper_ = calc_reverse_upper(nnodes_, Val(:gauss)) - reverse_lower_ = calc_reverse_lower(nnodes_, Val(:gauss)) + forward_upper_ = calc_forward_upper(nnodes_, RealT) + forward_lower_ = calc_forward_lower(nnodes_, RealT) + reverse_upper_ = calc_reverse_upper(nnodes_, Val(:gauss), RealT) + reverse_lower_ = calc_reverse_lower(nnodes_, Val(:gauss), RealT) # type conversions to get the requested real type and enable possible # optimizations of runtime performance and latency @@ -228,10 +225,10 @@ end # end # function MortarEC(basis::LobattoLegendreBasis{RealT}, surface_flux) -# forward_upper = calc_forward_upper(n_nodes) -# forward_lower = calc_forward_lower(n_nodes) -# l2reverse_upper = calc_reverse_upper(n_nodes, Val(:gauss_lobatto)) -# l2reverse_lower = calc_reverse_lower(n_nodes, Val(:gauss_lobatto)) +# forward_upper = calc_forward_upper(n_nodes, RealT) +# forward_lower = calc_forward_lower(n_nodes, RealT) +# l2reverse_upper = calc_reverse_upper(n_nodes, Val(:gauss_lobatto), RealT) +# l2reverse_lower = calc_reverse_lower(n_nodes, Val(:gauss_lobatto), RealT) # # type conversions to make use of StaticArrays etc. 
# nnodes_ = nnodes(basis) @@ -263,8 +260,7 @@ function SolutionAnalyzer(basis::LobattoLegendreBasis; RealT = real(basis) nnodes_ = analysis_polydeg + 1 - # compute everything using `Float64` by default - nodes_, weights_ = gauss_lobatto_nodes_weights(nnodes_) + nodes_, weights_ = gauss_lobatto_nodes_weights(nnodes_, RealT) vandermonde_ = polynomial_interpolation_matrix(get_nodes(basis), nodes_) # type conversions to get the requested real type and enable possible @@ -322,11 +318,10 @@ end function AdaptorL2(basis::LobattoLegendreBasis{RealT}) where {RealT} nnodes_ = nnodes(basis) - # compute everything using `Float64` by default - forward_upper_ = calc_forward_upper(nnodes_) - forward_lower_ = calc_forward_lower(nnodes_) - reverse_upper_ = calc_reverse_upper(nnodes_, Val(:gauss)) - reverse_lower_ = calc_reverse_lower(nnodes_, Val(:gauss)) + forward_upper_ = calc_forward_upper(nnodes_, RealT) + forward_lower_ = calc_forward_lower(nnodes_, RealT) + reverse_upper_ = calc_reverse_upper(nnodes_, Val(:gauss), RealT) + reverse_lower_ = calc_reverse_lower(nnodes_, Val(:gauss), RealT) # type conversions to get the requested real type and enable possible # optimizations of runtime performance and latency @@ -408,7 +403,7 @@ end # This implements algorithm 37 "PolynomialDerivativeMatrix" from Kopriva's book. function polynomial_derivative_matrix(nodes) n_nodes = length(nodes) - d = zeros(n_nodes, n_nodes) + d = zeros(eltype(nodes), n_nodes, n_nodes) wbary = barycentric_weights(nodes) for i in 1:n_nodes, j in 1:n_nodes @@ -481,7 +476,7 @@ For details, see (especially Section 3) """ function barycentric_weights(nodes) n_nodes = length(nodes) - weights = ones(n_nodes) + weights = ones(eltype(nodes), n_nodes) for j in 2:n_nodes, k in 1:(j - 1) weights[k] *= nodes[k] - nodes[j] @@ -528,7 +523,7 @@ For details, see e.g. Section 2 of """ function lagrange_interpolating_polynomials(x, nodes, wbary) n_nodes = length(nodes) - polynomials = zeros(n_nodes) + polynomials = zeros(eltype(nodes), n_nodes) for i in 1:n_nodes # Avoid division by zero when `x` is close to node by using @@ -553,7 +548,7 @@ function lagrange_interpolating_polynomials(x, nodes, wbary) end """ - gauss_lobatto_nodes_weights(n_nodes::Integer) + gauss_lobatto_nodes_weights(n_nodes::Integer, RealT = Float64) Computes nodes ``x_j`` and weights ``w_j`` for the (Legendre-)Gauss-Lobatto quadrature. This implements algorithm 25 "GaussLobattoNodesAndWeights" from the book @@ -563,15 +558,14 @@ This implements algorithm 25 "GaussLobattoNodesAndWeights" from the book Algorithms for scientists and engineers. 
[DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) """ -# From FLUXO (but really from blue book by Kopriva) -function gauss_lobatto_nodes_weights(n_nodes::Integer) - # From Kopriva's book - n_iterations = 10 - tolerance = 1e-15 +function gauss_lobatto_nodes_weights(n_nodes::Integer, RealT = Float64) + # From FLUXO (but really from blue book by Kopriva) + n_iterations = 20 + tolerance = 2 * eps(RealT) # Relative tolerance for Newton iteration # Initialize output - nodes = zeros(n_nodes) - weights = zeros(n_nodes) + nodes = zeros(RealT, n_nodes) + weights = zeros(RealT, n_nodes) # Special case for polynomial degree zero (first order finite volume) if n_nodes == 1 @@ -584,15 +578,15 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) N = n_nodes - 1 # Calculate values at boundary - nodes[1] = -1.0 - nodes[end] = 1.0 - weights[1] = 2 / (N * (N + 1)) + nodes[1] = -1 + nodes[end] = 1 + weights[1] = RealT(2) / (N * (N + 1)) weights[end] = weights[1] # Calculate interior values if N > 1 - cont1 = pi / N - cont2 = 3 / (8 * N * pi) + cont1 = convert(RealT, pi) / N + cont2 = 3 / (8 * N * convert(RealT, pi)) # Use symmetry -> only left side is computed for i in 1:(div(N + 1, 2) - 1) @@ -608,6 +602,10 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) if abs(dx) < tolerance * abs(nodes[i + 1]) break end + + if k == n_iterations + @warn "`gauss_lobatto_nodes_weights` Newton iteration did not converge" + end end # Calculate weight @@ -622,8 +620,8 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) # If odd number of nodes, set center node to origin (= 0.0) and calculate weight if n_nodes % 2 == 1 - _, _, L = calc_q_and_l(N, 0) - nodes[div(N, 2) + 1] = 0.0 + _, _, L = calc_q_and_l(N, zero(RealT)) + nodes[div(N, 2) + 1] = 0 weights[div(N, 2) + 1] = weights[1] / L^2 end @@ -631,11 +629,13 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) end # From FLUXO (but really from blue book by Kopriva, algorithm 24) -function calc_q_and_l(N::Integer, x::Float64) - L_Nm2 = 1.0 +function calc_q_and_l(N::Integer, x::Real) + RealT = typeof(x) + + L_Nm2 = one(RealT) L_Nm1 = x - Lder_Nm2 = 0.0 - Lder_Nm1 = 1.0 + Lder_Nm2 = zero(RealT) + Lder_Nm1 = one(RealT) local L for i in 2:N @@ -652,10 +652,9 @@ function calc_q_and_l(N::Integer, x::Float64) return q, qder, L end -calc_q_and_l(N::Integer, x::Real) = calc_q_and_l(N, convert(Float64, x)) """ - gauss_nodes_weights(n_nodes::Integer) + gauss_nodes_weights(n_nodes::Integer, RealT = Float64) Computes nodes ``x_j`` and weights ``w_j`` for the Gauss-Legendre quadrature. This implements algorithm 23 "LegendreGaussNodesAndWeights" from the book @@ -665,31 +664,30 @@ This implements algorithm 23 "LegendreGaussNodesAndWeights" from the book Algorithms for scientists and engineers. 
[DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) """ -function gauss_nodes_weights(n_nodes::Integer) - # From Kopriva's book - n_iterations = 10 - tolerance = 1e-15 +function gauss_nodes_weights(n_nodes::Integer, RealT = Float64) + n_iterations = 20 + tolerance = 2 * eps(RealT) # Relative tolerance for Newton iteration # Initialize output - nodes = ones(n_nodes) * 1000 - weights = zeros(n_nodes) + nodes = ones(RealT, n_nodes) + weights = zeros(RealT, n_nodes) # Get polynomial degree for convenience N = n_nodes - 1 if N == 0 - nodes .= 0.0 - weights .= 2.0 + nodes .= 0 + weights .= 2 return nodes, weights elseif N == 1 - nodes[1] = -sqrt(1 / 3) + nodes[1] = -sqrt(one(RealT) / 3) nodes[end] = -nodes[1] - weights .= 1.0 + weights .= 1 return nodes, weights else # N > 1 # Use symmetry property of the roots of the Legendre polynomials for i in 0:(div(N + 1, 2) - 1) # Starting guess for Newton method - nodes[i + 1] = -cos(pi / (2 * N + 2) * (2 * i + 1)) + nodes[i + 1] = -cos(convert(RealT, pi) / (2 * N + 2) * (2 * i + 1)) # Newton iteration to find root of Legendre polynomial (= integration node) for k in 0:n_iterations @@ -699,6 +697,10 @@ function gauss_nodes_weights(n_nodes::Integer) if abs(dx) < tolerance * abs(nodes[i + 1]) break end + + if k == n_iterations + @warn "`gauss_nodes_weights` Newton iteration did not converge" + end end # Calculate weight @@ -712,8 +714,8 @@ function gauss_nodes_weights(n_nodes::Integer) # If odd number of nodes, set center node to origin (= 0.0) and calculate weight if n_nodes % 2 == 1 - poly, deriv = legendre_polynomial_and_derivative(N + 1, 0.0) - nodes[div(N, 2) + 1] = 0.0 + poly, deriv = legendre_polynomial_and_derivative(N + 1, zero(RealT)) + nodes[div(N, 2) + 1] = 0 weights[div(N, 2) + 1] = (2 * N + 3) / deriv^2 end @@ -733,20 +735,21 @@ This implements algorithm 22 "LegendrePolynomialAndDerivative" from the book [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) """ function legendre_polynomial_and_derivative(N::Int, x::Real) + RealT = typeof(x) if N == 0 - poly = 1.0 - deriv = 0.0 + poly = one(RealT) + deriv = zero(RealT) elseif N == 1 - poly = convert(Float64, x) - deriv = 1.0 + poly = x + deriv = one(RealT) else - poly_Nm2 = 1.0 - poly_Nm1 = convert(Float64, x) - deriv_Nm2 = 0.0 - deriv_Nm1 = 1.0 + poly_Nm2 = one(RealT) + poly_Nm1 = copy(x) + deriv_Nm2 = zero(RealT) + deriv_Nm1 = one(RealT) - poly = 0.0 - deriv = 0.0 + poly = zero(RealT) + deriv = zero(RealT) for i in 2:N poly = ((2 * i - 1) * x * poly_Nm1 - (i - 1) * poly_Nm2) / i deriv = deriv_Nm2 + (2 * i - 1) * poly_Nm1 @@ -765,10 +768,10 @@ function legendre_polynomial_and_derivative(N::Int, x::Real) end # Calculate Legendre vandermonde matrix and its inverse -function vandermonde_legendre(nodes, N) +function vandermonde_legendre(nodes, N::Integer, RealT = Float64) n_nodes = length(nodes) n_modes = N + 1 - vandermonde = zeros(n_nodes, n_modes) + vandermonde = zeros(RealT, n_nodes, n_modes) for i in 1:n_nodes for m in 1:n_modes @@ -779,5 +782,7 @@ function vandermonde_legendre(nodes, N) inverse_vandermonde = inv(vandermonde) return vandermonde, inverse_vandermonde end -vandermonde_legendre(nodes) = vandermonde_legendre(nodes, length(nodes) - 1) +vandermonde_legendre(nodes, RealT = Float64) = vandermonde_legendre(nodes, + length(nodes) - 1, + RealT) end # @muladd diff --git a/src/solvers/dgsem/l2projection.jl b/src/solvers/dgsem/l2projection.jl index 0bb46f5ca15..436c1b92032 100644 --- a/src/solvers/dgsem/l2projection.jl +++ 
b/src/solvers/dgsem/l2projection.jl @@ -23,9 +23,9 @@ # Calculate forward projection matrix for discrete L2 projection from large to upper # # Note: This is actually an interpolation. -function calc_forward_upper(n_nodes) +function calc_forward_upper(n_nodes, RealT = Float64) # Calculate nodes, weights, and barycentric weights - nodes, weights = gauss_lobatto_nodes_weights(n_nodes) + nodes, _ = gauss_lobatto_nodes_weights(n_nodes, RealT) wbary = barycentric_weights(nodes) # Calculate projection matrix (actually: interpolation) @@ -43,9 +43,9 @@ end # Calculate forward projection matrix for discrete L2 projection from large to lower # # Note: This is actually an interpolation. -function calc_forward_lower(n_nodes) +function calc_forward_lower(n_nodes, RealT = Float64) # Calculate nodes, weights, and barycentric weights - nodes, weights = gauss_lobatto_nodes_weights(n_nodes) + nodes, _ = gauss_lobatto_nodes_weights(n_nodes, RealT) wbary = barycentric_weights(nodes) # Calculate projection matrix (actually: interpolation) @@ -64,9 +64,9 @@ end # # Note: To not make the L2 projection exact, first convert to Gauss nodes, # perform projection, and convert back to Gauss-Lobatto. -function calc_reverse_upper(n_nodes, ::Val{:gauss}) +function calc_reverse_upper(n_nodes, ::Val{:gauss}, RealT = Float64) # Calculate nodes, weights, and barycentric weights for Legendre-Gauss - gauss_nodes, gauss_weights = gauss_nodes_weights(n_nodes) + gauss_nodes, gauss_weights = gauss_nodes_weights(n_nodes, RealT) gauss_wbary = barycentric_weights(gauss_nodes) # Calculate projection matrix (actually: discrete L2 projection with errors) @@ -80,7 +80,7 @@ function calc_reverse_upper(n_nodes, ::Val{:gauss}) end # Calculate Vandermondes - lobatto_nodes, lobatto_weights = gauss_lobatto_nodes_weights(n_nodes) + lobatto_nodes, _ = gauss_lobatto_nodes_weights(n_nodes, RealT) gauss2lobatto = polynomial_interpolation_matrix(gauss_nodes, lobatto_nodes) lobatto2gauss = polynomial_interpolation_matrix(lobatto_nodes, gauss_nodes) @@ -91,9 +91,9 @@ end # # Note: To not make the L2 projection exact, first convert to Gauss nodes, # perform projection, and convert back to Gauss-Lobatto. 
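# A minimal sketch of the new `RealT` argument introduced in this patch
# (illustration only; `Float128` from Quadmath.jl is just one possible choice,
# and these helpers are internal, hence the `Trixi.` prefix):
#
#   using Trixi, Quadmath
#   forward_upper = Trixi.calc_forward_upper(4, Float128)
#   reverse_upper = Trixi.calc_reverse_upper(4, Val(:gauss), Float128)
#
# The projection operators are then assembled from `Float128` nodes and weights
# instead of the previously hard-coded `Float64` ones.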
-function calc_reverse_lower(n_nodes, ::Val{:gauss}) +function calc_reverse_lower(n_nodes, ::Val{:gauss}, RealT = Float64) # Calculate nodes, weights, and barycentric weights for Legendre-Gauss - gauss_nodes, gauss_weights = gauss_nodes_weights(n_nodes) + gauss_nodes, gauss_weights = gauss_nodes_weights(n_nodes, RealT) gauss_wbary = barycentric_weights(gauss_nodes) # Calculate projection matrix (actually: discrete L2 projection with errors) @@ -107,7 +107,7 @@ function calc_reverse_lower(n_nodes, ::Val{:gauss}) end # Calculate Vandermondes - lobatto_nodes, lobatto_weights = gauss_lobatto_nodes_weights(n_nodes) + lobatto_nodes, _ = gauss_lobatto_nodes_weights(n_nodes, RealT) gauss2lobatto = polynomial_interpolation_matrix(gauss_nodes, lobatto_nodes) lobatto2gauss = polynomial_interpolation_matrix(lobatto_nodes, gauss_nodes) @@ -116,9 +116,9 @@ end # Calculate reverse projection matrix for discrete L2 projection from upper to large (Gauss-Lobatto # version) -function calc_reverse_upper(n_nodes, ::Val{:gauss_lobatto}) +function calc_reverse_upper(n_nodes, ::Val{:gauss_lobatto}, RealT = Float64) # Calculate nodes, weights, and barycentric weights - nodes, weights = gauss_lobatto_nodes_weights(n_nodes) + nodes, weights = gauss_lobatto_nodes_weights(n_nodes, RealT) wbary = barycentric_weights(nodes) # Calculate projection matrix (actually: discrete L2 projection with errors) @@ -135,9 +135,9 @@ end # Calculate reverse projection matrix for discrete L2 projection from lower to large (Gauss-Lobatto # version) -function calc_reverse_lower(n_nodes, ::Val{:gauss_lobatto}) +function calc_reverse_lower(n_nodes, ::Val{:gauss_lobatto}, RealT = Float64) # Calculate nodes, weights, and barycentric weights - nodes, weights = gauss_lobatto_nodes_weights(n_nodes) + nodes, weights = gauss_lobatto_nodes_weights(n_nodes, RealT) wbary = barycentric_weights(nodes) # Calculate projection matrix (actually: discrete L2 projection with errors) diff --git a/test/Project.toml b/test/Project.toml index 64c20ea4c37..f9a6241421b 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -3,6 +3,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Convex = "f65535da-76fb-5f13-bab9-19810c17039a" DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" +DoubleFloats = "497a8b3b-efae-58df-a0af-a86822472b78" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199" ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" @@ -14,6 +15,7 @@ NLsolve = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" +Quadmath = "be4d8f0f-7fa4-5f49-b795-2f01399ab2dd" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" @@ -23,6 +25,7 @@ Aqua = "0.8" CairoMakie = "0.10" Convex = "0.16" DelimitedFiles = "1" +DoubleFloats = "1.4.0" Downloads = "1" ECOS = "1.1.2" ExplicitImports = "1.0.1" @@ -34,6 +37,7 @@ NLsolve = "4.5.1" OrdinaryDiffEq = "6.49.1" Plots = "1.19" Printf = "1" +Quadmath = "0.5.10" Random = "1" StableRNGs = "1.0.2" Test = "1" diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index 7a1a3f3ac09..0e70282e77c 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -231,10 +231,10 @@ end 0.3507514f0 ], linf=[ - 0.2942562f0, + 0.2930063f0, 0.4079147f0, 0.3972956f0, - 1.0810697f0 + 1.0764117f0 ], tspan=(0.0f0, 
diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl
index 7a1a3f3ac09..0e70282e77c 100644
--- a/test/test_p4est_2d.jl
+++ b/test/test_p4est_2d.jl
@@ -231,10 +231,10 @@ end
                             0.3507514f0
                         ],
                         linf=[
-                            0.2942562f0,
+                            0.2930063f0,
                             0.4079147f0,
                             0.3972956f0,
-                            1.0810697f0
+                            1.0764117f0
                         ],
                         tspan=(0.0f0, 1.0f0),
                         rtol=10 * sqrt(eps(Float32)), # to make CI pass
diff --git a/test/test_structured_1d.jl b/test/test_structured_1d.jl
index ccb0c8cacac..a5b561ed8fe 100644
--- a/test/test_structured_1d.jl
+++ b/test/test_structured_1d.jl
@@ -58,6 +58,20 @@ end
     end
 end
 
+@trixi_testset "elixir_advection_float128.jl" begin
+    @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_float128.jl"),
+                        l2=Float128[6.49879312655540217059228636803492411e-09],
+                        linf=Float128[5.35548407857266390181158920649552284e-08])
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
+end
+
 # Testing the third-order paired explicit Runge-Kutta (PERK) method with its optimal CFL number
 @trixi_testset "elixir_burgers_perk3.jl" begin
     @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_burgers_perk3.jl"),
diff --git a/test/test_tree_1d_advection.jl b/test/test_tree_1d_advection.jl
index f061e2e1c30..0665a1be105 100644
--- a/test/test_tree_1d_advection.jl
+++ b/test/test_tree_1d_advection.jl
@@ -142,6 +142,20 @@ end
         @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 8000
     end
 end
+
+@trixi_testset "elixir_advection_doublefloat.jl" begin
+    @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_doublefloat.jl"),
+                        l2=Double64[6.80895929885700039832943251427357703e-11],
+                        linf=Double64[5.82834770064525291688100323411704252e-10])
+    # Ensure that we do not have excessive memory allocations
+    # (e.g., from type instabilities)
+    let
+        t = sol.t[end]
+        u_ode = sol.u[end]
+        du_ode = similar(u_ode)
+        @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+    end
+end
 end
 
 end # module
diff --git a/test/test_tree_2d_mhd.jl b/test/test_tree_2d_mhd.jl
index b7152cb8b75..5d52f4a5d42 100644
--- a/test/test_tree_2d_mhd.jl
+++ b/test/test_tree_2d_mhd.jl
@@ -150,24 +150,24 @@ end
 
 @trixi_testset "elixir_mhd_ec_float32.jl" begin
     @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_ec_float32.jl"),
-                        l2=Float32[0.036355644,
+                        l2=Float32[0.03635566,
+                                   0.042947732,
                                    0.042947736,
-                                   0.04294775,
-                                   0.025748005,
-                                   0.16211236,
-                                   0.017452478,
-                                   0.017452475,
-                                   0.026877576,
-                                   2.0951437f-7],
-                        linf=Float32[0.22100884,
-                                     0.28798944,
-                                     0.2879896,
-                                     0.20858234,
-                                     0.8812654,
-                                     0.09208202,
-                                     0.09208143,
-                                     0.14795381,
-                                     2.0912607f-6])
+                                   0.025748001,
+                                   0.16211228,
+                                   0.01745248,
+                                   0.017452491,
+                                   0.026877586,
+                                   2.417227f-7],
+                        linf=Float32[0.2210092,
+                                     0.28798974,
+                                     0.28799006,
+                                     0.20858109,
+                                     0.8812673,
+                                     0.09208107,
+                                     0.09208131,
+                                     0.14795369,
+                                     2.2078211f-6])
     # Ensure that we do not have excessive memory allocations
     # (e.g., from type instabilities)
     let
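Note on the higher-precision tests above: `DoubleFloats.Double64` and `Quadmath.Float128`
(the two new entries in test/Project.toml) provide roughly twice the significand precision
of `Float64`, which is why the new reference values carry so many digits and why
type-dependent tolerances are added to `test/test_trixi.jl` further below. A rough sketch
of the magnitudes involved; the `atol`/`rtol` expressions mirror the spirit of the macro's
defaults, and the values in the comments are approximate:

    using DoubleFloats: Double64
    using Quadmath: Float128

    eps(Float64)   # ≈ 2.2e-16
    eps(Double64)  # on the order of 1e-32, about eps(Float64)^2
    eps(Float128)  # ≈ 1.9e-34

    # Type-dependent defaults in the spirit of @test_trixi_include
    atol_default = 500 * eps(Float128)
    rtol_default = sqrt(eps(Float128))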
diff --git a/test/test_trixi.jl b/test/test_trixi.jl
index be3521ed16a..024f3654ea0 100644
--- a/test/test_trixi.jl
+++ b/test/test_trixi.jl
@@ -15,6 +15,7 @@ are compared approximately against these reference values, using `atol, rtol`
 as absolute/relative tolerance.
 """
 macro test_trixi_include(elixir, args...)
+    # Note: The variables below are just Symbols, not actual errors/types
     local l2 = get_kwarg(args, :l2, nothing)
     local linf = get_kwarg(args, :linf, nothing)
     local RealT = get_kwarg(args, :RealT, :Float64)
@@ -24,6 +25,12 @@ macro test_trixi_include(elixir, args...)
     elseif RealT === :Float32
         atol_default = 500 * eps(Float32)
         rtol_default = sqrt(eps(Float32))
+    elseif RealT === :Float128
+        atol_default = 500 * eps(Float128)
+        rtol_default = sqrt(eps(Float128))
+    elseif RealT === :Double64
+        atol_default = 500 * eps(Double64)
+        rtol_default = sqrt(eps(Double64))
     end
     local atol = get_kwarg(args, :atol, atol_default)
     local rtol = get_kwarg(args, :rtol, rtol_default)
@@ -167,7 +174,10 @@ macro test_nowarn_mod(expr, additional_ignore_content = String[])
         r"WARNING: Method definition .* in module .* at .* overwritten .*.\n",
         # Warnings from third party packages
         r"┌ Warning: Problem status ALMOST_INFEASIBLE; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n",
-        r"┌ Warning: Problem status ALMOST_OPTIMAL; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n"]
+        r"┌ Warning: Problem status ALMOST_OPTIMAL; solution may be inaccurate.\n└ @ Convex ~/.julia/packages/Convex/.*\n",
+        # Warnings for higher-precision floating data types
+        r"┌ Warning: #= /home/runner/work/Trixi.jl/Trixi.jl/src/solvers/dgsem/interpolation.jl:118 =#:\n│ `LoopVectorization.check_args` on your inputs failed; running fallback `@inbounds @fastmath` loop instead.\n│ Use `warn_check_args=false`, e.g. `@turbo warn_check_args=false ...`, to disable this warning.\n└ @ Trixi ~/.julia/packages/LoopVectorization/tIJUA/src/condense_loopset.jl:1166\n",
+        r"┌ Warning: #= /home/runner/work/Trixi.jl/Trixi.jl/src/solvers/dgsem/interpolation.jl:136 =#:\n│ `LoopVectorization.check_args` on your inputs failed; running fallback `@inbounds @fastmath` loop instead.\n│ Use `warn_check_args=false`, e.g. `@turbo warn_check_args=false ...`, to disable this warning.\n└ @ Trixi ~/.julia/packages/LoopVectorization/tIJUA/src/condense_loopset.jl:1166\n"]
     append!(ignore_content, $additional_ignore_content)
     for pattern in ignore_content
         stderr_content = replace(stderr_content, pattern => "")
diff --git a/test/test_unit.jl b/test/test_unit.jl
index cd219050555..97e32726ca7 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -268,6 +268,12 @@ end
 @timed_testset "interpolation" begin
     @testset "nodes and weights" begin
         @test Trixi.gauss_nodes_weights(1) == ([0.0], [2.0])
+
+        @test Trixi.gauss_nodes_weights(2)[1] ≈ [-1 / sqrt(3), 1 / sqrt(3)]
+        @test Trixi.gauss_nodes_weights(2)[2] == [1.0, 1.0]
+
+        @test Trixi.gauss_nodes_weights(3)[1] ≈ [-sqrt(3 / 5), 0.0, sqrt(3 / 5)]
+        @test Trixi.gauss_nodes_weights(3)[2] ≈ [5 / 9, 8 / 9, 5 / 9]
     end
 
     @testset "multiply_dimensionwise" begin
diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl
index c433338a238..fed71e49f0b 100644
--- a/test/test_unstructured_2d.jl
+++ b/test/test_unstructured_2d.jl
@@ -335,8 +335,8 @@ end
                         ],
                         linf=[
                             Float32(2.776782342826098),
-                            3.2162943f0, # this needs to be adapted
-                            3.6683278f0, # this needed to be adapted
+                            3.2116454f0, # this needs to be adapted
+                            3.6616623f0, # this needed to be adapted
                             Float32(2.052861364219655)
                         ],
                         tspan=(0.0f0, 0.25f0),
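Note on the new unit tests above: the values checked for `Trixi.gauss_nodes_weights(2)`
and `Trixi.gauss_nodes_weights(3)` are the standard 2- and 3-point Gauss-Legendre rules
on [-1, 1]; an n-point rule integrates polynomials up to degree 2n - 1 exactly. A quick
independent sanity check of the tabulated 3-point values (not part of the patch):

    # Integrate x^4 over [-1, 1] with the 3-point rule; the exact value is 2/5
    nodes = [-sqrt(3 / 5), 0.0, sqrt(3 / 5)]
    weights = [5 / 9, 8 / 9, 5 / 9]
    sum(w * x^4 for (w, x) in zip(weights, nodes))  # ≈ 0.4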
From 09d5eb027327dbcb3b04578fe0bfea99feac34c9 Mon Sep 17 00:00:00 2001
From: Manuel Torrilhon
Date: Mon, 2 Dec 2024 23:34:25 +0100
Subject: [PATCH 17/41] Collaborating Institutes (#2187)

* research groups in readme file

* research groups in doc/src/index.md

* Update Andrew's affiliation

Co-authored-by: Andrew Winters

* Remove temporary changelog file to avoid non-unique header link

---------

Co-authored-by: Michael Schlottke-Lakemper
Co-authored-by: Andrew Winters
---
 README.md         | 21 ++++++++++++++++++++-
 docs/make.jl      |  2 ++
 docs/src/index.md | 33 +++++++++++++++++++++++++++------
 3 files changed, 49 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 5b58612bb09..543a0339e41 100644
--- a/README.md
+++ b/README.md
@@ -261,6 +261,25 @@ To get in touch with the developers,
 [join us on Slack](https://join.slack.com/t/trixi-framework/shared_invite/zt-sgkc6ppw-6OXJqZAD5SPjBYqLd8MU~g)
 or [create an issue](https://github.com/trixi-framework/Trixi.jl/issues/new).
 
+## Participating research groups
+Participating research groups in alphabetical order:
+
+[Applied and Computational Mathematics, RWTH Aachen University :de:](https://www.acom.rwth-aachen.de)
+
+[Applied Mathematics, Department of Mathematics, University of Hamburg :de:](https://www.math.uni-hamburg.de/en/forschung/bereiche/am.html)
+
+[Division of Applied Mathematics, Department of Mathematics, Linköping University :sweden:](https://liu.se/en/employee/andwi94)
+
+[Computational and Applied Mathematics, Rice University :us:](https://jlchan.github.io/)
+
+[High-Performance Computing, Institute of Software Technology, German Aerospace Center (DLR) :de:](https://www.dlr.de/en/sc/about-us/departments/high-performance-computing)
+
+[High-Performance Scientific Computing, University of Augsburg :de:](https://hpsc.math.uni-augsburg.de)
+
+[Numerical Mathematics, Institute of Mathematics, Johannes Gutenberg University Mainz :de:](https://ranocha.de)
+
+[Numerical Simulation, Department of Mathematics and Computer Science, University of Cologne :de:](https://www.mi.uni-koeln.de/NumSim/)
+
 ## Acknowledgments