diff --git a/.github/workflows/CacheNotebooks.yml b/.github/workflows/CacheNotebooks.yml
index c8599d13f26..f89560d8158 100644
--- a/.github/workflows/CacheNotebooks.yml
+++ b/.github/workflows/CacheNotebooks.yml
@@ -11,7 +11,7 @@ jobs:
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- name: Checkout caching branch
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
ref: tutorial_notebooks
diff --git a/.github/workflows/DocPreviewCleanup.yml b/.github/workflows/DocPreviewCleanup.yml
index 66d0b342b2e..0850369c9cc 100644
--- a/.github/workflows/DocPreviewCleanup.yml
+++ b/.github/workflows/DocPreviewCleanup.yml
@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout gh-pages branch
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
ref: gh-pages
diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml
index 6b557960c89..129c41a3b5c 100644
--- a/.github/workflows/Documenter.yml
+++ b/.github/workflows/Documenter.yml
@@ -33,7 +33,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v1
with:
version: '1.9'
diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml
index 628d938dd76..ce46360b832 100644
--- a/.github/workflows/FormatCheck.yml
+++ b/.github/workflows/FormatCheck.yml
@@ -20,7 +20,7 @@ jobs:
with:
version: ${{ matrix.julia-version }}
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Install JuliaFormatter and format
# This will use the latest version by default but you can set the version like so:
#
diff --git a/.github/workflows/Invalidations.yml b/.github/workflows/Invalidations.yml
index ba81f83e0ad..18048d26be8 100644
--- a/.github/workflows/Invalidations.yml
+++ b/.github/workflows/Invalidations.yml
@@ -19,12 +19,12 @@ jobs:
- uses: julia-actions/setup-julia@v1
with:
version: '1'
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-invalidations@v1
id: invs_pr
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: ${{ github.event.repository.default_branch }}
- uses: julia-actions/julia-buildpkg@v1
diff --git a/.github/workflows/ReviewChecklist.yml b/.github/workflows/ReviewChecklist.yml
index 959a04752d7..d8854411804 100644
--- a/.github/workflows/ReviewChecklist.yml
+++ b/.github/workflows/ReviewChecklist.yml
@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Add review checklist
uses: trixi-framework/add-pr-review-checklist@v1
with:
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
index a06121e7ca1..eae6d8e0be9 100644
--- a/.github/workflows/SpellCheck.yml
+++ b/.github/workflows/SpellCheck.yml
@@ -8,6 +8,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Actions Repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Check spelling
- uses: crate-ci/typos@v1.16.9
+ uses: crate-ci/typos@v1.16.15
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index c5c95558c8c..2ea30d6fddb 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -15,7 +15,7 @@ jobs:
arch:
- x64
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- run: |
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 4790f93d913..cf8107736e9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -101,7 +101,7 @@ jobs:
arch: x64
trixi_test: threaded
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v1
with:
version: ${{ matrix.version }}
@@ -175,7 +175,7 @@ jobs:
# Instead, we use the more tedious approach described above.
# At first, we check out the repository and download all artifacts
# (and list files for debugging).
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: actions/download-artifact@v3
- run: ls -R
# Next, we merge the individual coverage files and upload
diff --git a/NEWS.md b/NEWS.md
index 4b96e1e2834..0c78484a782 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -12,6 +12,7 @@ for human readability.
- Non-uniform `TreeMesh` available for hyperbolic-parabolic equations.
- Capability to set truly discontinuous initial conditions in 1D.
- Wetting and drying feature and examples for 1D and 2D shallow water equations
+- Implementation of the polytropic Euler equations in 2D
- Subcell positivity limiting support for conservative variables in 2D for `TreeMesh`
#### Changed
diff --git a/Project.toml b/Project.toml
index e14dbcd0c03..4efbc40b098 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "Trixi"
uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb"
authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "]
-version = "0.5.41-pre"
+version = "0.5.46-pre"
[deps]
CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
@@ -56,7 +56,7 @@ DiffEqCallbacks = "2.25"
EllipsisNotation = "1.0"
FillArrays = "0.13.2, 1"
ForwardDiff = "0.10.18"
-HDF5 = "0.14, 0.15, 0.16"
+HDF5 = "0.14, 0.15, 0.16, 0.17"
IfElse = "0.1"
LinearMaps = "2.7, 3.0"
LoopVectorization = "0.12.118"
@@ -71,7 +71,7 @@ PrecompileTools = "1.1"
RecipesBase = "1.1"
Reexport = "1.0"
Requires = "1.1"
-SciMLBase = "1.90"
+SciMLBase = "1.90, 2"
Setfield = "0.8, 1"
SimpleUnPack = "1.1"
StartUpDG = "0.17"
diff --git a/README.md b/README.md
index 63540b1f640..673708d8b89 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,7 @@
[![Build Status](https://github.com/trixi-framework/Trixi.jl/workflows/CI/badge.svg)](https://github.com/trixi-framework/Trixi.jl/actions?query=workflow%3ACI)
[![Codecov](https://codecov.io/gh/trixi-framework/Trixi.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/trixi-framework/Trixi.jl)
[![Coveralls](https://coveralls.io/repos/github/trixi-framework/Trixi.jl/badge.svg?branch=main)](https://coveralls.io/github/trixi-framework/Trixi.jl?branch=main)
+[![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl)
[![License: MIT](https://img.shields.io/badge/License-MIT-success.svg)](https://opensource.org/licenses/MIT)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3996439.svg)](https://doi.org/10.5281/zenodo.3996439)
[![Downloads](https://shields.io/endpoint?url=https://pkgs.genieframework.com/api/v1/badge/Trixi)](https://pkgs.genieframework.com?packages=Trixi)
@@ -17,16 +18,6 @@
-***
-**Trixi.jl at JuliaCon 2023**
-At this year's JuliaCon, we will be present with an online contribution that involves Trixi.jl:
-
-* [Scaling Trixi.jl to more than 10,000 cores using MPI](https://pretalx.com/juliacon2023/talk/PC8PZ8/),
- 27th July 2023, 10:30–11:30 (US/Eastern), 32-G449 (Kiva)
-
-We are looking forward to seeing you there ♥️
-***
-
**Trixi.jl** is a numerical simulation framework for hyperbolic conservation
laws written in [Julia](https://julialang.org). A key objective for the
framework is to be useful to both scientists and students. Therefore, next to
diff --git a/docs/Project.toml b/docs/Project.toml
index 9fc974d6f38..ffa86e0b9f7 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -13,7 +13,7 @@ Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95"
[compat]
CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10"
-Documenter = "0.27"
+Documenter = "1"
ForwardDiff = "0.10"
HOHQMesh = "0.1, 0.2"
LaTeXStrings = "1.2"
diff --git a/docs/literate/make.jl b/docs/literate/make.jl
index b620f85c975..a04d8a0b333 100644
--- a/docs/literate/make.jl
+++ b/docs/literate/make.jl
@@ -51,12 +51,25 @@ function create_tutorials(files)
# Run tests on all tutorial files
@testset "TrixiTutorials" begin
for (i, (title, filename)) in enumerate(files)
+ # Evaluate each tutorial in its own module to avoid leaking of
+ # function/variable names, polluting the namespace of later tutorials
+ # by stuff defined in earlier tutorials.
if filename isa Vector # Several files of one topic
for j in eachindex(filename)
- @testset "$(filename[j][2][2])" begin include(joinpath(repo_src, filename[j][2][1], filename[j][2][2])) end
+ mod = gensym(filename[j][2][2])
+ @testset "$(filename[j][2][2])" begin
+ @eval module $mod
+ include(joinpath($repo_src, $(filename[j][2][1]), $(filename[j][2][2])))
+ end
+ end
end
else # Single files
- @testset "$title" begin include(joinpath(repo_src, filename)) end
+ mod = gensym(title)
+ @testset "$title" begin
+ @eval module $mod
+ include(joinpath($repo_src, $filename))
+ end
+ end
end
end
end
diff --git a/docs/literate/src/files/DGMulti_1.jl b/docs/literate/src/files/DGMulti_1.jl
index 0d78e79907c..5ef577e8eeb 100644
--- a/docs/literate/src/files/DGMulti_1.jl
+++ b/docs/literate/src/files/DGMulti_1.jl
@@ -194,3 +194,15 @@ plot(pd["rho"])
plot!(getmesh(pd))
# For more information, please have a look in the [StartUpDG.jl documentation](https://jlchan.github.io/StartUpDG.jl/stable/).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "StartUpDG", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/DGMulti_2.jl b/docs/literate/src/files/DGMulti_2.jl
index 92dce43cdab..06248562343 100644
--- a/docs/literate/src/files/DGMulti_2.jl
+++ b/docs/literate/src/files/DGMulti_2.jl
@@ -38,3 +38,15 @@ D = couple_continuously(legendre_derivative_operator(xmin=0.0, xmax=1.0, N=4),
# For more information and other SBP operators, see the documentations of [StartUpDG.jl](https://jlchan.github.io/StartUpDG.jl/dev/)
# and [SummationByPartsOperators.jl](https://ranocha.de/SummationByPartsOperators.jl/stable/).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "StartUpDG", "SummationByPartsOperators"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/DGSEM_FluxDiff.jl b/docs/literate/src/files/DGSEM_FluxDiff.jl
index 5ec156ebbe3..a5769900269 100644
--- a/docs/literate/src/files/DGSEM_FluxDiff.jl
+++ b/docs/literate/src/files/DGSEM_FluxDiff.jl
@@ -236,3 +236,15 @@ plot(sol)
# [`flux_chandrashekar`](@ref), [`flux_kennedy_gruber`](@ref).
# As surface flux you can use all volume fluxes and additionally for instance [`flux_lax_friedrichs`](@ref),
# [`flux_hll`](@ref), [`flux_hllc`](@ref).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/adaptive_mesh_refinement.jl b/docs/literate/src/files/adaptive_mesh_refinement.jl
index d6150e887a8..46af8f79523 100644
--- a/docs/literate/src/files/adaptive_mesh_refinement.jl
+++ b/docs/literate/src/files/adaptive_mesh_refinement.jl
@@ -202,3 +202,15 @@ plot!(getmesh(pd))
# Source: Trixi.jl's YouTube channel [`Trixi Framework`](https://www.youtube.com/channel/UCpd92vU2HjjTPup-AIN0pkg)
# For more information, please have a look at the respective links.
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/adding_new_parabolic_terms.jl b/docs/literate/src/files/adding_new_parabolic_terms.jl
index 882f73f66ff..f5c2b815f33 100644
--- a/docs/literate/src/files/adding_new_parabolic_terms.jl
+++ b/docs/literate/src/files/adding_new_parabolic_terms.jl
@@ -158,3 +158,15 @@ sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
using Plots
plot(sol)
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
+
diff --git a/docs/literate/src/files/adding_new_scalar_equations.jl b/docs/literate/src/files/adding_new_scalar_equations.jl
index fec7bcf667a..a65b4de7f1a 100644
--- a/docs/literate/src/files/adding_new_scalar_equations.jl
+++ b/docs/literate/src/files/adding_new_scalar_equations.jl
@@ -211,3 +211,15 @@ semi = remake(semi, solver=DGSEM(3, flux_godunov, VolumeIntegralFluxDifferencing
ode = semidiscretize(semi, (0.0, 0.5))
sol = solve(ode, SSPRK43(); ode_default_options()...)
plot(sol)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/adding_nonconservative_equation.jl b/docs/literate/src/files/adding_nonconservative_equation.jl
index 110fa486070..b40e21fb11a 100644
--- a/docs/literate/src/files/adding_nonconservative_equation.jl
+++ b/docs/literate/src/files/adding_nonconservative_equation.jl
@@ -288,3 +288,15 @@ sol = solve(ode, Tsit5(), abstol=1.0e-6, reltol=1.0e-6,
## Plot the numerical solution at the final time
using Plots: plot
plot(sol);
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/custom_semidiscretization.jl b/docs/literate/src/files/custom_semidiscretization.jl
new file mode 100644
index 00000000000..fd432fb0826
--- /dev/null
+++ b/docs/literate/src/files/custom_semidiscretization.jl
@@ -0,0 +1,324 @@
+#src # Custom semidiscretizations
+
+# As described in the [overview section](@ref overview-semidiscretizations),
+# semidiscretizations are high-level descriptions of spatial discretizations
+# in Trixi.jl. Trixi.jl's main focus is on hyperbolic conservation
+# laws represented in a [`SemidiscretizationHyperbolic`](@ref).
+# Hyperbolic-parabolic problems based on the advection-diffusion equation or
+# the compressible Navier-Stokes equations can be represented in a
+# [`SemidiscretizationHyperbolicParabolic`](@ref). This is described in the
+# [basic tutorial on parabolic terms](@ref parabolic_terms) and its extension to
+# [custom parabolic terms](@ref adding_new_parabolic_terms).
+# In this tutorial, we will describe how these semidiscretizations work and how
+# they can be used to create custom semidiscretizations that also involve other tasks.
+
+
+# ## Overview of the right-hand side evaluation
+
+# The semidiscretizations provided by Trixi.jl are set up to create `ODEProblem`s from the
+# [SciML ecosystem for ordinary differential equations](https://diffeq.sciml.ai/latest/).
+# In particular, a spatial semidiscretization can be wrapped in an ODE problem
+# using [`semidiscretize`](@ref), which returns an `ODEProblem`. This `ODEProblem`
+# bundles an initial condition, a right-hand side (RHS) function, the time span,
+# and possible parameters. The `ODEProblem`s created by Trixi.jl use the semidiscretization
+# passed to [`semidiscretize`](@ref) as a parameter.
+# For a [`SemidiscretizationHyperbolic`](@ref), the `ODEProblem` wraps
+# `Trixi.rhs!` as ODE RHS.
+# For a [`SemidiscretizationHyperbolicParabolic`](@ref), Trixi.jl
+# uses a `SplitODEProblem` combining `Trixi.rhs_parabolic!` for the
+# (potentially) stiff part and `Trixi.rhs!` for the other part.
+
+
+# ## Standard Trixi.jl setup
+
+# In this tutorial, we will consider the linear advection equation
+# with source term
+# ```math
+# \partial_t u(t,x) + \partial_x u(t,x) = -\exp(-t) \sin\bigl(\pi (x - t) \bigr)
+# ```
+# with periodic boundary conditions in the domain `[-1, 1]` as a
+# model problem.
+# The initial condition is
+# ```math
+# u(0,x) = \sin(\pi x).
+# ```
+# The source term results in some damping, and the analytical solution is
+# ```math
+# u(t,x) = \exp(-t) \sin\bigl(\pi (x - t) \bigr).
+# ```
+# First, we discretize this equation using the standard functionality
+# of Trixi.jl.
+
+using Trixi, OrdinaryDiffEq, Plots
+
+# The linear scalar advection equation is already implemented in
+# Trixi.jl as [`LinearScalarAdvectionEquation1D`](@ref). We construct
+# it with an advection velocity `1.0`.
+
+equations = LinearScalarAdvectionEquation1D(1.0)
+
+# Next, we use a standard [`DGSEM`](@ref) solver.
+
+solver = DGSEM(polydeg = 3)
+
+# We create a simple [`TreeMesh`](@ref) in 1D.
+
+coordinates_min = (-1.0,)
+coordinates_max = (+1.0,)
+mesh = TreeMesh(coordinates_min, coordinates_max;
+ initial_refinement_level = 4,
+ n_cells_max = 10^4,
+ periodicity = true)
+
+# We wrap everything in a semidiscretization and pass the source
+# terms as a standard Julia function. Please note that Trixi.jl uses
+# `SVector`s from
+# [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl)
+# to store the conserved variables `u`. Thus, the return value of the
+# source terms must be wrapped in an `SVector` - even if we consider
+# just a scalar problem.
+
+function initial_condition(x, t, equations)
+ return SVector(exp(-t) * sinpi(x[1] - t))
+end
+
+function source_terms_standard(u, x, t, equations)
+ return -initial_condition(x, t, equations)
+end
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition,
+ solver;
+ source_terms = source_terms_standard)
+
+# Now, we can create the `ODEProblem`, solve the resulting ODE
+# using a time integration method from
+# [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl),
+# and visualize the numerical solution at the final time using
+# [Plots.jl](https://github.com/JuliaPlots/Plots.jl).
+
+tspan = (0.0, 3.0)
+ode = semidiscretize(semi, tspan)
+
+sol = solve(ode, RDPK3SpFSAL49(); ode_default_options()...)
+
+plot(sol; label = "numerical sol.", legend = :topright)
+
+# We can also plot the analytical solution for comparison.
+# Since Trixi.jl uses `SVector`s for the variables, we take their `first`
+# (and only) component to get the scalar value for manual plotting.
+
+let
+ x = range(-1.0, 1.0; length = 200)
+ plot!(x, first.(initial_condition.(x, sol.t[end], equations)),
+ label = "analytical sol.", linestyle = :dash, legend = :topright)
+end
+
+# We can also add the initial condition to the plot.
+
+plot!(sol.u[1], semi, label = "u0", linestyle = :dot, legend = :topleft)
+
+# You can of course also use some
+# [callbacks](https://trixi-framework.github.io/Trixi.jl/stable/callbacks/)
+# provided by Trixi.jl as usual.
+
+summary_callback = SummaryCallback()
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi; interval = analysis_interval)
+alive_callback = AliveCallback(; analysis_interval)
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback)
+
+sol = solve(ode, RDPK3SpFSAL49();
+ ode_default_options()..., callback = callbacks)
+summary_callback()
+
+
+# ## Using a custom ODE right-hand side function
+
+# Next, we will solve the same problem but use our own ODE RHS function.
+# To demonstrate this, we will artificially create a global variable
+# containing the current time of the simulation.
+
+const GLOBAL_TIME = Ref(0.0)
+
+function source_terms_custom(u, x, t, equations)
+ t = GLOBAL_TIME[]
+ return -initial_condition(x, t, equations)
+end
+
+# Next, we create our own RHS function to update the global time of
+# the simulation before calling the RHS function from Trixi.jl.
+
+function rhs_source_custom!(du_ode, u_ode, semi, t)
+ GLOBAL_TIME[] = t
+ Trixi.rhs!(du_ode, u_ode, semi, t)
+end
+
+# Next, we create an `ODEProblem` manually, copying over the data from
+# the one we got from [`semidiscretize`](@ref) earlier.
+
+ode_source_custom = ODEProblem(rhs_source_custom!,
+ ode.u0,
+ ode.tspan,
+ ode.p #= semi =#)
+sol_source_custom = solve(ode_source_custom, RDPK3SpFSAL49();
+ ode_default_options()...)
+
+plot(sol_source_custom; label = "numerical sol.")
+let
+ x = range(-1.0, 1.0; length = 200)
+ plot!(x, first.(initial_condition.(x, sol_source_custom.t[end], equations)),
+ label = "analytical sol.", linestyle = :dash, legend = :topleft)
+end
+plot!(sol_source_custom.u[1], semi, label = "u0", linestyle = :dot, legend = :topleft)
+
+# This also works with callbacks as usual.
+
+summary_callback = SummaryCallback()
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi; interval = analysis_interval)
+alive_callback = AliveCallback(; analysis_interval)
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback)
+
+sol = solve(ode_source_custom, RDPK3SpFSAL49();
+ ode_default_options()..., callback = callbacks)
+summary_callback()
+
+
+# ## Setting up a custom semidiscretization
+
+# Using a global constant is of course not really nice from a software
+# engineering point of view. Thus, it can often be useful to collect
+# additional data in the parameters of the `ODEProblem`. Hence, it is
+# time to create our own semidiscretization. Here, we create a small
+# wrapper of a standard semidiscretization of Trixi.jl and the current
+# global time of the simulation.
+
+struct CustomSemidiscretization{Semi, T} <: Trixi.AbstractSemidiscretization
+ semi::Semi
+ t::T
+end
+
+semi_custom = CustomSemidiscretization(semi, Ref(0.0))
+
+# To get pretty printing in the REPL, you can consider specializing
+#
+# - `Base.show(io::IO, parameters::CustomSemidiscretization)`
+# - `Base.show(io::IO, ::MIME"text/plain", parameters::CustomSemidiscretization)`
+#
+# for your custom semidiscretization.
+
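+# For example, a minimal sketch of the first method, assuming just the two
+# fields `semi` and `t` defined above, could look like this.
+
+function Base.show(io::IO, semi::CustomSemidiscretization)
+    ## print a compact, human-readable summary of the wrapper
+    print(io, "CustomSemidiscretization(t = ", semi.t[], ")")
+end
+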
+# Next, we create our own source terms that use the global time stored
+# in the custom semidiscretization.
+
+source_terms_custom_semi = let semi_custom = semi_custom
+ function source_terms_custom_semi(u, x, t, equations)
+ t = semi_custom.t[]
+ return -initial_condition(x, t, equations)
+ end
+end
+
+# We also create a custom ODE RHS to update the current global time
+# stored in the custom semidiscretization. We unpack the standard
+# semidiscretization created by Trixi.jl and pass it to `Trixi.rhs!`.
+
+function rhs_semi_custom!(du_ode, u_ode, semi_custom, t)
+ semi_custom.t[] = t
+ Trixi.rhs!(du_ode, u_ode, semi_custom.semi, t)
+end
+
+# Finally, we set up an `ODEProblem` and solve it numerically.
+
+ode_semi_custom = ODEProblem(rhs_semi_custom!,
+ ode.u0,
+ ode.tspan,
+ semi_custom)
+sol_semi_custom = solve(ode_semi_custom, RDPK3SpFSAL49();
+ ode_default_options()...)
+
+# If we want to make use of additional functionality provided by
+# Trixi.jl, e.g., for plotting, we need to implement a few additional
+# specializations. In this case, we forward everything to the standard
+# semidiscretization provided by Trixi.jl wrapped in our custom
+# semidiscretization.
+
+Base.ndims(semi::CustomSemidiscretization) = ndims(semi.semi)
+function Trixi.mesh_equations_solver_cache(semi::CustomSemidiscretization)
+ Trixi.mesh_equations_solver_cache(semi.semi)
+end
+
+# Now, we can plot the numerical solution as usual.
+
+plot(sol_semi_custom; label = "numerical sol.")
+let
+ x = range(-1.0, 1.0; length = 200)
+ plot!(x, first.(initial_condition.(x, sol_semi_custom.t[end], equations)),
+ label = "analytical sol.", linestyle = :dash, legend = :topleft)
+end
+plot!(sol_semi_custom.u[1], semi, label = "u0", linestyle = :dot, legend = :topleft)
+
+# This also works with many callbacks as usual. However, the
+# [`AnalysisCallback`](@ref) requires some special handling since it
+# makes use of a performance counter contained in the standard
+# semidiscretizations of Trixi.jl to report some
+# [performance metrics](@ref performance-metrics).
+# Here, we forward all accesses to the performance counter to the
+# wrapped semidiscretization.
+
+function Base.getproperty(semi::CustomSemidiscretization, s::Symbol)
+ if s === :performance_counter
+ wrapped_semi = getfield(semi, :semi)
+ wrapped_semi.performance_counter
+ else
+ getfield(semi, s)
+ end
+end
+
+# Moreover, the [`AnalysisCallback`](@ref) also performs some error
+# calculations. We also need to forward them to the wrapped
+# semidiscretization.
+
+function Trixi.calc_error_norms(func, u, t, analyzer,
+ semi::CustomSemidiscretization,
+ cache_analysis)
+ Trixi.calc_error_norms(func, u, t, analyzer,
+ semi.semi,
+ cache_analysis)
+end
+
+# Now, we can work with the callbacks used before as usual.
+
+summary_callback = SummaryCallback()
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi_custom;
+ interval = analysis_interval)
+alive_callback = AliveCallback(; analysis_interval)
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback)
+
+sol = solve(ode_semi_custom, RDPK3SpFSAL49();
+ ode_default_options()..., callback = callbacks)
+summary_callback()
+
+# For even more advanced usage of custom semidiscretizations, you
+# may look at the source code of the ones contained in Trixi.jl, e.g.,
+# - [`SemidiscretizationHyperbolicParabolic`](@ref)
+# - [`SemidiscretizationEulerGravity`](@ref)
+# - [`SemidiscretizationEulerAcoustics`](@ref)
+# - [`SemidiscretizationCoupled`](@ref)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/differentiable_programming.jl b/docs/literate/src/files/differentiable_programming.jl
index 5c5a7cd7440..33427803afc 100644
--- a/docs/literate/src/files/differentiable_programming.jl
+++ b/docs/literate/src/files/differentiable_programming.jl
@@ -446,3 +446,15 @@ scatter(real.(λ), imag.(λ))
λ = eigvals(Matrix(A))
relative_maximum = maximum(real, λ) / maximum(abs, λ)
@test relative_maximum < 1.0e-15 #src
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots", "ForwardDiff"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/hohqmesh_tutorial.jl b/docs/literate/src/files/hohqmesh_tutorial.jl
index 87076108d91..b19d363c4bf 100644
--- a/docs/literate/src/files/hohqmesh_tutorial.jl
+++ b/docs/literate/src/files/hohqmesh_tutorial.jl
@@ -566,3 +566,15 @@ mesh = UnstructuredMesh2D(mesh_file);
# for details.
# ![simulation_straight_sides_p4est_amr](https://user-images.githubusercontent.com/74359358/168049930-8abce6ac-cd47-4d04-b40b-0fa459bbd98d.png)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots", "Trixi2Vtk", "HOHQMesh"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl
index 0c8de66bf42..d42695611f6 100644
--- a/docs/literate/src/files/index.jl
+++ b/docs/literate/src/files/index.jl
@@ -76,21 +76,30 @@
# In this part, another physics model is implemented, the nonconservative linear advection equation.
# We run two different simulations with different levels of refinement and compare the resulting errors.
-# ### [10 Adaptive mesh refinement](@ref adaptive_mesh_refinement)
+# ### [10 Parabolic terms](@ref parabolic_terms)
+#-
+# This tutorial describes how parabolic terms are implemented in Trixi.jl, e.g.,
+# to solve the advection-diffusion equation.
+
+# ### [11 Adding new parabolic terms](@ref adding_new_parabolic_terms)
+#-
+# This tutorial describes how new parabolic terms can be implemented using Trixi.jl.
+
+# ### [12 Adaptive mesh refinement](@ref adaptive_mesh_refinement)
#-
# Adaptive mesh refinement (AMR) helps to increase the accuracy in sensitive or turbulent regions while
# not wasting resources for less interesting parts of the domain. This leads to much more efficient
# simulations. This tutorial presents the implementation strategy of AMR in Trixi.jl, including the use of
# different indicators and controllers.
-# ### [11 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping)
+# ### [13 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping)
#-
# In this tutorial, the use of Trixi.jl's structured curved mesh type [`StructuredMesh`](@ref) is explained.
# We present the two basic options to initialize such a mesh. First, the curved domain boundaries
# of a circular cylinder are set by explicit boundary functions. Then, a fully curved mesh is
# defined by passing the transformation mapping.
-# ### [12 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial)
+# ### [14 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial)
#-
# The purpose of this tutorial is to demonstrate how to use the [`UnstructuredMesh2D`](@ref)
# functionality of Trixi.jl. This begins by running and visualizing an available unstructured
@@ -99,19 +108,24 @@
# software in the Trixi.jl ecosystem, and then run a simulation using Trixi.jl on said mesh.
# In the end, the tutorial briefly explains how to simulate an example using AMR via `P4estMesh`.
-# ### [13 Explicit time stepping](@ref time_stepping)
+# ### [15 Explicit time stepping](@ref time_stepping)
#-
# This tutorial is about time integration using [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl).
# It explains how to use their algorithms and presents two types of time step choices - with error-based
# and CFL-based adaptive step size control.
-# ### [14 Differentiable programming](@ref differentiable_programming)
+# ### [16 Differentiable programming](@ref differentiable_programming)
#-
# This part deals with some basic differentiable programming topics. For example, a Jacobian, its
# eigenvalues and a curve of total energy (through the simulation) are calculated and plotted for
# a few semidiscretizations. Moreover, we calculate an example for propagating errors with Measurements.jl
# at the end.
+# ### [17 Custom semidiscretization](@ref custom_semidiscretization)
+#-
+# This tutorial describes the [semidiscretizations](@ref overview-semidiscretizations) of Trixi.jl
+# and explains how to extend them for custom tasks.
+
# ## Examples in Trixi.jl
diff --git a/docs/literate/src/files/non_periodic_boundaries.jl b/docs/literate/src/files/non_periodic_boundaries.jl
index 54da88a64aa..7ed6324ff99 100644
--- a/docs/literate/src/files/non_periodic_boundaries.jl
+++ b/docs/literate/src/files/non_periodic_boundaries.jl
@@ -155,3 +155,15 @@ end
#
# ```
# Source: [`Video`](https://www.youtube.com/watch?v=w0A9X38cSe4) on Trixi.jl's YouTube channel [`Trixi Framework`](https://www.youtube.com/watch?v=WElqqdMhY4A)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/parabolic_terms.jl b/docs/literate/src/files/parabolic_terms.jl
index bac0098f8e9..d0a355bbc19 100644
--- a/docs/literate/src/files/parabolic_terms.jl
+++ b/docs/literate/src/files/parabolic_terms.jl
@@ -86,3 +86,14 @@ sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
using Plots
plot(sol)
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/scalar_linear_advection_1d.jl b/docs/literate/src/files/scalar_linear_advection_1d.jl
index 42c831c98ba..77ba7b087cc 100644
--- a/docs/literate/src/files/scalar_linear_advection_1d.jl
+++ b/docs/literate/src/files/scalar_linear_advection_1d.jl
@@ -511,3 +511,15 @@ sol_trixi = solve(ode_trixi, RDPK3SpFSAL49(); abstol=1.0e-6, reltol=1.0e-6, ode
plot!(sol_trixi, label="solution at t=$(tspan[2]) with Trixi.jl", legend=:topleft, linestyle=:dash, lw=2)
@test maximum(abs.(vec(u0) - sol_trixi.u[end])) ≈ maximum(abs.(u0 - sol.u[end])) #src
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/shock_capturing.jl b/docs/literate/src/files/shock_capturing.jl
index afa34cbf06a..dd6698c2a86 100644
--- a/docs/literate/src/files/shock_capturing.jl
+++ b/docs/literate/src/files/shock_capturing.jl
@@ -224,3 +224,15 @@ sol = solve(ode, CarpenterKennedy2N54(stage_limiter!, williamson_condition=false
using Plots
plot(sol)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/structured_mesh_mapping.jl b/docs/literate/src/files/structured_mesh_mapping.jl
index 0ae9cf723f8..c8da30bc2bf 100644
--- a/docs/literate/src/files/structured_mesh_mapping.jl
+++ b/docs/literate/src/files/structured_mesh_mapping.jl
@@ -201,3 +201,15 @@ plot!(getmesh(pd))
# unstructured mesh type [`UnstructuredMesh2D`] and its use of the
# [High-Order Hex-Quad Mesh (HOHQMesh) generator](https://github.com/trixi-framework/HOHQMesh),
# created and developed by David Kopriva.
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/time_stepping.jl b/docs/literate/src/files/time_stepping.jl
index d400c4a94be..de7a2a83a41 100644
--- a/docs/literate/src/files/time_stepping.jl
+++ b/docs/literate/src/files/time_stepping.jl
@@ -49,7 +49,7 @@
# ```math
# \Delta t_n = \text{CFL} * \min_i \frac{\Delta x_i}{\lambda_{\max}(u_i^n)}
# ```
-# We compute $\Delta x_i$ by scaling the element size by a factor of $1/(N+1)$, cf.
+# We compute $\Delta x_i$ by scaling the element size by a factor of $1/(N+1)$, cf.
# [Gassner and Kopriva (2011)](https://doi.org/10.1137/100807211), Section 5.
# Trixi.jl provides such a CFL-based step size control. It is implemented as the callback
@@ -73,3 +73,15 @@
# You can find simple examples with a CFL-based step size control for instance in the elixirs
# [`elixir_advection_basic.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_basic.jl)
# or [`elixir_euler_source_terms.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_source_terms.jl).
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "OrdinaryDiffEq"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/literate/src/files/upwind_fdsbp.jl b/docs/literate/src/files/upwind_fdsbp.jl
index 36ca1b57404..6d3379fa30d 100644
--- a/docs/literate/src/files/upwind_fdsbp.jl
+++ b/docs/literate/src/files/upwind_fdsbp.jl
@@ -62,3 +62,15 @@ Matrix(D_upw.plus)
# flux vector splitting, e.g.,
# - [`elixir_euler_vortex.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_fdsbp/elixir_euler_vortex.jl)
# - [`elixir_euler_taylor_green_vortex.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_3d_fdsbp/elixir_euler_taylor_green_vortex.jl)
+
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+
+using InteractiveUtils
+versioninfo()
+
+using Pkg
+Pkg.status(["Trixi", "SummationByPartsOperators"],
+ mode=PKGMODE_MANIFEST)
diff --git a/docs/make.jl b/docs/make.jl
index 57629577ddb..df8ac04be12 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -68,6 +68,7 @@ files = [
# Topic: other stuff
"Explicit time stepping" => "time_stepping.jl",
"Differentiable programming" => "differentiable_programming.jl",
+ "Custom semidiscretizations" => "custom_semidiscretization.jl"
]
tutorials = create_tutorials(files)
@@ -76,7 +77,7 @@ makedocs(
# Specify modules for which docstrings should be shown
modules = [Trixi, Trixi2Vtk],
# Set sitename to Trixi.jl
- sitename="Trixi.jl",
+ sitename = "Trixi.jl",
# Provide additional formatting options
format = Documenter.HTML(
# Disable pretty URLs during manual testing
@@ -84,7 +85,8 @@ makedocs(
# Explicitly add favicon as asset
assets = ["assets/favicon.ico"],
# Set canonical URL to GitHub pages URL
- canonical = "https://trixi-framework.github.io/Trixi.jl/stable"
+ canonical = "https://trixi-framework.github.io/Trixi.jl/stable",
+ size_threshold_ignore = ["reference-trixi.md"]
),
# Explicitly specify documentation structure
pages = [
@@ -123,9 +125,8 @@ makedocs(
"Authors" => "authors.md",
"Contributing" => "contributing.md",
"Code of Conduct" => "code_of_conduct.md",
- "License" => "license.md"
- ],
- strict = true # to make the GitHub action fail when doctests fail, see https://github.com/neuropsychology/Psycho.jl/issues/34
+ "License" => "license.md",
+ ]
)
deploydocs(
diff --git a/docs/src/callbacks.md b/docs/src/callbacks.md
index 1d3e5e34b51..f018bcf7c39 100644
--- a/docs/src/callbacks.md
+++ b/docs/src/callbacks.md
@@ -30,7 +30,12 @@ An example elixir using AMR can be found at [`examples/tree_2d_dgsem/elixir_adve
The [`AnalysisCallback`](@ref) can be used to analyze the numerical solution, e.g. calculate
errors or user-specified integrals, and print the results to the screen. The results can also be
saved in a file. An example can be found at [`examples/tree_2d_dgsem/elixir_euler_vortex.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_vortex.jl).
-In [Performance metrics of the `AnalysisCallback`](@ref) you can find a detailed
+Note that the errors (e.g. `L2 error` or `Linf error`) are computed with respect to the initial condition.
+The percentage of the simulation time refers to the ratio of the current time to the final time, i.e., it does
+not consider the maximal number of iterations. Thus, the simulation could finish before 100% is reached.
+Note that, e.g., due to AMR or smaller time step sizes, the simulation can actually take longer than
+the percentage indicates.
+In [Performance metrics of the `AnalysisCallback`](@ref performance-metrics) you can find a detailed
description of the different performance metrics the `AnalysisCallback` computes.
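+For example, a typical setup in an elixir (assuming a semidiscretization `semi` has already
+been created) that additionally records the entropy as a user-specified integral could look like
+```julia
+analysis_callback = AnalysisCallback(semi; interval = 100,
+                                     extra_analysis_integrals = (entropy,))
+```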
### I/O
@@ -106,7 +111,7 @@ will yield the following plot:
the automated performance measurements, including an output of the recorded timers after a simulation.
* The [`VisualizationCallback`](@ref) can be used for in-situ visualization. See
[Visualizing results during a simulation](@ref).
-* The [`TrivialCallback`](@ref) does nothing and can be used to to easily disable some callbacks
+* The [`TrivialCallback`](@ref) does nothing and can be used to easily disable some callbacks
via [`trixi_include`](@ref).
### Equation-specific callbacks
diff --git a/docs/src/index.md b/docs/src/index.md
index bb2afd1019f..9ffaee26c40 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -7,6 +7,7 @@
[![Build Status](https://github.com/trixi-framework/Trixi.jl/workflows/CI/badge.svg)](https://github.com/trixi-framework/Trixi.jl/actions?query=workflow%3ACI)
[![Codecov](https://codecov.io/gh/trixi-framework/Trixi.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/trixi-framework/Trixi.jl)
[![Coveralls](https://coveralls.io/repos/github/trixi-framework/Trixi.jl/badge.svg?branch=main)](https://coveralls.io/github/trixi-framework/Trixi.jl?branch=main)
+[![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl)
[![License: MIT](https://img.shields.io/badge/License-MIT-success.svg)](https://opensource.org/licenses/MIT)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3996439.svg)](https://doi.org/10.5281/zenodo.3996439)
diff --git a/docs/src/overview.md b/docs/src/overview.md
index 46bc28b6025..9cd11a5df93 100644
--- a/docs/src/overview.md
+++ b/docs/src/overview.md
@@ -16,7 +16,7 @@ to solve a PDE numerically are the spatial semidiscretization and the time
integration scheme.
-## Semidiscretizations
+## [Semidiscretizations](@id overview-semidiscretizations)
Semidiscretizations are high-level descriptions of spatial discretizations
specialized for certain PDEs. Trixi.jl's main focus is on hyperbolic conservation
@@ -55,14 +55,15 @@ different features on different mesh types.
| Element type | line, square, cube | line, quadᵃ, hexᵃ | quadᵃ | quadᵃ, hexᵃ | simplex, quadᵃ, hexᵃ |
| Adaptive mesh refinement | ✅ | ❌ | ❌ | ✅ | ❌ | [`AMRCallback`](@ref)
| Solver type | [`DGSEM`](@ref) | [`DGSEM`](@ref) | [`DGSEM`](@ref) | [`DGSEM`](@ref) | [`DGMulti`](@ref) |
-| Domain | hypercube | mapped hypercube | arbitrary | arbitrary | arbitraryᵇ |
+| Domain | hypercube | mapped hypercube | arbitrary | arbitrary | arbitrary |
| Weak form | ✅ | ✅ | ✅ | ✅ | ✅ | [`VolumeIntegralWeakForm`](@ref)
| Flux differencing | ✅ | ✅ | ✅ | ✅ | ✅ | [`VolumeIntegralFluxDifferencing`](@ref)
| Shock capturing | ✅ | ✅ | ✅ | ✅ | ❌ | [`VolumeIntegralShockCapturingHG`](@ref)
| Nonconservative equations | ✅ | ✅ | ✅ | ✅ | ✅ | e.g., GLM MHD or shallow water equations
+| Parabolic termsᵇ | ✅ | ✅ | ❌ | ✅ | ✅ | e.g., [`CompressibleNavierStokesDiffusion2D`](@ref)
ᵃ: quad = quadrilateral, hex = hexahedron
-ᵇ: curved meshes supported for `SBP` and `GaussSBP` approximation types for `VolumeIntegralFluxDifferencing` solvers on quadrilateral and hexahedral `DGMultiMesh`es (non-conservative terms not yet supported)
+ᵇ: Parabolic terms do not currently support adaptivity.
## Time integration methods
diff --git a/docs/src/parallelization.md b/docs/src/parallelization.md
index 245fdc11852..610aa2cbd95 100644
--- a/docs/src/parallelization.md
+++ b/docs/src/parallelization.md
@@ -22,7 +22,7 @@ julia --threads=4
If both the environment variable and the command line argument are specified at
the same time, the latter takes precedence.
-If you use time integration methods from
+If you use time integration methods from
[OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl)
and want to use multiple threads therein, you need to set the keyword argument
`thread=OrdinaryDiffEq.True()` of the algorithms, as described in the
@@ -143,7 +143,7 @@ To start Trixi.jl in parallel with MPI, there are three options:
Switching between panes can be done by `Ctrl+b` followed by `o`.
As of March 2022, newer versions of tmpi also support mpich, which is the default
backend of MPI.jl (via MPICH_Jll.jl). To use this setup, you need to install
- `mpiexecjl` as described in the
+ `mpiexecjl` as described in the
[documentation of MPI.jl](https://juliaparallel.org/MPI.jl/v0.20/usage/#Julia-wrapper-for-mpiexec)
and make it available as `mpirun`, e.g., via a symlink of the form
```bash
@@ -161,22 +161,46 @@ To start Trixi.jl in parallel with MPI, there are three options:
### [Performance](@id parallel_performance)
For information on how to evaluate the parallel performance of Trixi.jl, please
-have a look at the [Performance metrics of the `AnalysisCallback`](@ref)
+have a look at the [Performance metrics of the `AnalysisCallback`](@ref performance-metrics)
section, specifically at the descriptions of the performance index (PID).
### Using error-based step size control with MPI
-If you use error-based step size control (see also the section on [error-based adaptive step sizes](@ref adaptive_step_sizes))
-together with MPI you need to pass `internalnorm=ode_norm` and you should pass
-`unstable_check=ode_unstable_check` to OrdinaryDiffEq's [`solve`](https://docs.sciml.ai/DiffEqDocs/latest/basics/common_solver_opts/),
+If you use error-based step size control (see also the section on
+[error-based adaptive step sizes](@ref adaptive_step_sizes)) together with MPI you need to pass
+`internalnorm=ode_norm` and you should pass `unstable_check=ode_unstable_check` to
+OrdinaryDiffEq's [`solve`](https://docs.sciml.ai/DiffEqDocs/latest/basics/common_solver_opts/),
which are both included in [`ode_default_options`](@ref).
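+For example, a `solve` call along the following lines (the time integration method and the
+tolerances are just placeholders) passes both options automatically:
+```julia
+sol = solve(ode, RDPK3SpFSAL49();
+            abstol = 1.0e-6, reltol = 1.0e-6,
+            ode_default_options()...)
+```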
### Using parallel input and output
-Trixi.jl allows parallel I/O using MPI by leveraging parallel HDF5.jl. To enable this, you first need
-to use a system-provided MPI library, see also [here](@ref parallel_system_MPI) and you need to tell
-[HDF5.jl](https://github.com/JuliaIO/HDF5.jl) to use this library.
-To do so, set the environment variable `JULIA_HDF5_PATH` to the local path
-that contains the `libhdf5.so` shared object file and build HDF5.jl by executing `using Pkg; Pkg.build("HDF5")`.
-For more information see also the [documentation of HDF5.jl](https://juliaio.github.io/HDF5.jl/stable/mpi/).
-
-If you do not perform these steps to use parallel HDF5 or if the HDF5 is not MPI-enabled, Trixi.jl will fall back on a less efficient I/O mechanism. In that case, all disk I/O is performed only on rank zero and data is distributed to/gathered from the other ranks using regular MPI communication.
+Trixi.jl allows parallel I/O using MPI by leveraging parallel HDF5.jl. On most systems, this is
+enabled by default. Additionally, you can use a local installation of the HDF5 library
+(with MPI support). For this, you first need to use a system-provided MPI library, see also
+[here](@ref parallel_system_MPI), and you need to tell [HDF5.jl](https://github.com/JuliaIO/HDF5.jl)
+to use this library. To do so with HDF5.jl v0.17 and newer, set the preferences `libhdf5` and
+`libhdf5_hl` to the local paths of the libraries `libhdf5` and `libhdf5_hl`, which can be done by
+```julia
+julia> using Preferences, UUIDs
+julia> set_preferences!(
+ UUID("f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"), # UUID of HDF5.jl
+ "libhdf5" => "/path/to/your/libhdf5.so",
+ "libhdf5_hl" => "/path/to/your/libhdf5_hl.so", force = true)
+```
+Alternatively, with HDF5.jl v0.17.1 or higher you can use
+```julia
+julia> using HDF5
+julia> HDF5.API.set_libraries!("/path/to/your/libhdf5.so", "/path/to/your/libhdf5_hl.so")
+```
+For more information see also the
+[documentation of HDF5.jl](https://juliaio.github.io/HDF5.jl/stable/mpi/). In total, you should
+have a file called `LocalPreferences.toml` in the project directory that contains a section
+`[MPIPreferences]`, a section `[HDF5]` with entries `libhdf5` and `libhdf5_hl`, a section `[P4est]`
+with the entry `libp4est`, as well as a section `[T8code]` with the entries `libt8`, `libp4est`,
+and `libsc`.
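+An illustrative `LocalPreferences.toml` following this structure might look as follows
+(all paths are placeholders for your local installation):
+```toml
+[HDF5]
+libhdf5 = "/path/to/your/libhdf5.so"
+libhdf5_hl = "/path/to/your/libhdf5_hl.so"
+
+[MPIPreferences]
+# entries as created by `MPIPreferences.use_system_binary()`, e.g., `binary = "system"`
+
+[P4est]
+libp4est = "/path/to/your/libp4est.so"
+
+[T8code]
+libt8 = "/path/to/your/libt8.so"
+libp4est = "/path/to/your/libp4est.so"
+libsc = "/path/to/your/libsc.so"
+```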
+If you use HDF5.jl v0.16 or older, instead of setting the preferences for HDF5.jl, you need to set
+the environment variable `JULIA_HDF5_PATH` to the path where the HDF5 binaries are located and
+then call `]build HDF5` from Julia.
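+For example, assuming a local HDF5 installation at a placeholder path, this could be done via
+```julia
+julia> ENV["JULIA_HDF5_PATH"] = "/path/to/your/hdf5"
+
+julia> import Pkg; Pkg.build("HDF5")
+```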
+
+If HDF5 is not MPI-enabled, Trixi.jl will fall back on a less efficient I/O mechanism. In that
+case, all disk I/O is performed only on rank zero and data is distributed to/gathered from the
+other ranks using regular MPI communication.
diff --git a/docs/src/performance.md b/docs/src/performance.md
index 428672ec75f..bbe3a3390b7 100644
--- a/docs/src/performance.md
+++ b/docs/src/performance.md
@@ -170,7 +170,7 @@ As a rule of thumb:
- Consider using `@nospecialize` for methods like custom implementations of `Base.show`.
-## Performance metrics of the `AnalysisCallback`
+## [Performance metrics of the `AnalysisCallback`](@id performance-metrics)
The [`AnalysisCallback`](@ref) computes two performance indicators that you can use to
evaluate the serial and parallel performance of Trixi.jl. They represent
measured run times that are normalized by the number of `rhs!` evaluations and
diff --git a/docs/src/restart.md b/docs/src/restart.md
index 767269ff27d..c7cbcd11852 100644
--- a/docs/src/restart.md
+++ b/docs/src/restart.md
@@ -77,8 +77,7 @@ and its time step number, e.g.:
```julia
integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
dt=dt, save_everystep=false, callback=callbacks);
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
```
Now we can compute the solution:
diff --git a/examples/p4est_2d_dgsem/elixir_advection_restart.jl b/examples/p4est_2d_dgsem/elixir_advection_restart.jl
index 79a35199b83..52917616a6a 100644
--- a/examples/p4est_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/p4est_2d_dgsem/elixir_advection_restart.jl
@@ -35,8 +35,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl
index 8111df8251a..b0c6086ad63 100644
--- a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl
+++ b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence.jl
@@ -170,7 +170,10 @@ end
initial_condition = initial_condition_navier_stokes_convergence_test
# BC types
-velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:3])
+velocity_bc_top_bottom = NoSlip() do x, t, equations
+ u = initial_condition_navier_stokes_convergence_test(x, t, equations)
+ return SVector(u[2], u[3])
+end
heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl
new file mode 100644
index 00000000000..935f132ba4b
--- /dev/null
+++ b/examples/p4est_2d_dgsem/elixir_navierstokes_convergence_nonperiodic.jl
@@ -0,0 +1,215 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the ideal compressible Navier-Stokes equations
+
+prandtl_number() = 0.72
+mu() = 0.01
+
+equations = CompressibleEulerEquations2D(1.4)
+equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu=mu(), Prandtl=prandtl_number(),
+ gradient_variables=GradientVariablesPrimitive())
+
+# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux
+solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs,
+ volume_integral=VolumeIntegralWeakForm())
+
+coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y))
+coordinates_max = ( 1.0, 1.0) # maximum coordinates (max(x), max(y))
+
+trees_per_dimension = (4, 4)
+mesh = P4estMesh(trees_per_dimension,
+ polydeg=3, initial_refinement_level=2,
+ coordinates_min=coordinates_min, coordinates_max=coordinates_max,
+ periodicity=(false, false))
+
+# Note: the initial condition cannot be specialized to `CompressibleNavierStokesDiffusion2D`
+# since it is called by both the parabolic solver (which passes in `CompressibleNavierStokesDiffusion2D`)
+# and by the initial condition (which passes in `CompressibleEulerEquations2D`).
+# This convergence test setup was originally derived by Andrew Winters (@andrewwinters5000)
+function initial_condition_navier_stokes_convergence_test(x, t, equations)
+ # Amplitude and shift
+ A = 0.5
+ c = 2.0
+
+ # convenience values for trig. functions
+ pi_x = pi * x[1]
+ pi_y = pi * x[2]
+ pi_t = pi * t
+
+ rho = c + A * sin(pi_x) * cos(pi_y) * cos(pi_t)
+ v1 = sin(pi_x) * log(x[2] + 2.0) * (1.0 - exp(-A * (x[2] - 1.0)) ) * cos(pi_t)
+ v2 = v1
+ p = rho^2
+
+ return prim2cons(SVector(rho, v1, v2, p), equations)
+end
+
+@inline function source_terms_navier_stokes_convergence_test(u, x, t, equations)
+ y = x[2]
+
+ # TODO: parabolic
+ # we currently need to hardcode these parameters until we fix the "combined equation" issue
+ # see also https://github.com/trixi-framework/Trixi.jl/pull/1160
+ inv_gamma_minus_one = inv(equations.gamma - 1)
+ Pr = prandtl_number()
+ mu_ = mu()
+
+ # Same settings as in `initial_condition`
+ # Amplitude and shift
+ A = 0.5
+ c = 2.0
+
+ # convenience values for trig. functions
+ pi_x = pi * x[1]
+ pi_y = pi * x[2]
+ pi_t = pi * t
+
+ # compute the manufactured solution and all necessary derivatives
+ rho = c + A * sin(pi_x) * cos(pi_y) * cos(pi_t)
+ rho_t = -pi * A * sin(pi_x) * cos(pi_y) * sin(pi_t)
+ rho_x = pi * A * cos(pi_x) * cos(pi_y) * cos(pi_t)
+ rho_y = -pi * A * sin(pi_x) * sin(pi_y) * cos(pi_t)
+ rho_xx = -pi * pi * A * sin(pi_x) * cos(pi_y) * cos(pi_t)
+ rho_yy = -pi * pi * A * sin(pi_x) * cos(pi_y) * cos(pi_t)
+
+ v1 = sin(pi_x) * log(y + 2.0) * (1.0 - exp(-A * (y - 1.0))) * cos(pi_t)
+ v1_t = -pi * sin(pi_x) * log(y + 2.0) * (1.0 - exp(-A * (y - 1.0))) * sin(pi_t)
+ v1_x = pi * cos(pi_x) * log(y + 2.0) * (1.0 - exp(-A * (y - 1.0))) * cos(pi_t)
+ v1_y = sin(pi_x) * (A * log(y + 2.0) * exp(-A * (y - 1.0)) + (1.0 - exp(-A * (y - 1.0))) / (y + 2.0)) * cos(pi_t)
+ v1_xx = -pi * pi * sin(pi_x) * log(y + 2.0) * (1.0 - exp(-A * (y - 1.0))) * cos(pi_t)
+ v1_xy = pi * cos(pi_x) * (A * log(y + 2.0) * exp(-A * (y - 1.0)) + (1.0 - exp(-A * (y - 1.0))) / (y + 2.0)) * cos(pi_t)
+ v1_yy = (sin(pi_x) * ( 2.0 * A * exp(-A * (y - 1.0)) / (y + 2.0)
+ - A * A * log(y + 2.0) * exp(-A * (y - 1.0))
+ - (1.0 - exp(-A * (y - 1.0))) / ((y + 2.0) * (y + 2.0))) * cos(pi_t))
+ v2 = v1
+ v2_t = v1_t
+ v2_x = v1_x
+ v2_y = v1_y
+ v2_xx = v1_xx
+ v2_xy = v1_xy
+ v2_yy = v1_yy
+
+ p = rho * rho
+ p_t = 2.0 * rho * rho_t
+ p_x = 2.0 * rho * rho_x
+ p_y = 2.0 * rho * rho_y
+ p_xx = 2.0 * rho * rho_xx + 2.0 * rho_x * rho_x
+ p_yy = 2.0 * rho * rho_yy + 2.0 * rho_y * rho_y
+
+ # Note this simplifies slightly because the ansatz assumes that v1 = v2
+ E = p * inv_gamma_minus_one + 0.5 * rho * (v1^2 + v2^2)
+ E_t = p_t * inv_gamma_minus_one + rho_t * v1^2 + 2.0 * rho * v1 * v1_t
+ E_x = p_x * inv_gamma_minus_one + rho_x * v1^2 + 2.0 * rho * v1 * v1_x
+ E_y = p_y * inv_gamma_minus_one + rho_y * v1^2 + 2.0 * rho * v1 * v1_y
+
+ # Some convenience constants
+ T_const = equations.gamma * inv_gamma_minus_one / Pr
+ inv_rho_cubed = 1.0 / (rho^3)
+
+ # compute the source terms
+ # density equation
+ du1 = rho_t + rho_x * v1 + rho * v1_x + rho_y * v2 + rho * v2_y
+
+ # x-momentum equation
+ du2 = ( rho_t * v1 + rho * v1_t + p_x + rho_x * v1^2
+ + 2.0 * rho * v1 * v1_x
+ + rho_y * v1 * v2
+ + rho * v1_y * v2
+ + rho * v1 * v2_y
+ # stress tensor from x-direction
+ - 4.0 / 3.0 * v1_xx * mu_
+ + 2.0 / 3.0 * v2_xy * mu_
+ - v1_yy * mu_
+ - v2_xy * mu_ )
+ # y-momentum equation
+ du3 = ( rho_t * v2 + rho * v2_t + p_y + rho_x * v1 * v2
+ + rho * v1_x * v2
+ + rho * v1 * v2_x
+ + rho_y * v2^2
+ + 2.0 * rho * v2 * v2_y
+ # stress tensor from y-direction
+ - v1_xy * mu_
+ - v2_xx * mu_
+ - 4.0 / 3.0 * v2_yy * mu_
+ + 2.0 / 3.0 * v1_xy * mu_ )
+ # total energy equation
+ du4 = ( E_t + v1_x * (E + p) + v1 * (E_x + p_x)
+ + v2_y * (E + p) + v2 * (E_y + p_y)
+ # stress tensor and temperature gradient terms from x-direction
+ - 4.0 / 3.0 * v1_xx * v1 * mu_
+ + 2.0 / 3.0 * v2_xy * v1 * mu_
+ - 4.0 / 3.0 * v1_x * v1_x * mu_
+ + 2.0 / 3.0 * v2_y * v1_x * mu_
+ - v1_xy * v2 * mu_
+ - v2_xx * v2 * mu_
+ - v1_y * v2_x * mu_
+ - v2_x * v2_x * mu_
+ - T_const * inv_rho_cubed * ( p_xx * rho * rho
+ - 2.0 * p_x * rho * rho_x
+ + 2.0 * p * rho_x * rho_x
+ - p * rho * rho_xx ) * mu_
+ # stress tensor and temperature gradient terms from y-direction
+ - v1_yy * v1 * mu_
+ - v2_xy * v1 * mu_
+ - v1_y * v1_y * mu_
+ - v2_x * v1_y * mu_
+ - 4.0 / 3.0 * v2_yy * v2 * mu_
+ + 2.0 / 3.0 * v1_xy * v2 * mu_
+ - 4.0 / 3.0 * v2_y * v2_y * mu_
+ + 2.0 / 3.0 * v1_x * v2_y * mu_
+ - T_const * inv_rho_cubed * ( p_yy * rho * rho
+ - 2.0 * p_y * rho * rho_y
+ + 2.0 * p * rho_y * rho_y
+ - p * rho * rho_yy ) * mu_ )
+
+ return SVector(du1, du2, du3, du4)
+end
+
+initial_condition = initial_condition_navier_stokes_convergence_test
+
+# BC types
+velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:3])
+heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
+boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
+
+boundary_condition_left_right = BoundaryConditionDirichlet(initial_condition_navier_stokes_convergence_test)
+
+# define inviscid boundary conditions
+boundary_conditions = Dict(:x_neg => boundary_condition_left_right,
+ :x_pos => boundary_condition_left_right,
+ :y_neg => boundary_condition_slip_wall,
+ :y_pos => boundary_condition_slip_wall)
+
+# define viscous boundary conditions
+boundary_conditions_parabolic = Dict(:x_neg => boundary_condition_left_right,
+ :x_pos => boundary_condition_left_right,
+ :y_neg => boundary_condition_top_bottom,
+ :y_pos => boundary_condition_top_bottom)
+
+semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver;
+ boundary_conditions=(boundary_conditions, boundary_conditions_parabolic),
+ source_terms=source_terms_navier_stokes_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span `tspan`
+tspan = (0.0, 0.5)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+alive_callback = AliveCallback(alive_interval=10)
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
+
+###############################################################################
+# run the simulation
+
+time_int_tol = 1e-8
+sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
+ ode_default_options()..., callback=callbacks)
+summary_callback() # print the timer summary
+
diff --git a/examples/p4est_3d_dgsem/elixir_advection_restart.jl b/examples/p4est_3d_dgsem/elixir_advection_restart.jl
index b27eaab62e2..26d10cf8826 100644
--- a/examples/p4est_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/p4est_3d_dgsem/elixir_advection_restart.jl
@@ -32,8 +32,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
index c426fe95f5b..0109e58dfb3 100644
--- a/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
+++ b/examples/p4est_3d_dgsem/elixir_navierstokes_convergence.jl
@@ -220,7 +220,10 @@ end
initial_condition = initial_condition_navier_stokes_convergence_test
# BC types
-velocity_bc_top_bottom = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2:4])
+velocity_bc_top_bottom = NoSlip() do x, t, equations
+ u = initial_condition_navier_stokes_convergence_test(x, t, equations)
+ return SVector(u[2], u[3], u[4])
+end
heat_bc_top_bottom = Adiabatic((x, t, equations) -> 0.0)
boundary_condition_top_bottom = BoundaryConditionNavierStokesWall(velocity_bc_top_bottom, heat_bc_top_bottom)
diff --git a/examples/structured_2d_dgsem/elixir_advection_restart.jl b/examples/structured_2d_dgsem/elixir_advection_restart.jl
index 98c44fac71a..82eaa21333a 100644
--- a/examples/structured_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/structured_2d_dgsem/elixir_advection_restart.jl
@@ -34,8 +34,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
# run the simulation
diff --git a/examples/structured_2d_dgsem/elixir_eulerpolytropic_convergence.jl b/examples/structured_2d_dgsem/elixir_eulerpolytropic_convergence.jl
new file mode 100644
index 00000000000..4fc9281e7a0
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_eulerpolytropic_convergence.jl
@@ -0,0 +1,63 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the polytropic Euler equations
+
+gamma = 1.4
+kappa = 0.5 # Scaling factor for the pressure.
+equations = PolytropicEulerEquations2D(gamma, kappa)
+
+initial_condition = initial_condition_convergence_test
+
+volume_flux = flux_winters_etal
+solver = DGSEM(polydeg = 3, surface_flux = flux_hll,
+ volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (0.0, 0.0)
+coordinates_max = (1.0, 1.0)
+
+cells_per_dimension = (4, 4)
+
+mesh = StructuredMesh(cells_per_dimension,
+ coordinates_min,
+ coordinates_max)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ source_terms = source_terms_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 0.1)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval,
+ extra_analysis_errors = (:l2_error_primitive,
+ :linf_error_primitive))
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 100,
+ save_initial_solution = true,
+ save_final_solution = true,
+ solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 0.1)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ save_solution,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/structured_2d_dgsem/elixir_eulerpolytropic_ec.jl b/examples/structured_2d_dgsem/elixir_eulerpolytropic_ec.jl
new file mode 100644
index 00000000000..de15f6b2bc3
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_eulerpolytropic_ec.jl
@@ -0,0 +1,80 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the polytropic Euler equations
+
+gamma = 1.4
+kappa = 0.5 # Scaling factor for the pressure.
+equations = PolytropicEulerEquations2D(gamma, kappa)
+
+initial_condition = initial_condition_weak_blast_wave
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = flux_winters_etal
+solver = DGSEM(polydeg=4, surface_flux=flux_winters_etal,
+ volume_integral=VolumeIntegralFluxDifferencing(volume_flux))
+
+###############################################################################
+# Get the curved quad mesh from a mapping function
+
+# Mapping as described in https://arxiv.org/abs/2012.12040, but reduced to 2D
+function mapping(xi_, eta_)
+ # Transform input variables between -1 and 1 onto [0,3]
+ xi = 1.5 * xi_ + 1.5
+ eta = 1.5 * eta_ + 1.5
+
+ y = eta + 3/8 * (cos(1.5 * pi * (2 * xi - 3)/3) *
+ cos(0.5 * pi * (2 * eta - 3)/3))
+
+ x = xi + 3/8 * (cos(0.5 * pi * (2 * xi - 3)/3) *
+ cos(2 * pi * (2 * y - 3)/3))
+
+ return SVector(x, y)
+end
+
+cells_per_dimension = (16, 16)
+
+# Create curved mesh with 16 x 16 elements
+mesh = StructuredMesh(cells_per_dimension, mapping)
+
+###############################################################################
+# create the semidiscretization object
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 2.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval=analysis_interval)
+
+save_solution = SaveSolutionCallback(interval=100,
+ save_initial_solution=true,
+ save_final_solution=true)
+
+stepsize_callback = StepsizeCallback(cfl=1.0)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback,
+ alive_callback,
+ save_solution,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
+ dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep=false, callback=callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/structured_2d_dgsem/elixir_eulerpolytropic_isothermal_wave.jl b/examples/structured_2d_dgsem/elixir_eulerpolytropic_isothermal_wave.jl
new file mode 100644
index 00000000000..4ab90957579
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_eulerpolytropic_isothermal_wave.jl
@@ -0,0 +1,83 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the polytropic Euler equations
+
+gamma = 1.0 # With gamma = 1 the system is isothermal.
+kappa = 1.0 # Scaling factor for the pressure.
+equations = PolytropicEulerEquations2D(gamma, kappa)
+
+# Linear pressure wave in the negative x-direction.
+function initial_condition_wave(x, t, equations::PolytropicEulerEquations2D)
+ rho = 1.0
+ v1 = 0.0
+ if x[1] > 0.0
+ rho = ((1.0 + 0.01 * sin(x[1] * 2 * pi)) / equations.kappa)^(1 / equations.gamma)
+ v1 = ((0.01 * sin((x[1] - 1 / 2) * 2 * pi)) / equations.kappa)
+ end
+ v2 = 0.0
+
+ return prim2cons(SVector(rho, v1, v2), equations)
+end
+initial_condition = initial_condition_wave
+
+volume_flux = flux_winters_etal
+solver = DGSEM(polydeg = 2, surface_flux = flux_hll,
+ volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (-2.0, -1.0)
+coordinates_max = (2.0, 1.0)
+
+cells_per_dimension = (64, 32)
+
+boundary_conditions = (x_neg = boundary_condition_periodic,
+ x_pos = boundary_condition_periodic,
+ y_neg = boundary_condition_periodic,
+ y_pos = boundary_condition_periodic)
+
+mesh = StructuredMesh(cells_per_dimension,
+ coordinates_min,
+ coordinates_max)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 200
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 50,
+ save_initial_solution = true,
+ save_final_solution = true,
+ solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 1.7)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ save_solution,
+ stepsize_callback)
+
+stage_limiter! = PositivityPreservingLimiterZhangShu(thresholds = (1.0e-4, 1.0e-4),
+ variables = (Trixi.density, pressure))
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(stage_limiter!, williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+
+# Print the timer summary
+summary_callback()
diff --git a/examples/structured_2d_dgsem/elixir_eulerpolytropic_wave.jl b/examples/structured_2d_dgsem/elixir_eulerpolytropic_wave.jl
new file mode 100644
index 00000000000..fd332b20aef
--- /dev/null
+++ b/examples/structured_2d_dgsem/elixir_eulerpolytropic_wave.jl
@@ -0,0 +1,80 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the polytropic Euler equations
+
+gamma = 2.0 # Adiabatic monatomic gas in 2d.
+kappa = 0.5 # Scaling factor for the pressure.
+equations = PolytropicEulerEquations2D(gamma, kappa)
+
+# Linear pressure wave in the negative x-direction.
+function initial_condition_wave(x, t, equations::PolytropicEulerEquations2D)
+ rho = 1.0
+ v1 = 0.0
+ if x[1] > 0.0
+ rho = ((1.0 + 0.01 * sin(x[1] * 2 * pi)) / equations.kappa)^(1 / equations.gamma)
+ v1 = ((0.01 * sin((x[1] - 1 / 2) * 2 * pi)) / equations.kappa)
+ end
+ v2 = 0.0
+
+ return prim2cons(SVector(rho, v1, v2), equations)
+end
+initial_condition = initial_condition_wave
+
+volume_flux = flux_winters_etal
+solver = DGSEM(polydeg = 2, surface_flux = flux_hll,
+ volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+coordinates_min = (-2.0, -1.0)
+coordinates_max = (2.0, 1.0)
+
+cells_per_dimension = (64, 32)
+
+boundary_conditions = (x_neg = boundary_condition_periodic,
+ x_pos = boundary_condition_periodic,
+ y_neg = boundary_condition_periodic,
+ y_pos = boundary_condition_periodic)
+
+mesh = StructuredMesh(cells_per_dimension,
+ coordinates_min,
+ coordinates_max)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 200
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 50,
+ save_initial_solution = true,
+ save_final_solution = true,
+ solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 1.7)
+
+callbacks = CallbackSet(summary_callback,
+ analysis_callback, alive_callback,
+ save_solution,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+
+# Print the timer summary
+summary_callback()
diff --git a/examples/structured_3d_dgsem/elixir_advection_restart.jl b/examples/structured_3d_dgsem/elixir_advection_restart.jl
index 39d28848c77..921c5310340 100644
--- a/examples/structured_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/structured_3d_dgsem/elixir_advection_restart.jl
@@ -32,8 +32,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl
new file mode 100644
index 00000000000..1daeab04a71
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_walls_amr.jl
@@ -0,0 +1,172 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the ideal compressible Navier-Stokes equations
+
+prandtl_number() = 0.72
+mu() = 0.01
+
+equations = CompressibleEulerEquations1D(1.4)
+equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu=mu(), Prandtl=prandtl_number(),
+ gradient_variables=GradientVariablesEntropy())
+
+# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux
+solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs,
+ volume_integral=VolumeIntegralWeakForm())
+
+coordinates_min = -1.0
+coordinates_max = 1.0
+
+# Create a uniformly refined mesh
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level=3,
+ periodicity=false,
+ n_cells_max=30_000) # set maximum capacity of tree data structure
+
+# Note: the initial condition cannot be specialized to `CompressibleNavierStokesDiffusion1D`
+# since it is called by both the parabolic solver (which passes in `CompressibleNavierStokesDiffusion1D`)
+# and by the hyperbolic solver when setting the initial data (which passes in `CompressibleEulerEquations1D`).
+# This convergence test setup was originally derived by Andrew Winters (@andrewwinters5000)
+function initial_condition_navier_stokes_convergence_test(x, t, equations)
+ # Amplitude and shift
+ A = 0.5
+ c = 2.0
+
+ # convenience values for trig. functions
+ pi_x = pi * x[1]
+ pi_t = pi * t
+
+ rho = c + A * cos(pi_x) * cos(pi_t)
+ v1 = log(x[1] + 2.0) * (1.0 - exp(-A * (x[1] - 1.0)) ) * cos(pi_t)
+ p = rho^2
+
+ return prim2cons(SVector(rho, v1, p), equations)
+end
+
+@inline function source_terms_navier_stokes_convergence_test(u, x, t, equations)
+ x = x[1]
+
+ # TODO: parabolic
+ # we currently need to hardcode these parameters until we fix the "combined equation" issue
+ # see also https://github.com/trixi-framework/Trixi.jl/pull/1160
+ inv_gamma_minus_one = inv(equations.gamma - 1)
+ Pr = prandtl_number()
+ mu_ = mu()
+
+ # Same settings as in `initial_condition`
+ # Amplitude and shift
+ A = 0.5
+ c = 2.0
+
+ # convenience values for trig. functions
+ pi_x = pi * x
+ pi_t = pi * t
+
+ # compute the manufactured solution and all necessary derivatives
+ rho = c + A * cos(pi_x) * cos(pi_t)
+ rho_t = -pi * A * cos(pi_x) * sin(pi_t)
+ rho_x = -pi * A * sin(pi_x) * cos(pi_t)
+ rho_xx = -pi * pi * A * cos(pi_x) * cos(pi_t)
+
+ v1 = log(x + 2.0) * (1.0 - exp(-A * (x - 1.0))) * cos(pi_t)
+ v1_t = -pi * log(x + 2.0) * (1.0 - exp(-A * (x - 1.0))) * sin(pi_t)
+ v1_x = (A * log(x + 2.0) * exp(-A * (x - 1.0)) + (1.0 - exp(-A * (x - 1.0))) / (x + 2.0)) * cos(pi_t)
+ v1_xx = (( 2.0 * A * exp(-A * (x - 1.0)) / (x + 2.0)
+ - A * A * log(x + 2.0) * exp(-A * (x - 1.0))
+ - (1.0 - exp(-A * (x - 1.0))) / ((x + 2.0) * (x + 2.0))) * cos(pi_t))
+
+ p = rho * rho
+ p_t = 2.0 * rho * rho_t
+ p_x = 2.0 * rho * rho_x
+ p_xx = 2.0 * rho * rho_xx + 2.0 * rho_x * rho_x
+
+    # Total energy and its derivatives for the 1D ansatz
+ E = p * inv_gamma_minus_one + 0.5 * rho * v1^2
+ E_t = p_t * inv_gamma_minus_one + 0.5 * rho_t * v1^2 + rho * v1 * v1_t
+ E_x = p_x * inv_gamma_minus_one + 0.5 * rho_x * v1^2 + rho * v1 * v1_x
+
+ # Some convenience constants
+ T_const = equations.gamma * inv_gamma_minus_one / Pr
+ inv_rho_cubed = 1.0 / (rho^3)
+
+ # compute the source terms
+ # density equation
+ du1 = rho_t + rho_x * v1 + rho * v1_x
+
+    # x-momentum equation
+ du2 = ( rho_t * v1 + rho * v1_t
+ + p_x + rho_x * v1^2 + 2.0 * rho * v1 * v1_x
+           # stress tensor from x-direction
+ - v1_xx * mu_)
+
+ # total energy equation
+ du3 = ( E_t + v1_x * (E + p) + v1 * (E_x + p_x)
+ # stress tensor and temperature gradient terms from x-direction
+ - v1_xx * v1 * mu_
+ - v1_x * v1_x * mu_
+ - T_const * inv_rho_cubed * ( p_xx * rho * rho
+ - 2.0 * p_x * rho * rho_x
+ + 2.0 * p * rho_x * rho_x
+ - p * rho * rho_xx ) * mu_ )
+
+ return SVector(du1, du2, du3)
+end
+
+initial_condition = initial_condition_navier_stokes_convergence_test
+
+# BC types
+velocity_bc_left_right = NoSlip((x, t, equations) -> initial_condition_navier_stokes_convergence_test(x, t, equations)[2])
+
+heat_bc_left = Isothermal((x, t, equations) ->
+ Trixi.temperature(initial_condition_navier_stokes_convergence_test(x, t, equations),
+ equations_parabolic))
+heat_bc_right = Adiabatic((x, t, equations) -> 0.0)
+
+boundary_condition_left = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_left)
+boundary_condition_right = BoundaryConditionNavierStokesWall(velocity_bc_left_right, heat_bc_right)
+
+# define inviscid boundary conditions
+boundary_conditions = (; x_neg = boundary_condition_slip_wall,
+ x_pos = boundary_condition_slip_wall)
+
+# define viscous boundary conditions
+boundary_conditions_parabolic = (; x_neg = boundary_condition_left,
+ x_pos = boundary_condition_right)
+
+semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver;
+ boundary_conditions=(boundary_conditions, boundary_conditions_parabolic),
+ source_terms=source_terms_navier_stokes_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+# Create ODE problem with time span `tspan`
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+alive_callback = AliveCallback(alive_interval=10)
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
+
+amr_controller = ControllerThreeLevel(semi,
+ IndicatorLöhner(semi, variable=Trixi.density),
+ base_level=3,
+ med_level=4, med_threshold=0.005,
+ max_level=5, max_threshold=0.01)
+
+amr_callback = AMRCallback(semi, amr_controller,
+ interval=5,
+ adapt_initial_condition=true)
+
+# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback)
+
+###############################################################################
+# run the simulation
+
+time_int_tol = 1e-8
+sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
+ ode_default_options()..., callback=callbacks)
+summary_callback() # print the timer summary
\ No newline at end of file
diff --git a/examples/tree_1d_dgsem/elixir_shallow_water_quasi_1d_source_terms.jl b/examples/tree_1d_dgsem/elixir_shallow_water_quasi_1d_source_terms.jl
new file mode 100644
index 00000000000..72747c669e2
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_shallow_water_quasi_1d_source_terms.jl
@@ -0,0 +1,60 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# Semidiscretization of the quasi 1d shallow water equations
+# See Chan et al. https://doi.org/10.48550/arXiv.2307.12089 for details
+
+equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.81)
+
+initial_condition = initial_condition_convergence_test
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal)
+surface_flux = (FluxPlusDissipation(flux_chan_etal, DissipationLocalLaxFriedrichs()),
+ flux_nonconservative_chan_etal)
+solver = DGSEM(polydeg = 3, surface_flux = surface_flux,
+ volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+###############################################################################
+# Get the TreeMesh and set up a periodic mesh
+
+coordinates_min = 0.0
+coordinates_max = sqrt(2.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level = 3,
+ n_cells_max = 10_000,
+ periodicity = true)
+
+# create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+ source_terms = source_terms_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 1.0)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 500
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 200,
+ save_initial_solution = true,
+ save_final_solution = true)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
+
+###############################################################################
+# run the simulation
+
+# use a Runge-Kutta method with automatic (error based) time step size control
+sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-8, reltol = 1.0e-8,
+ ode_default_options()..., callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_quasi_1d_well_balanced.jl b/examples/tree_1d_dgsem/elixir_shallowwater_quasi_1d_well_balanced.jl
new file mode 100644
index 00000000000..d9f1a52b500
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_shallowwater_quasi_1d_well_balanced.jl
@@ -0,0 +1,84 @@
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+# semidiscretization of the shallow water equations with a discontinuous
+# bottom topography function and channel width function
+
+equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.81, H0 = 2.0)
+
+# Set up a truly discontinuous bottom topography function and channel width for
+# this academic test case of well-balancedness. The errors from the analysis
+# callback are not important, but the error for this lake-at-rest test case
+# `∑|H0-(h+b)|` should be around machine roundoff.
+# Works as intended for TreeMesh1D with `initial_refinement_level=3`. If the mesh
+# refinement level is changed, the initial condition below may need to be changed as well
+# to ensure that the discontinuities lie on an element interface.
+function initial_condition_discontinuous_well_balancedness(x, t,
+ equations::ShallowWaterEquationsQuasi1D)
+ H = equations.H0
+ v = 0.0
+
+    # For a periodic domain, this choice of `b` and `a` mimics a
+    # discontinuity across the periodic boundary.
+ b = 0.5 * (x[1] + 1)
+ a = 2 + x[1]
+
+ return prim2cons(SVector(H, v, b, a), equations)
+end
+
+initial_condition = initial_condition_discontinuous_well_balancedness
+
+###############################################################################
+# Get the DG approximation space
+
+volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal)
+surface_flux = volume_flux
+solver = DGSEM(polydeg = 4, surface_flux = surface_flux,
+ volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
+
+###############################################################################
+# Get the TreeMesh and set up a periodic mesh
+
+coordinates_min = -1.0
+coordinates_max = 1.0
+mesh = TreeMesh(coordinates_min, coordinates_max,
+ initial_refinement_level = 3,
+ n_cells_max = 10_000)
+
+# Create the semidiscretization object
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
+
+###############################################################################
+# ODE solver
+
+tspan = (0.0, 100.0)
+ode = semidiscretize(semi, tspan)
+
+###############################################################################
+# Callbacks
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 1000
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval,
+ extra_analysis_integrals = (lake_at_rest_error,))
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 1000,
+ save_initial_solution = true,
+ save_final_solution = true)
+
+stepsize_callback = StepsizeCallback(cfl = 3.0)
+
+callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution,
+ stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+ dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+ save_everystep = false, callback = callbacks);
+summary_callback() # print the timer summary
diff --git a/examples/tree_2d_dgsem/elixir_advection_extended.jl b/examples/tree_2d_dgsem/elixir_advection_extended.jl
index 8c837957ffd..278dc85386d 100644
--- a/examples/tree_2d_dgsem/elixir_advection_extended.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_extended.jl
@@ -54,7 +54,7 @@ analysis_callback = AnalysisCallback(semi, interval=analysis_interval,
alive_callback = AliveCallback(analysis_interval=analysis_interval)
# The SaveRestartCallback allows to save a file from which a Trixi.jl simulation can be restarted
-save_restart = SaveRestartCallback(interval=100,
+save_restart = SaveRestartCallback(interval=40,
save_final_restart=true)
# The SaveSolutionCallback allows to save the solution to a file in regular intervals
@@ -77,9 +77,10 @@ callbacks = CallbackSet(summary_callback,
# run the simulation
# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks
-sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
+alg = CarpenterKennedy2N54(williamson_condition=false)
+sol = solve(ode, alg,
dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks);
+ save_everystep=false, callback=callbacks; ode_default_options()...);
# Print the timer summary
summary_callback()
diff --git a/examples/tree_2d_dgsem/elixir_advection_restart.jl b/examples/tree_2d_dgsem/elixir_advection_restart.jl
index 72efb7d0c84..b63a8d1f7bc 100644
--- a/examples/tree_2d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_2d_dgsem/elixir_advection_restart.jl
@@ -3,9 +3,10 @@ using OrdinaryDiffEq
using Trixi
###############################################################################
-# create a restart file
-
-trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_extended.jl"))
+# Define time integration algorithm
+alg = CarpenterKennedy2N54(williamson_condition=false)
+# Create a restart file
+trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_extended.jl"), alg = alg, tspan = (0.0, 10.0))
###############################################################################
@@ -14,26 +15,29 @@ trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_extended.jl"))
# Note: If you get a restart file from somewhere else, you need to provide
# appropriate setups in the elixir loading a restart file
-restart_filename = joinpath("out", "restart_000018.h5")
+restart_filename = joinpath("out", "restart_000040.h5")
mesh = load_mesh(restart_filename)
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver)
-tspan = (load_time(restart_filename), 2.0)
+tspan = (load_time(restart_filename), 10.0)
dt = load_dt(restart_filename)
ode = semidiscretize(semi, tspan, restart_filename);
# Do not overwrite the initial snapshot written by elixir_advection_extended.jl.
save_solution.condition.save_initial_solution = false
-alg = CarpenterKennedy2N54(williamson_condition=false)
integrator = init(ode, alg,
dt=dt, # solve needs some value here but it will be overwritten by the stepsize_callback
- save_everystep=false, callback=callbacks)
+ save_everystep=false, callback=callbacks; ode_default_options()...)
+
+# Load saved context for adaptive time integrator
+if integrator.opts.adaptive
+ load_adaptive_time_integrator!(integrator, restart_filename)
+end
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
# run the simulation
diff --git a/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl
index 6b69e4db563..c696e2de416 100644
--- a/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl
+++ b/examples/tree_2d_dgsem/elixir_euler_shockcapturing_subcell.jl
@@ -84,7 +84,7 @@ callbacks = CallbackSet(summary_callback,
###############################################################################
# run the simulation
-stage_callbacks = (SubcellLimiterIDPCorrection(),)
+stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback(save_errors=false))
sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks=stage_callbacks);
dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
index a67eaeb5b2b..c5a7a5932e6 100644
--- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
+++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl
@@ -132,7 +132,9 @@ callbacks = CallbackSet(summary_callback,
###############################################################################
# run the simulation
-stage_callbacks = (SubcellLimiterIDPCorrection(),)
+output_directory = "out"
+stage_callbacks = (SubcellLimiterIDPCorrection(),
+ BoundsCheckCallback(save_errors=true, interval=100, output_directory=output_directory))
sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks=stage_callbacks);
dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl
index 81e48737e79..3314343ccca 100644
--- a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl
+++ b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl
@@ -66,7 +66,7 @@ summary_callback = SummaryCallback()
alive_callback = AliveCallback(alive_interval=100)
analysis_interval = 100
analysis_callback = AnalysisCallback(semi, interval=analysis_interval)
-callbacks = CallbackSet(summary_callback, alive_callback)
+callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
###############################################################################
# run the simulation
diff --git a/examples/tree_3d_dgsem/elixir_advection_restart.jl b/examples/tree_3d_dgsem/elixir_advection_restart.jl
index 3061f165874..b7835ed061f 100644
--- a/examples/tree_3d_dgsem/elixir_advection_restart.jl
+++ b/examples/tree_3d_dgsem/elixir_advection_restart.jl
@@ -31,8 +31,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
index b85cc2c6d70..6653f8662d9 100644
--- a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
+++ b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl
@@ -33,8 +33,7 @@ integrator = init(ode, CarpenterKennedy2N54(williamson_condition=false),
save_everystep=false, callback=callbacks);
# Get the last time index and work with that.
-integrator.iter = load_timestep(restart_filename)
-integrator.stats.naccept = integrator.iter
+load_timestep!(integrator, restart_filename)
###############################################################################
diff --git a/src/Trixi.jl b/src/Trixi.jl
index dcf33c1ca03..619dbef5cd5 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -149,7 +149,9 @@ export AcousticPerturbationEquations2D,
LatticeBoltzmannEquations2D, LatticeBoltzmannEquations3D,
ShallowWaterEquations1D, ShallowWaterEquations2D,
ShallowWaterTwoLayerEquations1D, ShallowWaterTwoLayerEquations2D,
- LinearizedEulerEquations2D
+ ShallowWaterEquationsQuasi1D,
+ LinearizedEulerEquations2D,
+ PolytropicEulerEquations2D
export LaplaceDiffusion1D, LaplaceDiffusion2D,
CompressibleNavierStokesDiffusion1D, CompressibleNavierStokesDiffusion2D,
@@ -164,6 +166,7 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle,
flux_kennedy_gruber, flux_shima_etal, flux_ec,
flux_fjordholm_etal, flux_nonconservative_fjordholm_etal, flux_es_fjordholm_etal,
flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal,
+ flux_chan_etal, flux_nonconservative_chan_etal, flux_winters_etal,
hydrostatic_reconstruction_audusse_etal, flux_nonconservative_audusse_etal,
# TODO: TrixiShallowWater: move anything with "chen_noelle" to new file
hydrostatic_reconstruction_chen_noelle, flux_nonconservative_chen_noelle,
@@ -230,7 +233,7 @@ export DG,
SurfaceIntegralUpwind,
MortarL2
-export VolumeIntegralSubcellLimiting,
+export VolumeIntegralSubcellLimiting, BoundsCheckCallback,
SubcellLimiterIDP, SubcellLimiterIDPCorrection
export nelements, nnodes, nvariables,
@@ -254,7 +257,8 @@ export SummaryCallback, SteadyStateCallback, AnalysisCallback, AliveCallback,
GlmSpeedCallback, LBMCollisionCallback, EulerAcousticsCouplingCallback,
TrivialCallback, AnalysisCallbackCoupled
-export load_mesh, load_time, load_timestep, load_dt
+export load_mesh, load_time, load_timestep, load_timestep!, load_dt,
+ load_adaptive_time_integrator!
export ControllerThreeLevel, ControllerThreeLevelCombined,
IndicatorLöhner, IndicatorLoehner, IndicatorMax,
diff --git a/src/auxiliary/math.jl b/src/auxiliary/math.jl
index 4ecf7dd3fcc..38ea0bda8c8 100644
--- a/src/auxiliary/math.jl
+++ b/src/auxiliary/math.jl
@@ -211,4 +211,71 @@ Return `x` if `x` is negative, else zero. In other words, return
@inline function negative_part(x)
return min(x, zero(x))
end
+
+"""
+ stolarsky_mean(x, y, gamma)
+
+Compute an instance of a weighted Stolarsky mean of the form
+
+ stolarsky_mean(x, y, gamma) = (gamma - 1)/gamma * (y^gamma - x^gamma) / (y^(gamma-1) - x^(gamma-1))
+
+where `gamma > 1`.
+
+Problem: The formula above has a removable singularity at `x == y`. Thus,
+some care must be taken to implement it correctly without problems or loss
+of accuracy when `x ≈ y`. Here, we use the approach proposed by
+Winters et al. (2020).
+Set f = (y - x) / (y + x) and g = gamma (for compact notation).
+Then, we use the expansions
+
+ ((1+f)^g - (1-f)^g) / g = 2*f + (g-1)(g-2)/3 * f^3 + (g-1)(g-2)(g-3)(g-4)/60 * f^5 + O(f^7)
+
+and
+
+ ((1+f)^(g-1) - (1-f)^(g-1)) / (g-1) = 2*f + (g-2)(g-3)/3 * f^3 + (g-2)(g-3)(g-4)(g-5)/60 * f^5 + O(f^7)
+
+Inserting the first few terms of these expansions and performing polynomial long division
+we find that
+
+ stolarsky_mean(x, y, gamma) ≈ (y + x) / 2 * (1 + (g-2)/3 * f^2 - (g+1)(g-2)(g-3)/45 * f^4 + (g+1)(g-2)(g-3)(2g(g-2)-9)/945 * f^6)
+
+Since divisions are usually more expensive on modern hardware than
+multiplications (Agner Fog), we try to avoid computing two divisions. Thus,
+we use
+
+ f^2 = (y - x)^2 / (x + y)^2
+ = (x * (x - 2 * y) + y * y) / (x * (x + 2 * y) + y * y)
+
+Given ε = 1.0e-4, we use the following algorithm.
+
+ if f^2 < ε
+ # use the expansion above
+ else
+ # use the direct formula (gamma - 1)/gamma * (y^gamma - x^gamma) / (y^(gamma-1) - x^(gamma-1))
+ end
+
+# References
+- Andrew R. Winters, Christof Czernik, Moritz B. Schily & Gregor J. Gassner (2020)
+ Entropy stable numerical approximations for the isothermal and polytropic
+ Euler equations
+ [DOI: 10.1007/s10543-019-00789-w](https://doi.org/10.1007/s10543-019-00789-w)
+- Agner Fog.
+ Lists of instruction latencies, throughputs and micro-operation breakdowns
+ for Intel, AMD, and VIA CPUs.
+ [https://www.agner.org/optimize/instruction_tables.pdf](https://www.agner.org/optimize/instruction_tables.pdf)
+"""
+@inline function stolarsky_mean(x, y, gamma)
+ epsilon_f2 = 1.0e-4
+ f2 = (x * (x - 2 * y) + y * y) / (x * (x + 2 * y) + y * y) # f2 = f^2
+ if f2 < epsilon_f2
+ # convenience coefficients
+ c1 = (1 / 3) * (gamma - 2)
+ c2 = -(1 / 15) * (gamma + 1) * (gamma - 3) * c1
+ c3 = -(1 / 21) * (2 * gamma * (gamma - 2) - 9) * c2
+ return 0.5 * (x + y) * @evalpoly(f2, 1, c1, c2, c3)
+ else
+ return (gamma - 1) / gamma * (y^gamma - x^gamma) /
+ (y^(gamma - 1) - x^(gamma - 1))
+ end
+end
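+
+# Minimal usage sketch (illustrative only; these calls and values are assumptions,
+# not taken from the changes above): equal arguments hit the series branch and
+# return the argument itself, while well-separated arguments use the direct
+# formula and yield a value strictly between the two inputs.
+#
+#   stolarsky_mean(2.0, 2.0, 1.4) ≈ 2.0           # f^2 == 0  -> series expansion branch
+#   1.0 < stolarsky_mean(1.0, 2.0, 1.4) < 2.0     # f^2 = 1/9 -> direct formula branch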
end # @muladd
diff --git a/src/callbacks_stage/callbacks_stage.jl b/src/callbacks_stage/callbacks_stage.jl
index 976af327e6f..70d60de7914 100644
--- a/src/callbacks_stage/callbacks_stage.jl
+++ b/src/callbacks_stage/callbacks_stage.jl
@@ -7,6 +7,7 @@
include("positivity_zhang_shu.jl")
include("subcell_limiter_idp_correction.jl")
+include("subcell_bounds_check.jl")
# TODO: TrixiShallowWater: move specific limiter file
include("positivity_shallow_water.jl")
end # @muladd
diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl
new file mode 100644
index 00000000000..c86f266147c
--- /dev/null
+++ b/src/callbacks_stage/subcell_bounds_check.jl
@@ -0,0 +1,130 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+"""
+ BoundsCheckCallback(; output_directory="out", save_errors=false, interval=1)
+
+Subcell limiting techniques with [`SubcellLimiterIDP`](@ref) are constructed to adhere to certain
+local or global bounds. To make sure that these bounds are actually met, this callback calculates
+the maximum deviation from the bounds. The maximum deviation per applied bound is printed to
+the screen at the end of the simulation.
+For more insight, setting `save_errors=true` exports the occurring deviations every
+`interval` time steps during the simulation. The maximum deviations since the last
+export are then saved in "`output_directory`/deviations.txt".
+The `BoundsCheckCallback` has to be applied as a stage callback for the SSPRK time integration scheme.
+
+!!! note
+ For `SubcellLimiterIDP`, the solution is corrected in the a posteriori correction stage
+ [`SubcellLimiterIDPCorrection`](@ref). So, to check the final solution, this bounds check
+ callback must be called after the correction stage.
+"""
+struct BoundsCheckCallback
+ output_directory::String
+ save_errors::Bool
+ interval::Int
+end
+
+function BoundsCheckCallback(; output_directory = "out", save_errors = false,
+ interval = 1)
+ BoundsCheckCallback(output_directory, save_errors, interval)
+end
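+
+# Usage sketch, mirroring the subcell limiting elixirs updated above: the bounds
+# check is appended after the a posteriori correction stage so that the corrected
+# solution is what gets checked, e.g.
+#
+#   stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback(save_errors=false))
+#   sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks=stage_callbacks);
+#                     dt=1.0, save_everystep=false, callback=callbacks)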
+
+function (callback::BoundsCheckCallback)(u_ode, integrator, stage)
+ mesh, equations, solver, cache = mesh_equations_solver_cache(integrator.p)
+ (; t, iter, alg) = integrator
+ u = wrap_array(u_ode, mesh, equations, solver, cache)
+
+ save_errors = callback.save_errors && (callback.interval > 0) &&
+ (stage == length(alg.c)) &&
+ (iter % callback.interval == 0 || integrator.finalstep)
+ @trixi_timeit timer() "check_bounds" check_bounds(u, mesh, equations, solver, cache,
+ solver.volume_integral, t,
+ iter + 1,
+ callback.output_directory,
+ save_errors)
+end
+
+function check_bounds(u, mesh, equations, solver, cache,
+ volume_integral::AbstractVolumeIntegral, t, iter,
+ output_directory, save_errors)
+ return nothing
+end
+
+function check_bounds(u, mesh, equations, solver, cache,
+ volume_integral::VolumeIntegralSubcellLimiting, t, iter,
+ output_directory, save_errors)
+ check_bounds(u, mesh, equations, solver, cache, volume_integral.limiter, t, iter,
+ output_directory, save_errors)
+end
+
+function init_callback(callback::BoundsCheckCallback, semi)
+ init_callback(callback, semi, semi.solver.volume_integral)
+end
+
+init_callback(callback::BoundsCheckCallback, semi, volume_integral::AbstractVolumeIntegral) = nothing
+
+function init_callback(callback::BoundsCheckCallback, semi,
+ volume_integral::VolumeIntegralSubcellLimiting)
+ init_callback(callback, semi, volume_integral.limiter)
+end
+
+function init_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimiterIDP)
+ if !callback.save_errors || (callback.interval == 0)
+ return nothing
+ end
+
+ (; positivity) = limiter
+ (; output_directory) = callback
+ variables = varnames(cons2cons, semi.equations)
+
+ mkpath(output_directory)
+ open("$output_directory/deviations.txt", "a") do f
+ print(f, "# iter, simu_time")
+ if positivity
+ for v in limiter.positivity_variables_cons
+ print(f, ", " * string(variables[v]) * "_min")
+ end
+ end
+ println(f)
+ end
+
+ return nothing
+end
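+
+# Sketch of the resulting `deviations.txt` layout (assuming a single positivity
+# variable `rho`; the data values are placeholders): the header written above is
+# followed by one row per export from `check_bounds`, roughly
+#
+#   # iter, simu_time, rho_min
+#   100, 0.0123, 0.0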
+
+function finalize_callback(callback::BoundsCheckCallback, semi)
+ finalize_callback(callback, semi, semi.solver.volume_integral)
+end
+
+finalize_callback(callback::BoundsCheckCallback, semi, volume_integral::AbstractVolumeIntegral) = nothing
+
+function finalize_callback(callback::BoundsCheckCallback, semi,
+ volume_integral::VolumeIntegralSubcellLimiting)
+ finalize_callback(callback, semi, volume_integral.limiter)
+end
+
+@inline function finalize_callback(callback::BoundsCheckCallback, semi,
+ limiter::SubcellLimiterIDP)
+ (; positivity) = limiter
+ (; idp_bounds_delta) = limiter.cache
+ variables = varnames(cons2cons, semi.equations)
+
+ println("─"^100)
+ println("Maximum deviation from bounds:")
+ println("─"^100)
+ if positivity
+ for v in limiter.positivity_variables_cons
+ println(string(variables[v]) * ":\n- positivity: ",
+ idp_bounds_delta[Symbol(string(v), "_min")][2])
+ end
+ end
+ println("─"^100 * "\n")
+
+ return nothing
+end
+
+include("subcell_bounds_check_2d.jl")
+end # @muladd
diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl
new file mode 100644
index 00000000000..8159becb503
--- /dev/null
+++ b/src/callbacks_stage/subcell_bounds_check_2d.jl
@@ -0,0 +1,49 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+@inline function check_bounds(u, mesh::AbstractMesh{2}, equations, solver, cache,
+ limiter::SubcellLimiterIDP,
+ time, iter, output_directory, save_errors)
+ (; positivity) = solver.volume_integral.limiter
+ (; variable_bounds) = limiter.cache.subcell_limiter_coefficients
+ (; idp_bounds_delta) = limiter.cache
+
+ if positivity
+ for v in limiter.positivity_variables_cons
+ key = Symbol(string(v), "_min")
+ deviation = idp_bounds_delta[key]
+ for element in eachelement(solver, cache), j in eachnode(solver),
+ i in eachnode(solver)
+
+ var = u[v, i, j, element]
+ deviation[1] = max(deviation[1],
+ variable_bounds[key][i, j, element] - var)
+ end
+ deviation[2] = max(deviation[2], deviation[1])
+ end
+ end
+ if save_errors
+ # Print to output file
+ open("$output_directory/deviations.txt", "a") do f
+ print(f, iter, ", ", time)
+ if positivity
+ for v in limiter.positivity_variables_cons
+ key = Symbol(string(v), "_min")
+ print(f, ", ", idp_bounds_delta[key][1])
+ end
+ end
+ println(f)
+ end
+ # Reset first entries of idp_bounds_delta
+ for (key, _) in idp_bounds_delta
+ idp_bounds_delta[key][1] = zero(eltype(idp_bounds_delta[key][1]))
+ end
+ end
+
+ return nothing
+end
+end # @muladd
diff --git a/src/callbacks_step/alive.jl b/src/callbacks_step/alive.jl
index eeacd9681d8..9df7181521e 100644
--- a/src/callbacks_step/alive.jl
+++ b/src/callbacks_step/alive.jl
@@ -86,9 +86,15 @@ function (alive_callback::AliveCallback)(integrator)
println("─"^100)
println()
elseif mpi_isroot()
+ t = integrator.t
+ t_initial = first(integrator.sol.prob.tspan)
+ t_final = last(integrator.sol.prob.tspan)
+ sim_time_percentage = (t - t_initial) / (t_final - t_initial) * 100
runtime_absolute = 1.0e-9 * (time_ns() - alive_callback.start_time)
- @printf("#timesteps: %6d │ Δt: %.4e │ sim. time: %.4e │ run time: %.4e s\n",
- integrator.stats.naccept, integrator.dt, integrator.t, runtime_absolute)
+ println(rpad(@sprintf("#timesteps: %6d │ Δt: %.4e │ sim. time: %.4e (%5.3f%%)",
+ integrator.stats.naccept, integrator.dt, t,
+ sim_time_percentage), 71) *
+ @sprintf("│ run time: %.4e s", runtime_absolute))
end
# avoid re-evaluating possible FSAL stages
diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl
index 4d80e6e1139..ba840ff9675 100644
--- a/src/callbacks_step/amr.jl
+++ b/src/callbacks_step/amr.jl
@@ -192,6 +192,16 @@ end
amr_callback(u_ode, mesh_equations_solver_cache(semi)..., semi, t, iter; kwargs...)
end
+@inline function (amr_callback::AMRCallback)(u_ode::AbstractVector,
+ semi::SemidiscretizationHyperbolicParabolic,
+ t, iter;
+ kwargs...)
+ # Note that we don't `wrap_array` the vector `u_ode` to be able to `resize!`
+ # it when doing AMR while still dispatching on the `mesh` etc.
+ amr_callback(u_ode, mesh_equations_solver_cache(semi)..., semi.cache_parabolic,
+ semi, t, iter; kwargs...)
+end
+
# `passive_args` is currently used for Euler with self-gravity to adapt the gravity solver
# passively without querying its indicator, based on the assumption that both solvers use
# the same mesh. That's a hack and should be improved in the future once we have more examples
@@ -346,6 +356,154 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
return has_changed
end
+function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
+ equations, dg::DG,
+ cache, cache_parabolic,
+ semi::SemidiscretizationHyperbolicParabolic,
+ t, iter;
+ only_refine = false, only_coarsen = false)
+ @unpack controller, adaptor = amr_callback
+
+ u = wrap_array(u_ode, mesh, equations, dg, cache)
+ # Indicator kept based on hyperbolic variables
+ lambda = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, cache,
+ t = t, iter = iter)
+
+ if mpi_isparallel()
+ error("MPI has not been verified yet for parabolic AMR")
+
+ # Collect lambda for all elements
+ lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
+ # Use parent because n_elements_by_rank is an OffsetArray
+ recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
+ MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
+ lambda = lambda_global
+ end
+
+ leaf_cell_ids = leaf_cells(mesh.tree)
+ @boundscheck begin
+ @assert axes(lambda)==axes(leaf_cell_ids) ("Indicator (axes = $(axes(lambda))) and leaf cell (axes = $(axes(leaf_cell_ids))) arrays have different axes")
+ end
+
+ @unpack to_refine, to_coarsen = amr_callback.amr_cache
+ empty!(to_refine)
+ empty!(to_coarsen)
+ for element in 1:length(lambda)
+ controller_value = lambda[element]
+ if controller_value > 0
+ push!(to_refine, leaf_cell_ids[element])
+ elseif controller_value < 0
+ push!(to_coarsen, leaf_cell_ids[element])
+ end
+ end
+
+ @trixi_timeit timer() "refine" if !only_coarsen && !isempty(to_refine)
+ # refine mesh
+ refined_original_cells = @trixi_timeit timer() "mesh" refine!(mesh.tree,
+ to_refine)
+
+ # Find all indices of elements whose cell ids are in refined_original_cells
+ # Note: This assumes same indices for hyperbolic and parabolic part.
+ elements_to_refine = findall(in(refined_original_cells),
+ cache.elements.cell_ids)
+
+ # refine solver
+ @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg,
+ cache, cache_parabolic,
+ elements_to_refine)
+ else
+ # If there is nothing to refine, create empty array for later use
+ refined_original_cells = Int[]
+ end
+
+ @trixi_timeit timer() "coarsen" if !only_refine && !isempty(to_coarsen)
+ # Since the cells may have been shifted due to refinement, first we need to
+ # translate the old cell ids to the new cell ids
+ if !isempty(to_coarsen)
+ to_coarsen = original2refined(to_coarsen, refined_original_cells, mesh)
+ end
+
+ # Next, determine the parent cells from which the fine cells are to be
+ # removed, since these are needed for the coarsen! function. However, since
+ # we only want to coarsen if *all* child cells are marked for coarsening,
+ # we count the coarsening indicators for each parent cell and only coarsen
+ # if all children are marked as such (i.e., where the count is 2^ndims). At
+ # the same time, check if a cell is marked for coarsening even though it is
+ # *not* a leaf cell -> this can only happen if it was refined due to 2:1
+ # smoothing during the preceding refinement operation.
+ parents_to_coarsen = zeros(Int, length(mesh.tree))
+ for cell_id in to_coarsen
+ # If cell has no parent, it cannot be coarsened
+ if !has_parent(mesh.tree, cell_id)
+ continue
+ end
+
+ # If cell is not leaf (anymore), it cannot be coarsened
+ if !is_leaf(mesh.tree, cell_id)
+ continue
+ end
+
+ # Increase count for parent cell
+ parent_id = mesh.tree.parent_ids[cell_id]
+ parents_to_coarsen[parent_id] += 1
+ end
+
+ # Extract only those parent cells for which all children should be coarsened
+ to_coarsen = collect(1:length(parents_to_coarsen))[parents_to_coarsen .== 2^ndims(mesh)]
+
+ # Finally, coarsen mesh
+ coarsened_original_cells = @trixi_timeit timer() "mesh" coarsen!(mesh.tree,
+ to_coarsen)
+
+ # Convert coarsened parent cell ids to the list of child cell ids that have
+ # been removed, since this is the information that is expected by the solver
+ removed_child_cells = zeros(Int,
+ n_children_per_cell(mesh.tree) *
+ length(coarsened_original_cells))
+ for (index, coarse_cell_id) in enumerate(coarsened_original_cells)
+ for child in 1:n_children_per_cell(mesh.tree)
+ removed_child_cells[n_children_per_cell(mesh.tree) * (index - 1) + child] = coarse_cell_id +
+ child
+ end
+ end
+
+ # Find all indices of elements whose cell ids are in removed_child_cells
+ # Note: This assumes same indices for hyperbolic and parabolic part.
+ elements_to_remove = findall(in(removed_child_cells), cache.elements.cell_ids)
+
+ # coarsen solver
+ @trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg,
+ cache, cache_parabolic,
+ elements_to_remove)
+ else
+ # If there is nothing to coarsen, create empty array for later use
+ coarsened_original_cells = Int[]
+ end
+
+ # Store whether there were any cells coarsened or refined
+ has_changed = !isempty(refined_original_cells) || !isempty(coarsened_original_cells)
+ if has_changed # TODO: Taal decide, where shall we set this?
+ # don't set it to has_changed since there can be changes from earlier calls
+ mesh.unsaved_changes = true
+ end
+
+ # Dynamically balance computational load by first repartitioning the mesh and then redistributing the cells/elements
+ if has_changed && mpi_isparallel() && amr_callback.dynamic_load_balancing
+ error("MPI has not been verified yet for parabolic AMR")
+
+ @trixi_timeit timer() "dynamic load balancing" begin
+ old_mpi_ranks_per_cell = copy(mesh.tree.mpi_ranks)
+
+ partition!(mesh)
+
+ rebalance_solver!(u_ode, mesh, equations, dg, cache, old_mpi_ranks_per_cell)
+ end
+ end
+
+ # Return true if there were any cells coarsened or refined, otherwise false
+ return has_changed
+end
+
# Copy controller values to quad user data storage, will be called below
function copy_to_quad_iter_volume(info, user_data)
info_pw = PointerWrapper(info)
diff --git a/src/callbacks_step/amr_dg1d.jl b/src/callbacks_step/amr_dg1d.jl
index e31a74730ea..e721ccc61cb 100644
--- a/src/callbacks_step/amr_dg1d.jl
+++ b/src/callbacks_step/amr_dg1d.jl
@@ -76,6 +76,44 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
return nothing
end
+function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
+ equations, dg::DGSEM, cache, cache_parabolic,
+ elements_to_refine)
+ # Call `refine!` for the hyperbolic part, which does the heavy lifting of
+ # actually transferring the solution to the refined cells
+ refine!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_refine)
+
+ # The remaining function only handles the necessary adaptation of the data structures
+ # for the parabolic part of the semidiscretization
+
+ # Get new list of leaf cells
+ leaf_cell_ids = local_leaf_cells(mesh.tree)
+
+ @unpack elements, viscous_container = cache_parabolic
+ resize!(elements, length(leaf_cell_ids))
+ init_elements!(elements, leaf_cell_ids, mesh, dg.basis)
+
+ # Resize parabolic helper variables
+ resize!(viscous_container, equations, dg, cache)
+
+ # re-initialize interfaces container
+ @unpack interfaces = cache_parabolic
+ resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids))
+ init_interfaces!(interfaces, elements, mesh)
+
+ # re-initialize boundaries container
+ @unpack boundaries = cache_parabolic
+ resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids))
+ init_boundaries!(boundaries, elements, mesh)
+
+ # Sanity check
+ if isperiodic(mesh.tree)
+ @assert ninterfaces(interfaces)==1 * nelements(dg, cache_parabolic) ("For 1D and periodic domains, the number of interfaces must be the same as the number of elements")
+ end
+
+ return nothing
+end
+
# TODO: Taal compare performance of different implementations
# Refine solution data u for an element, using L2 projection (interpolation)
function refine_element!(u::AbstractArray{<:Any, 3}, element_id,
@@ -201,6 +239,41 @@ function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
return nothing
end
+function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{1},
+ equations, dg::DGSEM, cache, cache_parabolic,
+ elements_to_remove)
+ # Call `coarsen!` for the hyperbolic part, which does the heavy lifting of
+ # actually transferring the solution to the coarsened cells
+ coarsen!(u_ode, adaptor, mesh, equations, dg, cache, elements_to_remove)
+
+ # Get new list of leaf cells
+ leaf_cell_ids = local_leaf_cells(mesh.tree)
+
+ @unpack elements, viscous_container = cache_parabolic
+ resize!(elements, length(leaf_cell_ids))
+ init_elements!(elements, leaf_cell_ids, mesh, dg.basis)
+
+ # Resize parabolic helper variables
+ resize!(viscous_container, equations, dg, cache)
+
+ # re-initialize interfaces container
+ @unpack interfaces = cache_parabolic
+ resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids))
+ init_interfaces!(interfaces, elements, mesh)
+
+ # re-initialize boundaries container
+ @unpack boundaries = cache_parabolic
+ resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids))
+ init_boundaries!(boundaries, elements, mesh)
+
+ # Sanity check
+ if isperiodic(mesh.tree)
+ @assert ninterfaces(interfaces)==1 * nelements(dg, cache_parabolic) ("For 1D and periodic domains, the number of interfaces must be the same as the number of elements")
+ end
+
+ return nothing
+end
+
# TODO: Taal compare performance of different implementations
# Coarsen solution data u for two elements, using L2 projection
function coarsen_elements!(u::AbstractArray{<:Any, 3}, element_id,
diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index fad42b11098..e5b4a01a885 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -232,6 +232,12 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
@unpack dt, t = integrator
iter = integrator.stats.naccept
+ # Compute the percentage of the simulation that is done
+ t = integrator.t
+ t_initial = first(integrator.sol.prob.tspan)
+ t_final = last(integrator.sol.prob.tspan)
+ sim_time_percentage = (t - t_initial) / (t_final - t_initial) * 100
+
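As a quick standalone check of the percentage computed above (the values are arbitrary examples, not taken from the callback):

tspan = (0.0, 2.0)   # hypothetical time span of the ODE problem
t = 0.5              # hypothetical current time
(t - first(tspan)) / (last(tspan) - first(tspan)) * 100   # == 25.0
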
# Record performance measurements and compute performance index (PID)
runtime_since_last_analysis = 1.0e-9 * (time_ns() -
analysis_callback.start_time_last_analysis)
@@ -291,13 +297,13 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
" " *
" └── GC time: " *
@sprintf("%10.8e s (%5.3f%%)", gc_time_absolute, gc_time_percentage))
- mpi_println(" sim. time: " * @sprintf("%10.8e", t) *
- " " *
+ mpi_println(rpad(" sim. time: " *
+ @sprintf("%10.8e (%5.3f%%)", t, sim_time_percentage), 46) *
" time/DOF/rhs!: " * @sprintf("%10.8e s", runtime_relative))
mpi_println(" " * " " *
" " *
" PID: " * @sprintf("%10.8e s", performance_index))
- mpi_println(" #DOF: " * @sprintf("% 14d", ndofs(semi)) *
+ mpi_println(" #DOFs per field:" * @sprintf("% 14d", ndofs(semi)) *
" " *
" alloc'd memory: " * @sprintf("%14.3f MiB", memory_use))
mpi_println(" #elements: " *
diff --git a/src/callbacks_step/save_restart.jl b/src/callbacks_step/save_restart.jl
index f567a5c7fda..0d174d85805 100644
--- a/src/callbacks_step/save_restart.jl
+++ b/src/callbacks_step/save_restart.jl
@@ -105,6 +105,11 @@ function (restart_callback::SaveRestartCallback)(integrator)
end
save_restart_file(u_ode, t, dt, iter, semi, restart_callback)
+ # If using an adaptive time stepping scheme, store controller values for restart
+ if integrator.opts.adaptive
+ save_adaptive_time_integrator(integrator, integrator.opts.controller,
+ restart_callback)
+ end
end
# avoid re-evaluating possible FSAL stages
@@ -141,6 +146,18 @@ function load_timestep(restart_file::AbstractString)
end
end
+"""
+ load_timestep!(integrator, restart_file::AbstractString)
+
+Load the time step number saved in a `restart_file` and assign it to both the time step
+number and the number of accepted steps
+(`iter` and `stats.naccept` in OrdinaryDiffEq.jl, respectively) in `integrator`.
+"""
+function load_timestep!(integrator, restart_file::AbstractString)
+ integrator.iter = load_timestep(restart_file)
+ integrator.stats.naccept = integrator.iter
+end
+
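A minimal restart sketch using the new helper; `ode`, `callbacks`, and `restart_filename` are assumed to be set up by the surrounding elixir (`using OrdinaryDiffEq, Trixi` as usual):

integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false),
                  dt = load_dt(restart_filename), save_everystep = false,
                  callback = callbacks)
# Resume the step counters from the restart file instead of starting at 0
load_timestep!(integrator, restart_filename)
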
"""
load_dt(restart_file::AbstractString)
@@ -156,5 +173,36 @@ function load_restart_file(semi::AbstractSemidiscretization, restart_file)
load_restart_file(mesh_equations_solver_cache(semi)..., restart_file)
end
+"""
+ load_adaptive_time_integrator!(integrator, restart_file::AbstractString)
+
+Load the context information for time integrators with error-based step size control
+saved in a `restart_file`.
+"""
+function load_adaptive_time_integrator!(integrator, restart_file::AbstractString)
+ controller = integrator.opts.controller
+ # Read context information for controller
+ h5open(restart_file, "r") do file
+ # Ensure that the necessary information was saved
+ if !("time_integrator_qold" in keys(attributes(file))) ||
+ !("time_integrator_dtpropose" in keys(attributes(file))) ||
+ (hasproperty(controller, :err) &&
+ !("time_integrator_controller_err" in keys(attributes(file))))
+ error("Missing data in restart file: check the consistency of adaptive time controller with initial setup!")
+ end
+ # Load data that is required both for PIController and PIDController
+ integrator.qold = read(attributes(file)["time_integrator_qold"])
+ integrator.dtpropose = read(attributes(file)["time_integrator_dtpropose"])
+ # Accept step to use dtpropose already in the first step
+ integrator.accept_step = true
+ # Reevaluate integrator.fsal_first on the first step
+ integrator.reeval_fsal = true
+ # Load additional parameters for PIDController
+ if hasproperty(controller, :err) # Distinguish PIDController from PIController
+ controller.err[:] = read(attributes(file)["time_integrator_controller_err"])
+ end
+ end
+end
+
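For error-based step size control, the controller state can be restored as well. A sketch, assuming `integrator` was created with an adaptive method (e.g. `RDPK3SpFSAL49()`) and that the restart file was written by a run that already included `save_adaptive_time_integrator`:

load_adaptive_time_integrator!(integrator, restart_filename)
integrator.dtpropose   # proposed step size carried over from the previous run
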
include("save_restart_dg.jl")
end # @muladd
diff --git a/src/callbacks_step/save_restart_dg.jl b/src/callbacks_step/save_restart_dg.jl
index 8db6db2d2b8..cddeef77bb2 100644
--- a/src/callbacks_step/save_restart_dg.jl
+++ b/src/callbacks_step/save_restart_dg.jl
@@ -327,4 +327,28 @@ function load_restart_file_on_root(mesh::Union{ParallelTreeMesh, ParallelP4estMe
return u_ode
end
+
+# Store controller values for an adaptive time stepping scheme
+function save_adaptive_time_integrator(integrator,
+ controller, restart_callback)
+ # Save only on root
+ if mpi_isroot()
+ @unpack output_directory = restart_callback
+ timestep = integrator.stats.naccept
+
+ # Filename based on current time step
+ filename = joinpath(output_directory, @sprintf("restart_%06d.h5", timestep))
+
+ # Open file (preserve existing content)
+ h5open(filename, "r+") do file
+ # Add context information as attributes both for PIController and PIDController
+ attributes(file)["time_integrator_qold"] = integrator.qold
+ attributes(file)["time_integrator_dtpropose"] = integrator.dtpropose
+ # For the PIDController it is necessary to save additional parameters
+ if hasproperty(controller, :err) # Distinguish PIDController from PIController
+ attributes(file)["time_integrator_controller_err"] = controller.err
+ end
+ end
+ end
+end
end # @muladd
diff --git a/src/callbacks_step/save_solution.jl b/src/callbacks_step/save_solution.jl
index 14ea33368f8..31fe0e87c77 100644
--- a/src/callbacks_step/save_solution.jl
+++ b/src/callbacks_step/save_solution.jl
@@ -222,21 +222,28 @@ end
end
end
+ node_variables = Dict{Symbol, Any}()
+ @trixi_timeit timer() "get node variables" get_node_variables!(node_variables,
+ semi)
+
@trixi_timeit timer() "save solution" save_solution_file(u_ode, t, dt, iter, semi,
solution_callback,
element_variables,
+ node_variables,
system = system)
end
@inline function save_solution_file(u_ode, t, dt, iter,
semi::AbstractSemidiscretization, solution_callback,
- element_variables = Dict{Symbol, Any}();
+ element_variables = Dict{Symbol, Any}(),
+ node_variables = Dict{Symbol, Any}();
system = "")
mesh, equations, solver, cache = mesh_equations_solver_cache(semi)
u = wrap_array_native(u_ode, mesh, equations, solver, cache)
save_solution_file(u, t, dt, iter, mesh, equations, solver, cache,
solution_callback,
- element_variables; system = system)
+ element_variables,
+ node_variables; system = system)
end
# TODO: Taal refactor, move save_mesh_file?
diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl
index 6d5004ff65f..7c015999035 100644
--- a/src/callbacks_step/save_solution_dg.jl
+++ b/src/callbacks_step/save_solution_dg.jl
@@ -10,7 +10,9 @@ function save_solution_file(u, time, dt, timestep,
UnstructuredMesh2D, SerialP4estMesh,
SerialT8codeMesh},
equations, dg::DG, cache,
- solution_callback, element_variables = Dict{Symbol, Any}();
+ solution_callback,
+ element_variables = Dict{Symbol, Any}(),
+ node_variables = Dict{Symbol, Any}();
system = "")
@unpack output_directory, solution_variables = solution_callback
@@ -73,6 +75,16 @@ function save_solution_file(u, time, dt, timestep,
var = file["element_variables_$v"]
attributes(var)["name"] = string(key)
end
+
+ # Store node variables
+ for (v, (key, node_variable)) in enumerate(node_variables)
+ # Add to file
+ file["node_variables_$v"] = node_variable
+
+ # Add variable name as attribute
+ var = file["node_variables_$v"]
+ attributes(var)["name"] = string(key)
+ end
end
return filename
@@ -81,7 +93,9 @@ end
function save_solution_file(u, time, dt, timestep,
mesh::Union{ParallelTreeMesh, ParallelP4estMesh}, equations,
dg::DG, cache,
- solution_callback, element_variables = Dict{Symbol, Any}();
+ solution_callback,
+ element_variables = Dict{Symbol, Any}(),
+ node_variables = Dict{Symbol, Any}();
system = "")
@unpack output_directory, solution_variables = solution_callback
diff --git a/src/equations/compressible_navier_stokes_1d.jl b/src/equations/compressible_navier_stokes_1d.jl
index dca846cac1e..74d672ce7ae 100644
--- a/src/equations/compressible_navier_stokes_1d.jl
+++ b/src/equations/compressible_navier_stokes_1d.jl
@@ -1,3 +1,10 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
@doc raw"""
CompressibleNavierStokesDiffusion1D(equations; mu, Pr,
gradient_variables=GradientVariablesPrimitive())
@@ -77,7 +84,8 @@ w_2 = \frac{\rho v1}{p},\, w_3 = -\frac{\rho}{p}
This code is experimental and may be changed or removed in any future release.
"""
struct CompressibleNavierStokesDiffusion1D{GradientVariables, RealT <: Real,
- E <: AbstractCompressibleEulerEquations{1}} <:
+ E <: AbstractCompressibleEulerEquations{1}
+ } <:
AbstractCompressibleNavierStokesDiffusion{1, 3}
# TODO: parabolic
# 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations
@@ -109,7 +117,8 @@ function CompressibleNavierStokesDiffusion1D(equations::CompressibleEulerEquatio
CompressibleNavierStokesDiffusion1D{typeof(gradient_variables), typeof(gamma),
typeof(equations)}(gamma, inv_gamma_minus_one,
μ, Pr, kappa,
- equations, gradient_variables)
+ equations,
+ gradient_variables)
end
# TODO: parabolic
@@ -263,7 +272,8 @@ end
u_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesPrimitive
@@ -278,7 +288,8 @@ end
u_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesPrimitive
@@ -299,7 +310,8 @@ end
u_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesPrimitive
@@ -316,7 +328,8 @@ end
u_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesPrimitive
@@ -337,7 +350,8 @@ end
w_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesEntropy
@@ -354,7 +368,8 @@ end
w_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesEntropy
@@ -374,7 +389,8 @@ end
w_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesEntropy
@@ -394,10 +410,12 @@ end
w_inner,
orientation::Integer,
direction,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion1D{
GradientVariablesEntropy
})
return SVector(flux_inner[1], flux_inner[2], flux_inner[3])
end
+end # @muladd
diff --git a/src/equations/compressible_navier_stokes_2d.jl b/src/equations/compressible_navier_stokes_2d.jl
index f762fe5d5ee..80857999017 100644
--- a/src/equations/compressible_navier_stokes_2d.jl
+++ b/src/equations/compressible_navier_stokes_2d.jl
@@ -1,3 +1,10 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
@doc raw"""
CompressibleNavierStokesDiffusion2D(equations; mu, Pr,
gradient_variables=GradientVariablesPrimitive())
@@ -77,7 +84,8 @@ w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = -\frac{\rho}{p}
This code is experimental and may be changed or removed in any future release.
"""
struct CompressibleNavierStokesDiffusion2D{GradientVariables, RealT <: Real,
- E <: AbstractCompressibleEulerEquations{2}} <:
+ E <: AbstractCompressibleEulerEquations{2}
+ } <:
AbstractCompressibleNavierStokesDiffusion{2, 4}
# TODO: parabolic
# 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations
@@ -109,7 +117,8 @@ function CompressibleNavierStokesDiffusion2D(equations::CompressibleEulerEquatio
CompressibleNavierStokesDiffusion2D{typeof(gradient_variables), typeof(gamma),
typeof(equations)}(gamma, inv_gamma_minus_one,
μ, Pr, kappa,
- equations, gradient_variables)
+ equations,
+ gradient_variables)
end
# TODO: parabolic
@@ -301,12 +310,14 @@ end
<:Adiabatic})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesPrimitive
})
- v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t,
+ v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x,
+ t,
equations)
return SVector(u_inner[1], v1, v2, u_inner[4])
end
@@ -315,7 +326,8 @@ end
<:Adiabatic})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesPrimitive
@@ -324,7 +336,8 @@ end
normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x,
t,
equations)
- v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t,
+ v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x,
+ t,
equations)
_, tau_1n, tau_2n, _ = flux_inner # extract fluxes for 2nd and 3rd equations
normal_energy_flux = v1 * tau_1n + v2 * tau_2n + normal_heat_flux
@@ -335,12 +348,14 @@ end
<:Isothermal})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesPrimitive
})
- v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t,
+ v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x,
+ t,
equations)
T = boundary_condition.boundary_condition_heat_flux.boundary_value_function(x, t,
equations)
@@ -351,7 +366,8 @@ end
<:Isothermal})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesPrimitive
@@ -371,12 +387,14 @@ end
<:Adiabatic})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesEntropy
})
- v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t,
+ v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x,
+ t,
equations)
negative_rho_inv_p = w_inner[4] # w_4 = -rho / p
return SVector(w_inner[1], -v1 * negative_rho_inv_p, -v2 * negative_rho_inv_p,
@@ -388,7 +406,8 @@ end
<:Adiabatic})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesEntropy
@@ -396,7 +415,8 @@ end
normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x,
t,
equations)
- v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t,
+ v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x,
+ t,
equations)
_, tau_1n, tau_2n, _ = flux_inner # extract fluxes for 2nd and 3rd equations
normal_energy_flux = v1 * tau_1n + v2 * tau_2n + normal_heat_flux
@@ -407,12 +427,14 @@ end
<:Isothermal})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesEntropy
})
- v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t,
+ v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x,
+ t,
equations)
T = boundary_condition.boundary_condition_heat_flux.boundary_value_function(x, t,
equations)
@@ -426,10 +448,41 @@ end
<:Isothermal})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion2D{
GradientVariablesEntropy
})
return SVector(flux_inner[1], flux_inner[2], flux_inner[3], flux_inner[4])
end
+
+# Dirichlet Boundary Condition for P4est mesh
+
+@inline function (boundary_condition::BoundaryConditionDirichlet)(flux_inner,
+ u_inner,
+ normal::AbstractVector,
+ x, t,
+ operator_type::Gradient,
+ equations::CompressibleNavierStokesDiffusion2D{
+ GradientVariablesPrimitive
+ })
+ # BCs are usually specified as conservative variables so we convert them to primitive variables
+ # because the gradients are assumed to be with respect to the primitive variables
+ u_boundary = boundary_condition.boundary_value_function(x, t, equations)
+
+ return cons2prim(u_boundary, equations)
+end
+
+@inline function (boundary_condition::BoundaryConditionDirichlet)(flux_inner,
+ u_inner,
+ normal::AbstractVector,
+ x, t,
+ operator_type::Divergence,
+ equations::CompressibleNavierStokesDiffusion2D{
+ GradientVariablesPrimitive
+ })
+ # for Dirichlet boundary conditions, we do not impose any conditions on the viscous fluxes
+ return flux_inner
+end
+end # @muladd
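A sketch of how the new Dirichlet condition could be used for the parabolic part on a `P4estMesh`; the boundary names and the remaining setup objects (`mesh`, `equations`, `equations_parabolic`, `solver`, `boundary_conditions`) are assumptions from a typical elixir, not part of this diff:

bc_parabolic = BoundaryConditionDirichlet(initial_condition_convergence_test)
boundary_conditions_parabolic = Dict(:x_neg => bc_parabolic, :x_pos => bc_parabolic,
                                     :y_neg => bc_parabolic, :y_pos => bc_parabolic)
semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic),
                                             initial_condition_convergence_test, solver;
                                             boundary_conditions = (boundary_conditions,
                                                                    boundary_conditions_parabolic))
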
diff --git a/src/equations/compressible_navier_stokes_3d.jl b/src/equations/compressible_navier_stokes_3d.jl
index 166b53bf615..de2cad99ea8 100644
--- a/src/equations/compressible_navier_stokes_3d.jl
+++ b/src/equations/compressible_navier_stokes_3d.jl
@@ -1,3 +1,10 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
@doc raw"""
CompressibleNavierStokesDiffusion3D(equations; mu, Pr,
gradient_variables=GradientVariablesPrimitive())
@@ -77,7 +84,8 @@ w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = \frac{\rho v_3}{p}
This code is experimental and may be changed or removed in any future release.
"""
struct CompressibleNavierStokesDiffusion3D{GradientVariables, RealT <: Real,
- E <: AbstractCompressibleEulerEquations{3}} <:
+ E <: AbstractCompressibleEulerEquations{3}
+ } <:
AbstractCompressibleNavierStokesDiffusion{3, 5}
# TODO: parabolic
# 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations
@@ -109,7 +117,8 @@ function CompressibleNavierStokesDiffusion3D(equations::CompressibleEulerEquatio
CompressibleNavierStokesDiffusion3D{typeof(gradient_variables), typeof(gamma),
typeof(equations)}(gamma, inv_gamma_minus_one,
μ, Pr, kappa,
- equations, gradient_variables)
+ equations,
+ gradient_variables)
end
# TODO: parabolic
@@ -319,9 +328,12 @@ end
@inline function vorticity(u, gradients, equations::CompressibleNavierStokesDiffusion3D)
# Ensure that we have velocity `gradients` by way of the `convert_gradient_variables` function.
- _, dv1dx, dv2dx, dv3dx, _ = convert_derivative_to_primitive(u, gradients[1], equations)
- _, dv1dy, dv2dy, dv3dy, _ = convert_derivative_to_primitive(u, gradients[2], equations)
- _, dv1dz, dv2dz, dv3dz, _ = convert_derivative_to_primitive(u, gradients[3], equations)
+ _, dv1dx, dv2dx, dv3dx, _ = convert_derivative_to_primitive(u, gradients[1],
+ equations)
+ _, dv1dy, dv2dy, dv3dy, _ = convert_derivative_to_primitive(u, gradients[2],
+ equations)
+ _, dv1dz, dv2dz, dv3dz, _ = convert_derivative_to_primitive(u, gradients[3],
+ equations)
return SVector(dv3dy - dv2dz, dv1dz - dv3dx, dv2dx - dv1dy)
end
@@ -330,7 +342,8 @@ end
<:Adiabatic})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesPrimitive
@@ -345,7 +358,8 @@ end
<:Adiabatic})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesPrimitive
@@ -367,7 +381,8 @@ end
<:Isothermal})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesPrimitive
@@ -384,7 +399,8 @@ end
<:Isothermal})(flux_inner,
u_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesPrimitive
@@ -404,7 +420,8 @@ end
<:Adiabatic})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesEntropy
@@ -422,7 +439,8 @@ end
<:Adiabatic})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesEntropy
@@ -443,7 +461,8 @@ end
<:Isothermal})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Gradient,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesEntropy
@@ -463,7 +482,8 @@ end
<:Isothermal})(flux_inner,
w_inner,
normal::AbstractVector,
- x, t,
+ x,
+ t,
operator_type::Divergence,
equations::CompressibleNavierStokesDiffusion3D{
GradientVariablesEntropy
@@ -471,3 +491,4 @@ end
return SVector(flux_inner[1], flux_inner[2], flux_inner[3], flux_inner[4],
flux_inner[5])
end
+end # @muladd
diff --git a/src/equations/equations.jl b/src/equations/equations.jl
index 90b2cd62191..3142dcc2765 100644
--- a/src/equations/equations.jl
+++ b/src/equations/equations.jl
@@ -75,8 +75,14 @@ end
@inline Base.ndims(::AbstractEquations{NDIMS}) where {NDIMS} = NDIMS
-# equations act like scalars in broadcasting
-Base.broadcastable(equations::AbstractEquations) = Ref(equations)
+# Equations act like scalars in broadcasting.
+# Using `Ref(equations)` would be more convenient in some circumstances.
+# However, this does not work with Julia v1.9.3 correctly due to a (performance)
+# bug in Julia, see
+# - https://github.com/trixi-framework/Trixi.jl/pull/1618
+# - https://github.com/JuliaLang/julia/issues/51118
+# Thus, we use the workaround below.
+Base.broadcastable(equations::AbstractEquations) = (equations,)
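The user-facing behavior is unchanged: an equations object still acts like a scalar under broadcasting. A small sketch with example values:

using Trixi
equations = CompressibleEulerEquations1D(1.4)
u_values = [SVector(1.0, 0.1, 2.5), SVector(1.2, -0.1, 2.8)]
flux.(u_values, 1, equations)   # broadcasts over `u_values` only, not over `equations`
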
"""
flux(u, orientation_or_normal, equations)
@@ -350,6 +356,7 @@ include("shallow_water_1d.jl")
include("shallow_water_2d.jl")
include("shallow_water_two_layer_1d.jl")
include("shallow_water_two_layer_2d.jl")
+include("shallow_water_quasi_1d.jl")
# CompressibleEulerEquations
abstract type AbstractCompressibleEulerEquations{NDIMS, NVARS} <:
@@ -364,6 +371,11 @@ abstract type AbstractCompressibleEulerMulticomponentEquations{NDIMS, NVARS, NCO
include("compressible_euler_multicomponent_1d.jl")
include("compressible_euler_multicomponent_2d.jl")
+# PolytropicEulerEquations
+abstract type AbstractPolytropicEulerEquations{NDIMS, NVARS} <:
+ AbstractEquations{NDIMS, NVARS} end
+include("polytropic_euler_2d.jl")
+
# Retrieve number of components from equation instance for the multicomponent case
@inline function ncomponents(::AbstractCompressibleEulerMulticomponentEquations{NDIMS,
NVARS,
diff --git a/src/equations/polytropic_euler_2d.jl b/src/equations/polytropic_euler_2d.jl
new file mode 100644
index 00000000000..d4902bbafb2
--- /dev/null
+++ b/src/equations/polytropic_euler_2d.jl
@@ -0,0 +1,257 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+@doc raw"""
+ PolytropicEulerEquations2D(gamma, kappa)
+
+The polytropic Euler equations
+```math
+\frac{\partial}{\partial t}
+\begin{pmatrix}
+\rho \\ \rho v_1 \\ \rho v_2
+\end{pmatrix}
++
+\frac{\partial}{\partial x}
+\begin{pmatrix}
+ \rho v_1 \\ \rho v_1^2 + \kappa\rho^\gamma \\ \rho v_1 v_2
+\end{pmatrix}
++
+\frac{\partial}{\partial y}
+\begin{pmatrix}
+\rho v_2 \\ \rho v_1 v_2 \\ \rho v_2^2 + \kappa\rho^\gamma
+\end{pmatrix}
+=
+\begin{pmatrix}
+0 \\ 0 \\ 0
+\end{pmatrix}
+```
+for an ideal gas with ratio of specific heats `gamma`
+in two space dimensions.
+Here, ``\rho`` is the density, ``v_1`` and ``v_2`` are the velocities, and
+```math
+p = \kappa\rho^\gamma
+```
+the pressure, which we replaced using this relation.
+"""
+struct PolytropicEulerEquations2D{RealT <: Real} <:
+ AbstractPolytropicEulerEquations{2, 3}
+ gamma::RealT # ratio of specific heats
+ kappa::RealT # fluid scaling factor
+
+ function PolytropicEulerEquations2D(gamma, kappa)
+ gamma_, kappa_ = promote(gamma, kappa)
+ new{typeof(gamma_)}(gamma_, kappa_)
+ end
+end
+
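A short sketch of constructing the new equations and evaluating the pressure relation p = kappa * rho^gamma; the parameter and state values are arbitrary examples, and the type is assumed to be exported like the other equation types:

using Trixi
equations = PolytropicEulerEquations2D(1.4, 0.5)    # gamma = 1.4, kappa = 0.5
u = prim2cons(SVector(1.1, 0.2, -0.3), equations)   # (rho, v1, v2) -> conservative variables
pressure(u, equations)                              # 0.5 * 1.1^1.4
flux(u, SVector(1.0, 0.0), equations)               # flux in the x-direction
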
+function varnames(::typeof(cons2cons), ::PolytropicEulerEquations2D)
+ ("rho", "rho_v1", "rho_v2")
+end
+varnames(::typeof(cons2prim), ::PolytropicEulerEquations2D) = ("rho", "v1", "v2")
+
+"""
+ initial_condition_convergence_test(x, t, equations::PolytropicEulerEquations2D)
+
+Manufactured smooth initial condition used for convergence tests
+in combination with [`source_terms_convergence_test`](@ref).
+"""
+function initial_condition_convergence_test(x, t, equations::PolytropicEulerEquations2D)
+ # manufactured initial condition from Winters (2019) [0.1007/s10543-019-00789-w]
+ # domain must be set to [0, 1] x [0, 1]
+ h = 8 + cos(2 * pi * x[1]) * sin(2 * pi * x[2]) * cos(2 * pi * t)
+
+ return SVector(h, h / 2, 3 * h / 2)
+end
+
+"""
+ source_terms_convergence_test(u, x, t, equations::PolytropicEulerEquations2D)
+
+Source terms used for convergence tests in combination with
+[`initial_condition_convergence_test`](@ref).
+"""
+@inline function source_terms_convergence_test(u, x, t,
+ equations::PolytropicEulerEquations2D)
+ rho, v1, v2 = cons2prim(u, equations)
+
+ # Residual from Winters (2019) [0.1007/s10543-019-00789-w] eq. (5.2).
+ h = 8 + cos(2 * pi * x[1]) * sin(2 * pi * x[2]) * cos(2 * pi * t)
+ h_t = -2 * pi * cos(2 * pi * x[1]) * sin(2 * pi * x[2]) * sin(2 * pi * t)
+ h_x = -2 * pi * sin(2 * pi * x[1]) * sin(2 * pi * x[2]) * cos(2 * pi * t)
+ h_y = 2 * pi * cos(2 * pi * x[1]) * cos(2 * pi * x[2]) * cos(2 * pi * t)
+
+ rho_x = h_x
+ rho_y = h_y
+
+ b = equations.kappa * equations.gamma * h^(equations.gamma - 1)
+
+ r_1 = h_t + h_x / 2 + 3 / 2 * h_y
+ r_2 = h_t / 2 + h_x / 4 + b * rho_x + 3 / 4 * h_y
+ r_3 = 3 / 2 * h_t + 3 / 4 * h_x + 9 / 4 * h_y + b * rho_y
+
+ return SVector(r_1, r_2, r_3)
+end
+
+"""
+ initial_condition_weak_blast_wave(x, t, equations::PolytropicEulerEquations2D)
+
+A weak blast wave adapted from
+- Sebastian Hennemann, Gregor J. Gassner (2020)
+ A provably entropy stable subcell shock capturing approach for high order split form DG
+ [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044)
+"""
+function initial_condition_weak_blast_wave(x, t, equations::PolytropicEulerEquations2D)
+ # Adapted MHD version of the weak blast wave from Hennemann & Gassner JCP paper 2020 (Sec. 6.3)
+ # Set up polar coordinates
+ inicenter = (0, 0)
+ x_norm = x[1] - inicenter[1]
+ y_norm = x[2] - inicenter[2]
+ r = sqrt(x_norm^2 + y_norm^2)
+ phi = atan(y_norm, x_norm)
+
+ # Calculate primitive variables
+ rho = r > 0.5 ? 1.0 : 1.1691
+ v1 = r > 0.5 ? 0.0 : 0.1882 * cos(phi)
+ v2 = r > 0.5 ? 0.0 : 0.1882 * sin(phi)
+
+ return prim2cons(SVector(rho, v1, v2), equations)
+end
+
+# Calculate 1D flux for a single point in the normal direction
+# Note, this directional vector is not normalized
+@inline function flux(u, normal_direction::AbstractVector,
+ equations::PolytropicEulerEquations2D)
+ rho, v1, v2 = cons2prim(u, equations)
+ p = pressure(u, equations)
+
+ v_normal = v1 * normal_direction[1] + v2 * normal_direction[2]
+ rho_v_normal = rho * v_normal
+ f1 = rho_v_normal
+ f2 = rho_v_normal * v1 + p * normal_direction[1]
+ f3 = rho_v_normal * v2 + p * normal_direction[2]
+ return SVector(f1, f2, f3)
+end
+
+"""
+ flux_winters_etal(u_ll, u_rr, normal_direction,
+ equations::PolytropicEulerEquations2D)
+
+Entropy conserving two-point flux for isothermal or polytropic gases.
+Requires a special weighted Stolarsky mean for the evaluation of the density
+denoted here as `stolarsky_mean`. Note, for isothermal gases where `gamma = 1`
+this `stolarsky_mean` becomes the [`ln_mean`](@ref).
+
+For details see Section 3.2 of the following reference
+- Andrew R. Winters, Christof Czernik, Moritz B. Schily & Gregor J. Gassner (2020)
+ Entropy stable numerical approximations for the isothermal and polytropic
+ Euler equations
+ [DOI: 10.1007/s10543-019-00789-w](https://doi.org/10.1007/s10543-019-00789-w)
+"""
+@inline function flux_winters_etal(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::PolytropicEulerEquations2D)
+ # Unpack left and right state
+ rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations)
+ rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations)
+ p_ll = equations.kappa * rho_ll^equations.gamma
+ p_rr = equations.kappa * rho_rr^equations.gamma
+ v_dot_n_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
+ v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
+
+ # Compute the necessary mean values
+ if equations.gamma == 1.0 # isothermal gas
+ rho_mean = ln_mean(rho_ll, rho_rr)
+ else # equations.gamma > 1 # polytropic gas
+ rho_mean = stolarsky_mean(rho_ll, rho_rr, equations.gamma)
+ end
+ v1_avg = 0.5 * (v1_ll + v1_rr)
+ v2_avg = 0.5 * (v2_ll + v2_rr)
+ p_avg = 0.5 * (p_ll + p_rr)
+
+ # Calculate fluxes depending on normal_direction
+ f1 = rho_mean * 0.5 * (v_dot_n_ll + v_dot_n_rr)
+ f2 = f1 * v1_avg + p_avg * normal_direction[1]
+ f3 = f1 * v2_avg + p_avg * normal_direction[2]
+
+ return SVector(f1, f2, f3)
+end
+
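A sketch of plugging this flux into a DGSEM solver as an entropy-conservative volume flux (the polynomial degree and surface flux are arbitrary choices; the flux is assumed to be exported):

solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs,
               volume_integral = VolumeIntegralFluxDifferencing(flux_winters_etal))
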
+@inline function min_max_speed_naive(u_ll, u_rr, normal_direction::AbstractVector,
+ equations::PolytropicEulerEquations2D)
+ rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations)
+ rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations)
+ p_ll = equations.kappa * rho_ll^equations.gamma
+ p_rr = equations.kappa * rho_rr^equations.gamma
+
+ v_normal_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2]
+ v_normal_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2]
+
+ norm_ = norm(normal_direction)
+ # The v_normals are already scaled by the norm
+ lambda_min = v_normal_ll - sqrt(equations.gamma * p_ll / rho_ll) * norm_
+ lambda_max = v_normal_rr + sqrt(equations.gamma * p_rr / rho_rr) * norm_
+
+ return lambda_min, lambda_max
+end
+
+@inline function max_abs_speeds(u, equations::PolytropicEulerEquations2D)
+ rho, v1, v2 = cons2prim(u, equations)
+ c = sqrt(equations.gamma * equations.kappa * rho^(equations.gamma - 1))
+
+ return abs(v1) + c, abs(v2) + c
+end
+
+# Convert conservative variables to primitive
+@inline function cons2prim(u, equations::PolytropicEulerEquations2D)
+ rho, rho_v1, rho_v2 = u
+
+ v1 = rho_v1 / rho
+ v2 = rho_v2 / rho
+
+ return SVector(rho, v1, v2)
+end
+
+# Convert conservative variables to entropy
+@inline function cons2entropy(u, equations::PolytropicEulerEquations2D)
+ rho, rho_v1, rho_v2 = u
+
+ v1 = rho_v1 / rho
+ v2 = rho_v2 / rho
+ v_square = v1^2 + v2^2
+ p = pressure(u, equations)
+ # Form of the internal energy depends on gas type
+ if equations.gamma == 1.0 # isothermal gas
+ internal_energy = equations.kappa * log(rho)
+ else # equations.gamma > 1 # polytropic gas
+ internal_energy = equations.kappa * rho^(equations.gamma - 1) /
+ (equations.gamma - 1.0)
+ end
+
+ w1 = internal_energy + p / rho - 0.5 * v_square
+ w2 = v1
+ w3 = v2
+
+ return SVector(w1, w2, w3)
+end
+
+# Convert primitive to conservative variables
+@inline function prim2cons(prim, equations::PolytropicEulerEquations2D)
+ rho, v1, v2 = prim
+ rho_v1 = rho * v1
+ rho_v2 = rho * v2
+ return SVector(rho, rho_v1, rho_v2)
+end
+
+@inline function density(u, equations::PolytropicEulerEquations2D)
+ rho = u[1]
+ return rho
+end
+
+@inline function pressure(u, equations::PolytropicEulerEquations2D)
+ rho, rho_v1, rho_v2 = u
+ p = equations.kappa * rho^equations.gamma
+ return p
+end
+end # @muladd
diff --git a/src/equations/shallow_water_quasi_1d.jl b/src/equations/shallow_water_quasi_1d.jl
new file mode 100644
index 00000000000..217a764e173
--- /dev/null
+++ b/src/equations/shallow_water_quasi_1d.jl
@@ -0,0 +1,323 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+@doc raw"""
+ ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = 0, threshold_limiter = nothing, threshold_wet = nothing)
+
+The quasi-1D shallow water equations (SWE). The equations are given by
+```math
+\begin{aligned}
+ \frac{\partial}{\partial t}(a h) + \frac{\partial}{\partial x}(a h v) &= 0 \\
+ \frac{\partial}{\partial t}(a h v) + \frac{\partial}{\partial x}(a h v^2)
+ + g a h \frac{\partial}{\partial x}(h + b) &= 0
+\end{aligned}
+```
+The unknown quantities of the Quasi-1D SWE are the water height ``h`` and the scaled velocity ``v``.
+The gravitational constant is denoted by ``g``, the (possibly) variable bottom topography function by ``b(x)``, and the (possibly) variable channel width by ``a(x)``. The water height ``h`` is measured from the bottom topography ``b``; therefore, one also defines the total water height as ``H = h + b``.
+
+The additional quantity ``H_0`` is also available to store a reference value for the total water height that
+is useful to set initial conditions or test the "lake-at-rest" well-balancedness.
+
+Also, there are two thresholds which prevent numerical problems as well as instabilities. Neither of them
+has to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is
+used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial
+condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to
+define when the flow is "wet" before calculating the numerical flux.
+
+The bottom topography function ``b(x)`` and channel width ``a(x)`` are set inside the initial condition routine
+for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography
+variable ``b`` to zero and ``a`` to one.
+
+In addition to the unknowns, Trixi.jl currently stores the bottom topography and channel width values at the approximation points
+despite being fixed in time. This is done for convenience of computing the bottom topography gradients
+on the fly during the approximation as well as computing auxiliary quantities like the total water height ``H``
+or the entropy variables.
+This affects the implementation and use of these equations in various ways:
+* The flux values corresponding to the bottom topography and channel width must be zero.
+* The bottom topography and channel width values must be included when defining initial conditions, boundary conditions or
+ source terms.
+* [`AnalysisCallback`](@ref) analyzes these variables.
+* Trixi.jl's visualization tools will visualize the bottom topography and channel width by default.
+"""
+struct ShallowWaterEquationsQuasi1D{RealT <: Real} <:
+ AbstractShallowWaterEquations{1, 4}
+ gravity::RealT # gravitational constant
+ H0::RealT # constant "lake-at-rest" total water height
+ # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height,
+ # as a (small) shift on the initial condition and cutoff before the next time step.
+ # Default is 500*eps() which in double precision is ≈1e-13.
+ threshold_limiter::RealT
+ # `threshold_wet` applied on water height to define when the flow is "wet"
+ # before calculating the numerical flux.
+ # Default is 5*eps() which in double precision is ≈1e-15.
+ threshold_wet::RealT
+end
+
+# Allow for flexibility to set the gravitational constant within an elixir depending on the
+# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values.
+# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest"
+# well-balancedness test cases.
+# Strict default values for thresholds that performed well in many numerical experiments
+function ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = zero(gravity_constant),
+ threshold_limiter = nothing,
+ threshold_wet = nothing)
+ T = promote_type(typeof(gravity_constant), typeof(H0))
+ if threshold_limiter === nothing
+ threshold_limiter = 500 * eps(T)
+ end
+ if threshold_wet === nothing
+ threshold_wet = 5 * eps(T)
+ end
+ ShallowWaterEquationsQuasi1D(gravity_constant, H0, threshold_limiter, threshold_wet)
+end
+
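A small construction sketch (the gravitational constant and evaluation point are example values; the type is assumed to be exported):

equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.81)
u0 = initial_condition_convergence_test(SVector(0.3), 0.0, equations)
cons2prim(u0, equations)   # (H, v, b, a)
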
+have_nonconservative_terms(::ShallowWaterEquationsQuasi1D) = True()
+function varnames(::typeof(cons2cons), ::ShallowWaterEquationsQuasi1D)
+ ("a_h", "a_h_v", "b", "a")
+end
+# Note, we use the total water height, H = h + b, as the first primitive variable for easier
+# visualization and setting initial conditions
+varnames(::typeof(cons2prim), ::ShallowWaterEquationsQuasi1D) = ("H", "v", "b", "a")
+
+# Set initial conditions at physical location `x` for time `t`
+"""
+ initial_condition_convergence_test(x, t, equations::ShallowWaterEquationsQuasi1D)
+
+A smooth initial condition used for convergence tests in combination with
+[`source_terms_convergence_test`](@ref)
+(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains).
+"""
+function initial_condition_convergence_test(x, t,
+ equations::ShallowWaterEquationsQuasi1D)
+ # generates a manufactured solution.
+ # some constants are chosen such that the function is periodic on the domain [0,sqrt(2)]
+ Omega = sqrt(2) * pi
+ H = 2.0 + 0.5 * sin(Omega * x[1] - t)
+ v = 0.25
+ b = 0.2 - 0.05 * sin(Omega * x[1])
+ a = 1 + 0.1 * cos(Omega * x[1])
+ return prim2cons(SVector(H, v, b, a), equations)
+end
+
+"""
+ source_terms_convergence_test(u, x, t, equations::ShallowWaterEquationsQuasi1D)
+
+Source terms used for convergence tests in combination with
+[`initial_condition_convergence_test`](@ref)
+(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains).
+
+This manufactured solution source term is specifically designed for the bottom topography function
+`b(x) = 0.2 - 0.05 * sin(sqrt(2) * pi * x[1])` and channel width `a(x) = 1 + 0.1 * cos(sqrt(2) * pi * x[1])`
+as defined in [`initial_condition_convergence_test`](@ref).
+"""
+@inline function source_terms_convergence_test(u, x, t,
+ equations::ShallowWaterEquationsQuasi1D)
+ # Same settings as in `initial_condition_convergence_test`. Some derivatives simplify because
+ # this manufactured solution velocity is taken to be constant
+ Omega = sqrt(2) * pi
+ H = 2.0 + 0.5 * sin(Omega * x[1] - t)
+ H_x = 0.5 * cos(Omega * x[1] - t) * Omega
+ H_t = -0.5 * cos(Omega * x[1] - t)
+
+ v = 0.25
+
+ b = 0.2 - 0.05 * sin(Omega * x[1])
+ b_x = -0.05 * cos(Omega * x[1]) * Omega
+
+ a = 1 + 0.1 * cos(Omega * x[1])
+ a_x = -0.1 * sin(Omega * x[1]) * Omega
+
+ du1 = a * H_t + v * (a_x * (H - b) + a * (H_x - b_x))
+ du2 = v * du1 + a * (equations.gravity * (H - b) * H_x)
+
+ return SVector(du1, du2, 0.0, 0.0)
+end
+
+# Calculate 1D flux for a single point
+# Note, the bottom topography and channel width have no flux
+@inline function flux(u, orientation::Integer, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, _, a = u
+ h = waterheight(u, equations)
+ v = velocity(u, equations)
+
+ p = 0.5 * a * equations.gravity * h^2
+
+ f1 = a_h_v
+ f2 = a_h_v * v + p
+
+ return SVector(f1, f2, zero(eltype(u)), zero(eltype(u)))
+end
+
+"""
+ flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+
+Non-symmetric two-point volume flux discretizing the nonconservative (source) term
+that contains the gradient of the bottom topography [`ShallowWaterEquationsQuasi1D`](@ref)
+and the channel width.
+
+Further details are available in the paper:
+- Jesse Chan, Khemraj Shukla, Xinhui Wu, Ruofeng Liu, Prani Nalluri (2023)
+ High order entropy stable schemes for the quasi-one-dimensional
+ shallow water and compressible Euler equations
+ [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089)
+"""
+@inline function flux_nonconservative_chan_etal(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+ a_h_ll, _, b_ll, a_ll = u_ll
+ a_h_rr, _, b_rr, a_rr = u_rr
+
+ h_ll = waterheight(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+
+ z = zero(eltype(u_ll))
+
+ return SVector(z, equations.gravity * a_ll * h_ll * (h_rr + b_rr), z, z)
+end
+
+"""
+ flux_chan_etal(u_ll, u_rr, orientation,
+ equations::ShallowWaterEquationsQuasi1D)
+
+Total energy conservative (mathematical entropy for quasi 1D shallow water equations) split form.
+When the bottom topography is nonzero this scheme will be well-balanced when used as a `volume_flux`.
+The `surface_flux` should still use, e.g., [`FluxPlusDissipation(flux_chan_etal, DissipationLocalLaxFriedrichs())`](@ref).
+
+Further details are available in the paper:
+- Jesse Chan, Khemraj Shukla, Xinhui Wu, Ruofeng Liu, Prani Nalluri (2023)
+ High order entropy stable schemes for the quasi-one-dimensional
+ shallow water and compressible Euler equations
+ [DOI: 10.48550/arXiv.2307.12089](https://doi.org/10.48550/arXiv.2307.12089)
+"""
+@inline function flux_chan_etal(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+ a_h_ll, a_h_v_ll, _, _ = u_ll
+ a_h_rr, a_h_v_rr, _, _ = u_rr
+
+ v_ll = velocity(u_ll, equations)
+ v_rr = velocity(u_rr, equations)
+
+ f1 = 0.5 * (a_h_v_ll + a_h_v_rr)
+ f2 = f1 * 0.5 * (v_ll + v_rr)
+
+ return SVector(f1, f2, zero(eltype(u_ll)), zero(eltype(u_ll)))
+end
+
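Because the system contains a nonconservative term, both the volume and surface fluxes are passed as pairs of a conservative and a nonconservative part. A sketch following the docstring's recommendation (the polynomial degree is an arbitrary example):

volume_flux = (flux_chan_etal, flux_nonconservative_chan_etal)
surface_flux = (FluxPlusDissipation(flux_chan_etal, DissipationLocalLaxFriedrichs()),
                flux_nonconservative_chan_etal)
solver = DGSEM(polydeg = 3, surface_flux = surface_flux,
               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
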
+# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the
+# maximum velocity magnitude plus the maximum speed of sound
+@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer,
+ equations::ShallowWaterEquationsQuasi1D)
+ # Get the velocity quantities
+ v_ll = velocity(u_ll, equations)
+ v_rr = velocity(u_rr, equations)
+
+ # Calculate the wave celerity on the left and right
+ h_ll = waterheight(u_ll, equations)
+ h_rr = waterheight(u_rr, equations)
+ c_ll = sqrt(equations.gravity * h_ll)
+ c_rr = sqrt(equations.gravity * h_rr)
+
+ return max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr)
+end
+
+# Specialized `DissipationLocalLaxFriedrichs` to avoid spurious dissipation in the bottom topography
+# and channel width
+@inline function (dissipation::DissipationLocalLaxFriedrichs)(u_ll, u_rr,
+ orientation_or_normal_direction,
+ equations::ShallowWaterEquationsQuasi1D)
+ λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction,
+ equations)
+ diss = -0.5 * λ * (u_rr - u_ll)
+ return SVector(diss[1], diss[2], zero(eltype(u_ll)), zero(eltype(u_ll)))
+end
+
+@inline function max_abs_speeds(u, equations::ShallowWaterEquationsQuasi1D)
+ h = waterheight(u, equations)
+ v = velocity(u, equations)
+
+ c = sqrt(equations.gravity * h) # wave celerity, consistent with `max_abs_speed_naive` above
+ return (abs(v) + c,)
+end
+
+# Helper function to extract the velocity vector from the conservative variables
+@inline function velocity(u, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, _, _ = u
+
+ v = a_h_v / a_h
+
+ return v
+end
+
+# Convert conservative variables to primitive
+@inline function cons2prim(u, equations::ShallowWaterEquationsQuasi1D)
+ a_h, _, b, a = u
+ h = a_h / a
+ H = h + b
+ v = velocity(u, equations)
+ return SVector(H, v, b, a)
+end
+
+# Convert conservative variables to entropy variables
+# Note, only the first two are the entropy variables, the third and fourth entries still
+# just carry the bottom topography and channel width values for convenience
+@inline function cons2entropy(u, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, b, a = u
+ h = waterheight(u, equations)
+ v = velocity(u, equations)
+ # Entropy variables are the same as the ones in the standard shallow water equations
+ w1 = equations.gravity * (h + b) - 0.5 * v^2
+ w2 = v
+
+ return SVector(w1, w2, b, a)
+end
+
+# Convert primitive to conservative variables
+@inline function prim2cons(prim, equations::ShallowWaterEquationsQuasi1D)
+ H, v, b, a = prim
+
+ a_h = a * (H - b)
+ a_h_v = a_h * v
+ return SVector(a_h, a_h_v, b, a)
+end
+
+@inline function waterheight(u, equations::ShallowWaterEquationsQuasi1D)
+ return u[1] / u[4]
+end
+
+# Entropy function for the shallow water equations is the total energy
+@inline function entropy(cons, equations::ShallowWaterEquationsQuasi1D)
+ a = cons[4]
+ return a * energy_total(cons, equations)
+end
+
+# Calculate total energy for a conservative state `cons`
+@inline function energy_total(cons, equations::ShallowWaterEquationsQuasi1D)
+ a_h, a_h_v, b, a = cons
+ e = (a_h_v^2) / (2 * a * a_h) + 0.5 * equations.gravity * (a_h^2 / a) +
+ equations.gravity * a_h * b
+ return e
+end
+
+# Calculate the error for the "lake-at-rest" test case where H = h+b should
+# be a constant value over time. Note, assumes there is a single reference
+# water height `H0` with which to compare.
+#
+# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need
+# to modify or have different versions of the `lake_at_rest_error` function
+@inline function lake_at_rest_error(u, equations::ShallowWaterEquationsQuasi1D)
+ _, _, b, _ = u
+ h = waterheight(u, equations)
+
+ # For well-balancedness testing with possible wet/dry regions the reference
+ # water height `H0` accounts for the possibility that the bottom topography
+ # can emerge out of the water as well as for the threshold offset to avoid
+ # division by a "hard" zero water height.
+ H0_wet_dry = max(equations.H0, b + equations.threshold_limiter)
+
+ return abs(H0_wet_dry - (h + b))
+end
+end # @muladd
diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl
index b9895e7d454..92e38ce1bf3 100644
--- a/src/meshes/mesh_io.jl
+++ b/src/meshes/mesh_io.jl
@@ -263,32 +263,27 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT)
size = Tuple(size_)
# TODO: `@eval` is evil
- # A temporary workaround to evaluate the code that defines the domain mapping in a local scope.
- # This prevents errors when multiple restart elixirs are executed in one session, where one
- # defines `mapping` as a variable, while the other defines it as a function.
#
# This should be replaced with something more robust and secure,
# see https://github.com/trixi-framework/Trixi.jl/issues/541).
- expr = Meta.parse(mapping_as_string)
- if expr.head == :toplevel
- expr.head = :block
- end
-
if ndims == 1
- mapping = @eval function (xi)
- $expr
+ mapping = eval(Meta.parse("""function (xi)
+ $mapping_as_string
mapping(xi)
end
+ """))
elseif ndims == 2
- mapping = @eval function (xi, eta)
- $expr
+ mapping = eval(Meta.parse("""function (xi, eta)
+ $mapping_as_string
mapping(xi, eta)
end
+ """))
else # ndims == 3
- mapping = @eval function (xi, eta, zeta)
- $expr
+ mapping = eval(Meta.parse("""function (xi, eta, zeta)
+ $mapping_as_string
mapping(xi, eta, zeta)
end
+ """))
end
mesh = StructuredMesh(size, mapping; RealT = RealT, unsaved_changes = false,
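A standalone sketch of the evaluation pattern used above, with a hypothetical two-dimensional mapping string of the same shape as the one produced by `StructuredMesh` (see the next file); `Trixi.coordinates2mapping` is the internal helper referenced by that string:

using Trixi
mapping_as_string = """
coordinates_min = (-1.0, -1.0)
coordinates_max = (1.0, 1.0)
mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max)
"""
mapping = eval(Meta.parse("""function (xi, eta)
    $mapping_as_string
    mapping(xi, eta)
end
"""))
Base.invokelatest(mapping, 0.1, -0.2)   # world-age-safe call of the reconstructed mapping
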
diff --git a/src/meshes/structured_mesh.jl b/src/meshes/structured_mesh.jl
index df067db833d..553aabbbc20 100644
--- a/src/meshes/structured_mesh.jl
+++ b/src/meshes/structured_mesh.jl
@@ -96,13 +96,17 @@ function StructuredMesh(cells_per_dimension, faces::Tuple; RealT = Float64,
# Collect definitions of face functions in one string (separated by semicolons)
face2substring(face) = code_string(face, ntuple(_ -> Float64, NDIMS - 1))
- join_semicolon(strings) = join(strings, "; ")
+ join_newline(strings) = join(strings, "\n")
- faces_definition = faces .|> face2substring .|> string |> join_semicolon
+ faces_definition = faces .|> face2substring .|> string |> join_newline
# Include faces definition in `mapping_as_string` to allow for evaluation
# without knowing the face functions
- mapping_as_string = "$faces_definition; faces = $(string(faces)); mapping = transfinite_mapping(faces)"
+ mapping_as_string = """
+ $faces_definition
+ faces = $(string(faces))
+ mapping = transfinite_mapping(faces)
+ """
return StructuredMesh(cells_per_dimension, mapping; RealT = RealT,
periodicity = periodicity,
@@ -123,13 +127,14 @@ Create a StructuredMesh that represents a uncurved structured mesh with a rectan
"""
function StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max;
periodicity = true)
- NDIMS = length(cells_per_dimension)
RealT = promote_type(eltype(coordinates_min), eltype(coordinates_max))
mapping = coordinates2mapping(coordinates_min, coordinates_max)
- mapping_as_string = "coordinates_min = $coordinates_min; " *
- "coordinates_max = $coordinates_max; " *
- "mapping = coordinates2mapping(coordinates_min, coordinates_max)"
+ mapping_as_string = """
+ coordinates_min = $coordinates_min
+ coordinates_max = $coordinates_max
+ mapping = coordinates2mapping(coordinates_min, coordinates_max)
+ """
return StructuredMesh(cells_per_dimension, mapping; RealT = RealT,
periodicity = periodicity,
mapping_as_string = mapping_as_string)
diff --git a/src/meshes/tree_mesh.jl b/src/meshes/tree_mesh.jl
index 93ba982bce9..05699d17d16 100644
--- a/src/meshes/tree_mesh.jl
+++ b/src/meshes/tree_mesh.jl
@@ -199,6 +199,7 @@ function Base.show(io::IO, ::MIME"text/plain",
"length" => mesh.tree.length_level_0,
"periodicity" => mesh.tree.periodicity,
"current #cells" => mesh.tree.length,
+ "#leaf-cells" => count_leaf_cells(mesh.tree),
"maximum #cells" => mesh.tree.capacity,
]
summary_box(io, "TreeMesh{" * string(NDIMS) * ", " * string(TreeType) * "}",
diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index c784f716426..fe7858e31ee 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -335,6 +335,10 @@ function get_element_variables!(element_variables, u_ode,
get_element_variables!(element_variables, u, mesh_equations_solver_cache(semi)...)
end
+function get_node_variables!(node_variables, semi::AbstractSemidiscretization)
+ get_node_variables!(node_variables, mesh_equations_solver_cache(semi)...)
+end
+
# To implement AMR and use OrdinaryDiffEq.jl etc., we have to be a bit creative.
# Since the caches of the SciML ecosystem are immutable structs, we cannot simply
# change the underlying arrays therein. Hence, to support changing the number of
diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index a74bed59eb8..1cd42debd3a 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -71,7 +71,7 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationCoupled)
summary_line(increment_indent(io), "source terms", semi.semis[i].source_terms)
summary_line(increment_indent(io), "solver", solver |> typeof |> nameof)
end
- summary_line(io, "total #DOFs", ndofs(semi))
+ summary_line(io, "total #DOFs per field", ndofs(semi))
summary_footer(io)
end
end
diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl
index 50b2c21c14e..9d465bfcc5f 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic.jl
@@ -243,7 +243,7 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationHyperboli
summary_line(io, "source terms", semi.source_terms)
summary_line(io, "solver", semi.solver |> typeof |> nameof)
- summary_line(io, "total #DOFs", ndofs(semi))
+ summary_line(io, "total #DOFs per field", ndofs(semi))
summary_footer(io)
end
end
diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
index b12ecadb58b..35340d65b1e 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
@@ -228,7 +228,7 @@ function Base.show(io::IO, ::MIME"text/plain",
summary_line(io, "source terms", semi.source_terms)
summary_line(io, "solver", semi.solver |> typeof |> nameof)
summary_line(io, "parabolic solver", semi.solver_parabolic |> typeof |> nameof)
- summary_line(io, "total #DOFs", ndofs(semi))
+ summary_line(io, "total #DOFs per field", ndofs(semi))
summary_footer(io)
end
end
diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl
index 4883f34ed16..c93e7418a03 100644
--- a/src/solvers/dg.jl
+++ b/src/solvers/dg.jl
@@ -12,6 +12,11 @@ function get_element_variables!(element_variables, u, mesh, equations,
nothing
end
+function get_node_variables!(node_variables, mesh, equations,
+ volume_integral::AbstractVolumeIntegral, dg, cache)
+ nothing
+end
+
"""
VolumeIntegralStrongForm()
@@ -214,6 +219,18 @@ function Base.show(io::IO, mime::MIME"text/plain",
end
end
+function get_node_variables!(node_variables, mesh, equations,
+ volume_integral::VolumeIntegralSubcellLimiting, dg, cache)
+ # While for the element-wise limiting with `VolumeIntegralShockCapturingHG` the indicator is
+ # called here to get up-to-date values for IO, this is not easily possible in this case
+ # because the calculation is tightly integrated into the method.
+ # See also https://github.com/trixi-framework/Trixi.jl/pull/1611#discussion_r1334553206.
+ # Therefore, the coefficients at `t=t^{n-1}` are saved. Thus, the coefficients of the first
+ # stored solution (initial condition) are not yet defined and were manually set to `NaN`.
+ get_node_variables!(node_variables, volume_integral.limiter, volume_integral,
+ equations)
+end
+
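For reference, a hypothetical sketch of a limiter-specific method that the call above would dispatch to; the limiter type name and the field storing the nodewise coefficients are assumptions for illustration, not the actual Trixi.jl implementation:

function get_node_variables!(node_variables, limiter::MyNodewiseLimiter,
                             ::VolumeIntegralSubcellLimiting, equations)
    # Coefficients computed at the previous time step, cf. the comment above
    node_variables[:limiting_coefficient] = limiter.cache.alpha
    return nothing
end
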
# TODO: FD. Should this definition live in a different file because it is
# not strictly a DG method?
"""
@@ -403,6 +420,10 @@ function get_element_variables!(element_variables, u, mesh, equations, dg::DG, c
dg, cache)
end
+function get_node_variables!(node_variables, mesh, equations, dg::DG, cache)
+ get_node_variables!(node_variables, mesh, equations, dg.volume_integral, dg, cache)
+end
+
const MeshesDGSEM = Union{TreeMesh, StructuredMesh, StructuredMeshView, UnstructuredMesh2D,
P4estMesh, T8codeMesh}
diff --git a/src/solvers/dgmulti/dg.jl b/src/solvers/dgmulti/dg.jl
index bc76aa1a9d2..182a486dce5 100644
--- a/src/solvers/dgmulti/dg.jl
+++ b/src/solvers/dgmulti/dg.jl
@@ -302,7 +302,12 @@ function calc_volume_integral!(du, u, mesh::DGMultiMesh,
@threaded for e in eachelement(mesh, dg, cache)
flux_values = local_values_threaded[Threads.threadid()]
for i in eachdim(mesh)
- flux_values .= flux.(view(u_values, :, e), i, equations)
+ # Here, the broadcasting operation does allocate
+ #flux_values .= flux.(view(u_values, :, e), i, equations)
+ # Use loop instead
+ for j in eachindex(flux_values)
+ flux_values[j] = flux(u_values[j, e], i, equations)
+ end
for j in eachdim(mesh)
apply_to_each_field(mul_by_accum!(weak_differentiation_matrices[j],
dxidxhatj[i, j][1, e]),
@@ -327,6 +332,7 @@ function calc_volume_integral!(du, u, mesh::DGMultiMesh{NDIMS, <:NonAffine},
@threaded for e in eachelement(mesh, dg, cache)
flux_values = cache.flux_threaded[Threads.threadid()]
for i in eachdim(mesh)
+ # Here, the broadcasting operation does not allocate
flux_values[i] .= flux.(view(u_values, :, e), i, equations)
end
diff --git a/src/solvers/dgmulti/dg_parabolic.jl b/src/solvers/dgmulti/dg_parabolic.jl
index 72dbe2c4256..7dfe4430244 100644
--- a/src/solvers/dgmulti/dg_parabolic.jl
+++ b/src/solvers/dgmulti/dg_parabolic.jl
@@ -62,9 +62,10 @@ end
function transform_variables!(u_transformed, u, mesh,
equations_parabolic::AbstractEquationsParabolic,
dg::DGMulti, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for i in eachindex(u)
- u_transformed[i] = gradient_variable_transformation(equations_parabolic)(u[i],
- equations_parabolic)
+ u_transformed[i] = transformation(u[i], equations_parabolic)
end
end
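Note on the hunk above: the transformation is now looked up once before the threaded loop instead of being re-evaluated for every entry. A minimal standalone sketch of the hoisting pattern, with `transformation_for` as a hypothetical stand-in for `gradient_variable_transformation`.

transformation_for(equations) = identity   # hypothetical stand-in

function transform_all!(u_transformed, u, equations)
    transformation = transformation_for(equations)   # hoisted out of the loop
    for i in eachindex(u)
        u_transformed[i] = transformation(u[i])
    end
    return nothing
end

u = rand(8)
u_transformed = similar(u)
transform_all!(u_transformed, u, nothing)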
diff --git a/src/solvers/dgmulti/sbp.jl b/src/solvers/dgmulti/sbp.jl
index ba02d812041..d434d3146ce 100644
--- a/src/solvers/dgmulti/sbp.jl
+++ b/src/solvers/dgmulti/sbp.jl
@@ -36,312 +36,6 @@ function DGMulti(element_type::AbstractElemShape,
surface_integral = surface_integral, volume_integral = volume_integral)
end
-function construct_1d_operators(D::AbstractDerivativeOperator, tol)
- nodes_1d = collect(grid(D))
- M = SummationByPartsOperators.mass_matrix(D)
- if M isa UniformScaling
- weights_1d = M * ones(Bool, length(nodes_1d))
- else
- weights_1d = diag(M)
- end
-
- # StartUpDG assumes nodes from -1 to +1. Thus, we need to re-scale everything.
- # We can adjust the grid spacing as follows.
- xmin = SummationByPartsOperators.xmin(D)
- xmax = SummationByPartsOperators.xmax(D)
- factor = 2 / (xmax - xmin)
- @. nodes_1d = factor * (nodes_1d - xmin) - 1
- @. weights_1d = factor * weights_1d
-
- D_1d = droptol!(inv(factor) * sparse(D), tol)
- I_1d = Diagonal(ones(Bool, length(nodes_1d)))
-
- return nodes_1d, weights_1d, D_1d, I_1d
-end
-
-function StartUpDG.RefElemData(element_type::Line,
- D::AbstractDerivativeOperator;
- tol = 100 * eps())
- approximation_type = D
- N = SummationByPartsOperators.accuracy_order(D) # kind of polynomial degree
-
- # 1D operators
- nodes_1d, weights_1d, D_1d = construct_1d_operators(D, tol)
-
- # volume
- rq = r = nodes_1d
- wq = weights_1d
- Dr = D_1d
- M = Diagonal(wq)
- Pq = LinearAlgebra.I
- Vq = LinearAlgebra.I
-
- VDM = nothing # unused generalized Vandermonde matrix
-
- rst = (r,)
- rstq = (rq,)
- Drst = (Dr,)
-
- # face
- face_vertices = StartUpDG.face_vertices(element_type)
- face_mask = [1, length(nodes_1d)]
-
- rf = [-1.0; 1.0]
- nrJ = [-1.0; 1.0]
- wf = [1.0; 1.0]
- if D isa AbstractPeriodicDerivativeOperator
- # we do not need any face stuff for periodic operators
- Vf = spzeros(length(wf), length(wq))
- else
- Vf = sparse([1, 2], [1, length(nodes_1d)], [1.0, 1.0])
- end
- LIFT = Diagonal(wq) \ (Vf' * Diagonal(wf))
-
- rstf = (rf,)
- nrstJ = (nrJ,)
-
- # low order interpolation nodes
- r1 = StartUpDG.nodes(element_type, 1)
- V1 = StartUpDG.vandermonde(element_type, 1, r) /
- StartUpDG.vandermonde(element_type, 1, r1)
-
- return RefElemData(element_type, approximation_type, N,
- face_vertices, V1,
- rst, VDM, face_mask,
- rst, LinearAlgebra.I, # plotting
- rstq, wq, Vq, # quadrature
- rstf, wf, Vf, nrstJ, # faces
- M, Pq, Drst, LIFT)
-end
-
-function StartUpDG.RefElemData(element_type::Quad,
- D::AbstractDerivativeOperator;
- tol = 100 * eps())
- approximation_type = D
- N = SummationByPartsOperators.accuracy_order(D) # kind of polynomial degree
-
- # 1D operators
- nodes_1d, weights_1d, D_1d, I_1d = construct_1d_operators(D, tol)
-
- # volume
- s, r = vec.(StartUpDG.NodesAndModes.meshgrid(nodes_1d)) # this is to match
- # ordering of nrstJ
- rq = r
- sq = s
- wr, ws = vec.(StartUpDG.NodesAndModes.meshgrid(weights_1d))
- wq = wr .* ws
- Dr = kron(I_1d, D_1d)
- Ds = kron(D_1d, I_1d)
- M = Diagonal(wq)
- Pq = LinearAlgebra.I
- Vq = LinearAlgebra.I
-
- VDM = nothing # unused generalized Vandermonde matrix
-
- rst = (r, s)
- rstq = (rq, sq)
- Drst = (Dr, Ds)
-
- # face
- face_vertices = StartUpDG.face_vertices(element_type)
- face_mask = vcat(StartUpDG.find_face_nodes(element_type, r, s)...)
-
- rf, sf, wf, nrJ, nsJ = StartUpDG.init_face_data(element_type,
- quad_rule_face = (nodes_1d, weights_1d))
- if D isa AbstractPeriodicDerivativeOperator
- # we do not need any face stuff for periodic operators
- Vf = spzeros(length(wf), length(wq))
- else
- Vf = sparse(eachindex(face_mask), face_mask, ones(Bool, length(face_mask)))
- end
- LIFT = Diagonal(wq) \ (Vf' * Diagonal(wf))
-
- rstf = (rf, sf)
- nrstJ = (nrJ, nsJ)
-
- # low order interpolation nodes
- r1, s1 = StartUpDG.nodes(element_type, 1)
- V1 = StartUpDG.vandermonde(element_type, 1, r, s) /
- StartUpDG.vandermonde(element_type, 1, r1, s1)
-
- return RefElemData(element_type, approximation_type, N,
- face_vertices, V1,
- rst, VDM, face_mask,
- rst, LinearAlgebra.I, # plotting
- rstq, wq, Vq, # quadrature
- rstf, wf, Vf, nrstJ, # faces
- M, Pq, Drst, LIFT)
-end
-
-function StartUpDG.RefElemData(element_type::Hex,
- D::AbstractDerivativeOperator;
- tol = 100 * eps())
- approximation_type = D
- N = SummationByPartsOperators.accuracy_order(D) # kind of polynomial degree
-
- # 1D operators
- nodes_1d, weights_1d, D_1d, I_1d = construct_1d_operators(D, tol)
-
- # volume
- # to match ordering of nrstJ
- s, r, t = vec.(StartUpDG.NodesAndModes.meshgrid(nodes_1d, nodes_1d, nodes_1d))
- rq = r
- sq = s
- tq = t
- wr, ws, wt = vec.(StartUpDG.NodesAndModes.meshgrid(weights_1d, weights_1d, weights_1d))
- wq = wr .* ws .* wt
- Dr = kron(I_1d, I_1d, D_1d)
- Ds = kron(I_1d, D_1d, I_1d)
- Dt = kron(D_1d, I_1d, I_1d)
- M = Diagonal(wq)
- Pq = LinearAlgebra.I
- Vq = LinearAlgebra.I
-
- VDM = nothing # unused generalized Vandermonde matrix
-
- rst = (r, s, t)
- rstq = (rq, sq, tq)
- Drst = (Dr, Ds, Dt)
-
- # face
- face_vertices = StartUpDG.face_vertices(element_type)
- face_mask = vcat(StartUpDG.find_face_nodes(element_type, r, s, t)...)
-
- rf, sf, tf, wf, nrJ, nsJ, ntJ = let
- rf, sf = vec.(StartUpDG.NodesAndModes.meshgrid(nodes_1d, nodes_1d))
- wr, ws = vec.(StartUpDG.NodesAndModes.meshgrid(weights_1d, weights_1d))
- wf = wr .* ws
- StartUpDG.init_face_data(element_type, quad_rule_face = (rf, sf, wf))
- end
- Vf = sparse(eachindex(face_mask), face_mask, ones(Bool, length(face_mask)))
- LIFT = Diagonal(wq) \ (Vf' * Diagonal(wf))
-
- rstf = (rf, sf, tf)
- nrstJ = (nrJ, nsJ, ntJ)
-
- # low order interpolation nodes
- r1, s1, t1 = StartUpDG.nodes(element_type, 1)
- V1 = StartUpDG.vandermonde(element_type, 1, r, s, t) /
- StartUpDG.vandermonde(element_type, 1, r1, s1, t1)
-
- return RefElemData(element_type, approximation_type, N,
- face_vertices, V1,
- rst, VDM, face_mask,
- rst, LinearAlgebra.I, # plotting
- rstq, wq, Vq, # quadrature
- rstf, wf, Vf, nrstJ, # faces
- M, Pq, Drst, LIFT)
-end
-
-# specialized Hex constructor in 3D to reduce memory usage.
-function StartUpDG.RefElemData(element_type::Hex,
- D::AbstractPeriodicDerivativeOperator;
- tol = 100 * eps())
- approximation_type = D
- N = SummationByPartsOperators.accuracy_order(D) # kind of polynomial degree
-
- # 1D operators
- nodes_1d, weights_1d, D_1d, I_1d = construct_1d_operators(D, tol)
-
- # volume
- # to match ordering of nrstJ
- s, r, t = vec.(StartUpDG.NodesAndModes.meshgrid(nodes_1d, nodes_1d, nodes_1d))
- rq = r
- sq = s
- tq = t
- wr, ws, wt = vec.(StartUpDG.NodesAndModes.meshgrid(weights_1d, weights_1d, weights_1d))
- wq = wr .* ws .* wt
- Dr = kron(I_1d, I_1d, D_1d)
- Ds = kron(I_1d, D_1d, I_1d)
- Dt = kron(D_1d, I_1d, I_1d)
- M = Diagonal(wq)
- Pq = LinearAlgebra.I
- Vq = LinearAlgebra.I
-
- VDM = nothing # unused generalized Vandermonde matrix
-
- rst = (r, s, t)
- rstq = (rq, sq, tq)
- Drst = (Dr, Ds, Dt)
-
- # face
- # We do not need any face data for periodic operators. Thus, we just
- # pass `nothing` to save memory.
- face_vertices = ntuple(_ -> nothing, 3)
- face_mask = nothing
- wf = nothing
- rstf = ntuple(_ -> nothing, 3)
- nrstJ = ntuple(_ -> nothing, 3)
- Vf = nothing
- LIFT = nothing
-
- # low order interpolation nodes
- V1 = nothing # do not need to store V1, since we specialize StartUpDG.MeshData to avoid using it.
-
- return RefElemData(element_type, approximation_type, N,
- face_vertices, V1,
- rst, VDM, face_mask,
- rst, LinearAlgebra.I, # plotting
- rstq, wq, Vq, # quadrature
- rstf, wf, Vf, nrstJ, # faces
- M, Pq, Drst, LIFT)
-end
-
-function Base.show(io::IO, mime::MIME"text/plain",
- rd::RefElemData{NDIMS, ElementType, ApproximationType}) where {NDIMS,
- ElementType <:
- StartUpDG.AbstractElemShape,
- ApproximationType <:
- AbstractDerivativeOperator
- }
- @nospecialize rd
- print(io, "RefElemData for an approximation using an ")
- show(IOContext(io, :compact => true), rd.approximation_type)
- print(io, " on $(rd.element_type) element")
-end
-
-function Base.show(io::IO,
- rd::RefElemData{NDIMS, ElementType, ApproximationType}) where {NDIMS,
- ElementType <:
- StartUpDG.AbstractElemShape,
- ApproximationType <:
- AbstractDerivativeOperator
- }
- @nospecialize rd
- print(io, "RefElemData{", summary(rd.approximation_type), ", ", rd.element_type, "}")
-end
-
-function StartUpDG.inverse_trace_constant(rd::RefElemData{NDIMS, ElementType,
- ApproximationType}) where {NDIMS,
- ElementType <:
- Union{
- Line,
- Quad,
- Hex
- },
- ApproximationType <:
- AbstractDerivativeOperator
- }
- D = rd.approximation_type
-
- # the inverse trace constant is the maximum eigenvalue corresponding to
- # M_f * v = λ * M * v
- # where M_f is the face mass matrix and M is the volume mass matrix.
- # Since M is diagonal and since M_f is just the boundary "mask" matrix
- # (which extracts the first and last entries of a vector), the maximum
- # eigenvalue is the inverse of the first or last mass matrix diagonal.
- left_weight = SummationByPartsOperators.left_boundary_weight(D)
- right_weight = SummationByPartsOperators.right_boundary_weight(D)
- max_eigenvalue = max(inv(left_weight), inv(right_weight))
-
- # For tensor product elements, the trace constant for higher dimensional
- # elements is the one-dimensional trace constant multiplied by `NDIMS`. See
- # "GPU-accelerated discontinuous Galerkin methods on hybrid meshes."
- # Chan, Jesse, et al (2016), https://doi.org/10.1016/j.jcp.2016.04.003
- # for more details (specifically, Appendix A.1, Theorem A.4).
- return NDIMS * max_eigenvalue
-end
-
# type alias for specializing on a periodic SBP operator
const DGMultiPeriodicFDSBP{NDIMS, ApproxType, ElemType} = DGMulti{NDIMS, ElemType,
ApproxType,
@@ -450,30 +144,6 @@ end
@muladd begin
#! format: noindent
-# This is used in `estimate_dt`. `estimate_h` uses that `Jf / J = O(h^{NDIMS-1}) / O(h^{NDIMS}) = O(1/h)`.
-# However, since we do not initialize `Jf` for periodic FDSBP operators, we specialize `estimate_h`
-# based on the reference grid provided by SummationByPartsOperators.jl and information about the domain size
-# provided by `md::MeshData``.
-function StartUpDG.estimate_h(e, rd::RefElemData{NDIMS, ElementType, ApproximationType},
- md::MeshData) where {NDIMS,
- ElementType <:
- StartUpDG.AbstractElemShape,
- ApproximationType <:
- SummationByPartsOperators.AbstractPeriodicDerivativeOperator
- }
- D = rd.approximation_type
- x = grid(D)
-
- # we assume all SummationByPartsOperators.jl reference grids are rescaled to [-1, 1]
- xmin = SummationByPartsOperators.xmin(D)
- xmax = SummationByPartsOperators.xmax(D)
- factor = 2 / (xmax - xmin)
-
- # If the domain has size L^NDIMS, then `minimum(md.J)^(1 / NDIMS) = L`.
- # WARNING: this is not a good estimate on anisotropic grids.
- return minimum(diff(x)) * factor * minimum(md.J)^(1 / NDIMS)
-end
-
# specialized for DGMultiPeriodicFDSBP since there are no face nodes
# and thus no inverse trace constant for periodic domains.
function estimate_dt(mesh::DGMultiMesh, dg::DGMultiPeriodicFDSBP)
diff --git a/src/solvers/dgsem_p4est/dg_2d.jl b/src/solvers/dgsem_p4est/dg_2d.jl
index 97b931fa325..36624f2ce8a 100644
--- a/src/solvers/dgsem_p4est/dg_2d.jl
+++ b/src/solvers/dgsem_p4est/dg_2d.jl
@@ -275,9 +275,9 @@ function prolong2boundaries!(cache, u,
return nothing
end
-function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing,
+function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing,
mesh::Union{P4estMesh{2}, T8codeMesh{2}},
- equations, surface_integral, dg::DG)
+ equations, surface_integral, dg::DG) where {BC}
@unpack boundaries = cache
@unpack surface_flux_values = cache.elements
index_range = eachnode(dg)
@@ -501,6 +501,11 @@ function calc_mortar_flux!(surface_flux_values,
# copying in the correct orientation
u_buffer = cache.u_threaded[Threads.threadid()]
+ # in calc_interface_flux!, the interface flux is computed once over each
+ # interface using the normal from the "primary" element. The result is then
+ # passed back to the "secondary" element, flipping the sign to account for the
+ # change in the normal direction. For mortars, this sign flip occurs in
+ # "mortar_fluxes_to_elements!" instead.
mortar_fluxes_to_elements!(surface_flux_values,
mesh, equations, mortar_l2, dg, cache,
mortar, fstar, u_buffer)
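Note on the signature change above (and the analogous one for `UnstructuredMesh2D` further down): boundary conditions are frequently plain functions, and Julia's heuristics may skip specializing a method on a `Function` argument that is only passed through to other calls; the `::BC ... where {BC}` annotation makes the specialization explicit instead of relying on those heuristics. A toy sketch of the pattern, not the Trixi API.

inner_kernel(bc, u) = bc(u)   # stands in for the per-node boundary flux evaluation

calc_bc_unannotated(bc, u) = inner_kernel(bc, u)               # `bc` only passed through
calc_bc_annotated(bc::BC, u) where {BC} = inner_kernel(bc, u)  # type parameter forces specialization

my_boundary_condition(u) = 2 * u   # hypothetical boundary condition function

calc_bc_unannotated(my_boundary_condition, 1.0)
calc_bc_annotated(my_boundary_condition, 1.0)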
diff --git a/src/solvers/dgsem_p4est/dg_3d.jl b/src/solvers/dgsem_p4est/dg_3d.jl
index dc69329474f..4c0845ba9af 100644
--- a/src/solvers/dgsem_p4est/dg_3d.jl
+++ b/src/solvers/dgsem_p4est/dg_3d.jl
@@ -580,6 +580,11 @@ function calc_mortar_flux!(surface_flux_values,
# copying in the correct orientation
u_buffer = cache.u_threaded[Threads.threadid()]
+ # in calc_interface_flux!, the interface flux is computed once over each
+ # interface using the normal from the "primary" element. The result is then
+ # passed back to the "secondary" element, flipping the sign to account for the
+ # change in the normal direction. For mortars, this sign flip occurs in
+ # "mortar_fluxes_to_elements!" instead.
mortar_fluxes_to_elements!(surface_flux_values,
mesh, equations, mortar_l2, dg, cache,
mortar, fstar, u_buffer, fstar_tmp)
diff --git a/src/solvers/dgsem_tree/container_viscous_1d.jl b/src/solvers/dgsem_tree/container_viscous_1d.jl
new file mode 100644
index 00000000000..a4919f75396
--- /dev/null
+++ b/src/solvers/dgsem_tree/container_viscous_1d.jl
@@ -0,0 +1,58 @@
+mutable struct ViscousContainer1D{uEltype <: Real}
+ u_transformed::Array{uEltype, 3}
+ gradients::Array{uEltype, 3}
+ flux_viscous::Array{uEltype, 3}
+
+ # internal `resize!`able storage
+ _u_transformed::Vector{uEltype}
+ _gradients::Vector{uEltype}
+ _flux_viscous::Vector{uEltype}
+
+ function ViscousContainer1D{uEltype}(n_vars::Integer, n_nodes::Integer,
+ n_elements::Integer) where {uEltype <: Real}
+ new(Array{uEltype, 3}(undef, n_vars, n_nodes, n_elements),
+ Array{uEltype, 3}(undef, n_vars, n_nodes, n_elements),
+ Array{uEltype, 3}(undef, n_vars, n_nodes, n_elements),
+ Vector{uEltype}(undef, n_vars * n_nodes * n_elements),
+ Vector{uEltype}(undef, n_vars * n_nodes * n_elements),
+ Vector{uEltype}(undef, n_vars * n_nodes * n_elements))
+ end
+end
+
+function init_viscous_container(n_vars::Integer, n_nodes::Integer,
+ n_elements::Integer,
+ ::Type{uEltype}) where {uEltype <: Real}
+ return ViscousContainer1D{uEltype}(n_vars, n_nodes, n_elements)
+end
+
+# Only one-dimensional `Array`s are `resize!`able in Julia.
+# Hence, we use `Vector`s as internal storage and `resize!`
+# them whenever needed. Then, we reuse the same memory by
+# `unsafe_wrap`ping multi-dimensional `Array`s around the
+# internal storage.
+function Base.resize!(viscous_container::ViscousContainer1D, equations, dg, cache)
+ capacity = nvariables(equations) * nnodes(dg) * nelements(dg, cache)
+ resize!(viscous_container._u_transformed, capacity)
+ resize!(viscous_container._gradients, capacity)
+ resize!(viscous_container._flux_viscous, capacity)
+
+ viscous_container.u_transformed = unsafe_wrap(Array,
+ pointer(viscous_container._u_transformed),
+ (nvariables(equations),
+ nnodes(dg),
+ nelements(dg, cache)))
+
+ viscous_container.gradients = unsafe_wrap(Array,
+ pointer(viscous_container._gradients),
+ (nvariables(equations),
+ nnodes(dg),
+ nelements(dg, cache)))
+
+ viscous_container.flux_viscous = unsafe_wrap(Array,
+ pointer(viscous_container._flux_viscous),
+ (nvariables(equations),
+ nnodes(dg),
+ nelements(dg, cache)))
+
+ return nothing
+end
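Note on the new container above: a minimal standalone sketch of the resize-and-rewrap pattern it uses, with a toy `Toy3D` type (not the Trixi API). The flat `Vector` owns the memory and, as a struct field, keeps it alive; after every `resize!` the multi-dimensional view is rebuilt with `unsafe_wrap`.

mutable struct Toy3D{T}
    data::Array{T, 3}
    _data::Vector{T}   # flat, resize!-able owner of the memory
end

function Toy3D{T}(nv, nn, ne) where {T}
    _data = Vector{T}(undef, nv * nn * ne)
    data = unsafe_wrap(Array, pointer(_data), (nv, nn, ne))
    return Toy3D{T}(data, _data)
end

function toy_resize!(c::Toy3D, nv, nn, ne)
    resize!(c._data, nv * nn * ne)                                   # may reallocate and move the buffer
    c.data = unsafe_wrap(Array, pointer(c._data), (nv, nn, ne))      # re-wrap around the (possibly new) buffer
    return nothing
end

c = Toy3D{Float64}(3, 4, 5)
toy_resize!(c, 3, 4, 10)
size(c.data)   # (3, 4, 10)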
diff --git a/src/solvers/dgsem_tree/containers_2d.jl b/src/solvers/dgsem_tree/containers_2d.jl
index 9148b936312..9e9fe88c15b 100644
--- a/src/solvers/dgsem_tree/containers_2d.jl
+++ b/src/solvers/dgsem_tree/containers_2d.jl
@@ -1325,16 +1325,16 @@ mutable struct ContainerSubcellLimiterIDP2D{uEltype <: Real}
alpha::Array{uEltype, 3} # [i, j, element]
alpha1::Array{uEltype, 3}
alpha2::Array{uEltype, 3}
- variable_bounds::Vector{Array{uEltype, 3}}
+ variable_bounds::Dict{Symbol, Array{uEltype, 3}}
# internal `resize!`able storage
_alpha::Vector{uEltype}
_alpha1::Vector{uEltype}
_alpha2::Vector{uEltype}
- _variable_bounds::Vector{Vector{uEltype}}
+ _variable_bounds::Dict{Symbol, Vector{uEltype}}
end
function ContainerSubcellLimiterIDP2D{uEltype}(capacity::Integer, n_nodes,
- length) where {uEltype <: Real}
+ bound_keys) where {uEltype <: Real}
nan_uEltype = convert(uEltype, NaN)
# Initialize fields with defaults
@@ -1345,12 +1345,12 @@ function ContainerSubcellLimiterIDP2D{uEltype}(capacity::Integer, n_nodes,
_alpha2 = fill(nan_uEltype, n_nodes * (n_nodes + 1) * capacity)
alpha2 = unsafe_wrap(Array, pointer(_alpha2), (n_nodes, n_nodes + 1, capacity))
- _variable_bounds = Vector{Vector{uEltype}}(undef, length)
- variable_bounds = Vector{Array{uEltype, 3}}(undef, length)
- for i in 1:length
- _variable_bounds[i] = fill(nan_uEltype, n_nodes * n_nodes * capacity)
- variable_bounds[i] = unsafe_wrap(Array, pointer(_variable_bounds[i]),
- (n_nodes, n_nodes, capacity))
+ _variable_bounds = Dict{Symbol, Vector{uEltype}}()
+ variable_bounds = Dict{Symbol, Array{uEltype, 3}}()
+ for key in bound_keys
+ _variable_bounds[key] = fill(nan_uEltype, n_nodes * n_nodes * capacity)
+ variable_bounds[key] = unsafe_wrap(Array, pointer(_variable_bounds[key]),
+ (n_nodes, n_nodes, capacity))
end
return ContainerSubcellLimiterIDP2D{uEltype}(alpha, alpha1, alpha2,
@@ -1369,9 +1369,10 @@ nnodes(container::ContainerSubcellLimiterIDP2D) = size(container.alpha, 1)
function Base.resize!(container::ContainerSubcellLimiterIDP2D, capacity)
n_nodes = nnodes(container)
- @unpack _alpha, _alpha1, _alpha2 = container
+ (; _alpha, _alpha1, _alpha2) = container
resize!(_alpha, n_nodes * n_nodes * capacity)
container.alpha = unsafe_wrap(Array, pointer(_alpha), (n_nodes, n_nodes, capacity))
+ container.alpha .= convert(eltype(container.alpha), NaN)
resize!(_alpha1, (n_nodes + 1) * n_nodes * capacity)
container.alpha1 = unsafe_wrap(Array, pointer(_alpha1),
(n_nodes + 1, n_nodes, capacity))
@@ -1379,11 +1380,12 @@ function Base.resize!(container::ContainerSubcellLimiterIDP2D, capacity)
container.alpha2 = unsafe_wrap(Array, pointer(_alpha2),
(n_nodes, n_nodes + 1, capacity))
- @unpack _variable_bounds = container
- for i in 1:length(_variable_bounds)
- resize!(_variable_bounds[i], n_nodes * n_nodes * capacity)
- container.variable_bounds[i] = unsafe_wrap(Array, pointer(_variable_bounds[i]),
- (n_nodes, n_nodes, capacity))
+ (; _variable_bounds) = container
+ for (key, _) in _variable_bounds
+ resize!(_variable_bounds[key], n_nodes * n_nodes * capacity)
+ container.variable_bounds[key] = unsafe_wrap(Array,
+ pointer(_variable_bounds[key]),
+ (n_nodes, n_nodes, capacity))
end
return nothing
diff --git a/src/solvers/dgsem_tree/dg.jl b/src/solvers/dgsem_tree/dg.jl
index 6e02bc1d94a..ff37bad3b3a 100644
--- a/src/solvers/dgsem_tree/dg.jl
+++ b/src/solvers/dgsem_tree/dg.jl
@@ -54,6 +54,9 @@ include("containers.jl")
# Dimension-agnostic parallel setup
include("dg_parallel.jl")
+# Helper struct for parabolic AMR
+include("container_viscous_1d.jl")
+
# 1D DG implementation
include("dg_1d.jl")
include("dg_1d_parabolic.jl")
diff --git a/src/solvers/dgsem_tree/dg_1d_parabolic.jl b/src/solvers/dgsem_tree/dg_1d_parabolic.jl
index c2aa75388c8..97e31e0e22b 100644
--- a/src/solvers/dgsem_tree/dg_1d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_1d_parabolic.jl
@@ -17,7 +17,8 @@ function rhs_parabolic!(du, u, t, mesh::TreeMesh{1},
equations_parabolic::AbstractEquationsParabolic,
initial_condition, boundary_conditions_parabolic, source_terms,
dg::DG, parabolic_scheme, cache, cache_parabolic)
- @unpack u_transformed, gradients, flux_viscous = cache_parabolic
+ @unpack viscous_container = cache_parabolic
+ @unpack u_transformed, gradients, flux_viscous = viscous_container
# Convert conservative variables to a form more suitable for viscous flux calculations
@trixi_timeit timer() "transform variables" begin
@@ -105,12 +106,13 @@ end
function transform_variables!(u_transformed, u, mesh::TreeMesh{1},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for element in eachelement(dg, cache)
# Calculate volume terms in one element
for i in eachnode(dg)
u_node = get_node_vars(u, equations_parabolic, dg, i, element)
- u_transformed_node = gradient_variable_transformation(equations_parabolic)(u_node,
- equations_parabolic)
+ u_transformed_node = transformation(u_node, equations_parabolic)
set_node_vars!(u_transformed, u_transformed_node, equations_parabolic, dg,
i, element)
end
@@ -147,16 +149,18 @@ function prolong2interfaces!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack interfaces = cache_parabolic
+ @unpack neighbor_ids = interfaces
+ interfaces_u = interfaces.u
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
# interface in x-direction
for v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, interface] = flux_viscous[v, nnodes(dg), left_element]
- interfaces.u[2, v, interface] = flux_viscous[v, 1, right_element]
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, interface] = flux_viscous[v, nnodes(dg), left_element]
+ interfaces_u[2, v, interface] = flux_viscous[v, 1, right_element]
end
end
@@ -204,21 +208,22 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack boundaries = cache_parabolic
- @unpack neighbor_sides = boundaries
+ @unpack neighbor_sides, neighbor_ids = boundaries
+ boundaries_u = boundaries.u
@threaded for boundary in eachboundary(dg, cache_parabolic)
- element = boundaries.neighbor_ids[boundary]
+ element = neighbor_ids[boundary]
if neighbor_sides[boundary] == 1
# element in -x direction of boundary
for v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, boundary] = flux_viscous[v, nnodes(dg), element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, boundary] = flux_viscous[v, nnodes(dg), element]
end
else # Element in +x direction of boundary
for v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, boundary] = flux_viscous[v, 1, element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, boundary] = flux_viscous[v, 1, element]
end
end
end
@@ -530,18 +535,15 @@ function create_cache_parabolic(mesh::TreeMesh{1},
elements = init_elements(leaf_cell_ids, mesh, equations_hyperbolic, dg.basis, RealT,
uEltype)
- n_vars = nvariables(equations_hyperbolic)
- n_nodes = nnodes(elements)
- n_elements = nelements(elements)
- u_transformed = Array{uEltype}(undef, n_vars, n_nodes, n_elements)
- gradients = similar(u_transformed)
- flux_viscous = similar(u_transformed)
+ viscous_container = init_viscous_container(nvariables(equations_hyperbolic),
+ nnodes(elements), nelements(elements),
+ uEltype)
interfaces = init_interfaces(leaf_cell_ids, mesh, elements)
boundaries = init_boundaries(leaf_cell_ids, mesh, elements)
- cache = (; elements, interfaces, boundaries, gradients, flux_viscous, u_transformed)
+ cache = (; elements, interfaces, boundaries, viscous_container)
return cache
end
@@ -552,8 +554,10 @@ end
# where f(u) is the inviscid flux and g(u) is the viscous flux.
function apply_jacobian_parabolic!(du, mesh::TreeMesh{1},
equations::AbstractEquationsParabolic, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = cache.elements.inverse_jacobian[element]
+ factor = inverse_jacobian[element]
for i in eachnode(dg)
for v in eachvariable(equations)
diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl
index 0da25230380..1c32703c7c3 100644
--- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl
@@ -118,12 +118,13 @@ end
function transform_variables!(u_transformed, u, mesh::Union{TreeMesh{2}, P4estMesh{2}},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for element in eachelement(dg, cache)
# Calculate volume terms in one element
for j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations_parabolic, dg, i, j, element)
- u_transformed_node = gradient_variable_transformation(equations_parabolic)(u_node,
- equations_parabolic)
+ u_transformed_node = transformation(u_node, equations_parabolic)
set_node_vars!(u_transformed, u_transformed_node, equations_parabolic, dg,
i, j, element)
end
@@ -168,30 +169,31 @@ function prolong2interfaces!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack interfaces = cache_parabolic
- @unpack orientations = interfaces
+ @unpack orientations, neighbor_ids = interfaces
+ interfaces_u = interfaces.u
flux_viscous_x, flux_viscous_y = flux_viscous
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
if orientations[interface] == 1
# interface in x-direction
for j in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, j, interface] = flux_viscous_x[v, nnodes(dg), j,
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, j, interface] = flux_viscous_x[v, nnodes(dg), j,
left_element]
- interfaces.u[2, v, j, interface] = flux_viscous_x[v, 1, j,
+ interfaces_u[2, v, j, interface] = flux_viscous_x[v, 1, j,
right_element]
end
else # if orientations[interface] == 2
# interface in y-direction
for i in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, i, interface] = flux_viscous_y[v, i, nnodes(dg),
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, i, interface] = flux_viscous_y[v, i, nnodes(dg),
left_element]
- interfaces.u[2, v, i, interface] = flux_viscous_y[v, i, 1,
+ interfaces_u[2, v, i, interface] = flux_viscous_y[v, i, 1,
right_element]
end
end
@@ -244,25 +246,26 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack boundaries = cache_parabolic
- @unpack orientations, neighbor_sides = boundaries
+ @unpack orientations, neighbor_sides, neighbor_ids = boundaries
+ boundaries_u = boundaries.u
flux_viscous_x, flux_viscous_y = flux_viscous
@threaded for boundary in eachboundary(dg, cache_parabolic)
- element = boundaries.neighbor_ids[boundary]
+ element = neighbor_ids[boundary]
if orientations[boundary] == 1
# boundary in x-direction
if neighbor_sides[boundary] == 1
# element in -x direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, l, boundary] = flux_viscous_x[v, nnodes(dg), l,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, l, boundary] = flux_viscous_x[v, nnodes(dg), l,
element]
end
else # Element in +x direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, l, boundary] = flux_viscous_x[v, 1, l, element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, l, boundary] = flux_viscous_x[v, 1, l, element]
end
end
else # if orientations[boundary] == 2
@@ -270,15 +273,15 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
if neighbor_sides[boundary] == 1
# element in -y direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, l, boundary] = flux_viscous_y[v, l, nnodes(dg),
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, l, boundary] = flux_viscous_y[v, l, nnodes(dg),
element]
end
else
# element in +y direction of boundary
for l in eachnode(dg), v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, l, boundary] = flux_viscous_y[v, l, 1, element]
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, l, boundary] = flux_viscous_y[v, l, 1, element]
end
end
end
@@ -608,7 +611,7 @@ function prolong2mortars!(cache, flux_viscous::Tuple{AbstractArray, AbstractArra
end
# NOTE: Use analogy to "calc_mortar_flux!" for hyperbolic eqs with no nonconservative terms.
-# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
+# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
# hyperbolic terms with conserved terms only, i.e., no nonconservative terms.
function calc_mortar_flux!(surface_flux_values,
mesh::TreeMesh{2},
@@ -932,10 +935,12 @@ end
# This is because the parabolic fluxes are assumed to be of the form
# `du/dt + df/dx = dg/dx + source(x,t)`,
# where f(u) is the inviscid flux and g(u) is the viscous flux.
-function apply_jacobian_parabolic!(du, mesh::Union{TreeMesh{2}, P4estMesh{2}},
+function apply_jacobian_parabolic!(du, mesh::TreeMesh{2},
equations::AbstractEquationsParabolic, dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = cache.elements.inverse_jacobian[element]
+ factor = inverse_jacobian[element]
for j in eachnode(dg), i in eachnode(dg)
for v in eachvariable(equations)
@@ -946,4 +951,22 @@ function apply_jacobian_parabolic!(du, mesh::Union{TreeMesh{2}, P4estMesh{2}},
return nothing
end
+
+function apply_jacobian_parabolic!(du, mesh::P4estMesh{2},
+ equations::AbstractEquationsParabolic,
+ dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
+ @threaded for element in eachelement(dg, cache)
+ for j in eachnode(dg), i in eachnode(dg)
+ factor = inverse_jacobian[i, j, element]
+
+ for v in eachvariable(equations)
+ du[v, i, j, element] *= factor
+ end
+ end
+ end
+
+ return nothing
+end
end # @muladd
diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl
index 2745d312b37..37492dbcb91 100644
--- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl
+++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl
@@ -118,12 +118,13 @@ end
function transform_variables!(u_transformed, u, mesh::Union{TreeMesh{3}, P4estMesh{3}},
equations_parabolic::AbstractEquationsParabolic,
dg::DG, parabolic_scheme, cache, cache_parabolic)
+ transformation = gradient_variable_transformation(equations_parabolic)
+
@threaded for element in eachelement(dg, cache)
# Calculate volume terms in one element
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
u_node = get_node_vars(u, equations_parabolic, dg, i, j, k, element)
- u_transformed_node = gradient_variable_transformation(equations_parabolic)(u_node,
- equations_parabolic)
+ u_transformed_node = transformation(u_node, equations_parabolic)
set_node_vars!(u_transformed, u_transformed_node, equations_parabolic, dg,
i, j, k, element)
end
@@ -175,43 +176,44 @@ function prolong2interfaces!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack interfaces = cache_parabolic
- @unpack orientations = interfaces
+ @unpack orientations, neighbor_ids = interfaces
+ interfaces_u = interfaces.u
flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous
@threaded for interface in eachinterface(dg, cache)
- left_element = interfaces.neighbor_ids[1, interface]
- right_element = interfaces.neighbor_ids[2, interface]
+ left_element = neighbor_ids[1, interface]
+ right_element = neighbor_ids[2, interface]
if orientations[interface] == 1
# interface in x-direction
for k in eachnode(dg), j in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, j, k, interface] = flux_viscous_x[v, nnodes(dg), j,
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, j, k, interface] = flux_viscous_x[v, nnodes(dg), j,
k, left_element]
- interfaces.u[2, v, j, k, interface] = flux_viscous_x[v, 1, j, k,
+ interfaces_u[2, v, j, k, interface] = flux_viscous_x[v, 1, j, k,
right_element]
end
elseif orientations[interface] == 2
# interface in y-direction
for k in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, i, k, interface] = flux_viscous_y[v, i, nnodes(dg),
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, i, k, interface] = flux_viscous_y[v, i, nnodes(dg),
k, left_element]
- interfaces.u[2, v, i, k, interface] = flux_viscous_y[v, i, 1, k,
+ interfaces_u[2, v, i, k, interface] = flux_viscous_y[v, i, 1, k,
right_element]
end
else # if orientations[interface] == 3
# interface in z-direction
for j in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*!
- interfaces.u[1, v, i, j, interface] = flux_viscous_z[v, i, j,
+ # OBS! `interfaces_u` stores the interpolated *fluxes* and *not the solution*!
+ interfaces_u[1, v, i, j, interface] = flux_viscous_z[v, i, j,
nnodes(dg),
left_element]
- interfaces.u[2, v, i, j, interface] = flux_viscous_z[v, i, j, 1,
+ interfaces_u[2, v, i, j, interface] = flux_viscous_z[v, i, j, 1,
right_element]
end
end
@@ -265,11 +267,12 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
equations_parabolic::AbstractEquationsParabolic,
surface_integral, dg::DG, cache)
@unpack boundaries = cache_parabolic
- @unpack orientations, neighbor_sides = boundaries
+ @unpack orientations, neighbor_sides, neighbor_ids = boundaries
+ boundaries_u = boundaries.u
flux_viscous_x, flux_viscous_y, flux_viscous_z = flux_viscous
@threaded for boundary in eachboundary(dg, cache_parabolic)
- element = boundaries.neighbor_ids[boundary]
+ element = neighbor_ids[boundary]
if orientations[boundary] == 1
# boundary in x-direction
@@ -277,15 +280,15 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in -x direction of boundary
for k in eachnode(dg), j in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, j, k, boundary] = flux_viscous_x[v, nnodes(dg),
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, j, k, boundary] = flux_viscous_x[v, nnodes(dg),
j, k, element]
end
else # Element in +x direction of boundary
for k in eachnode(dg), j in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, j, k, boundary] = flux_viscous_x[v, 1, j, k,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, j, k, boundary] = flux_viscous_x[v, 1, j, k,
element]
end
end
@@ -295,8 +298,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in -y direction of boundary
for k in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, i, k, boundary] = flux_viscous_y[v, i,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, i, k, boundary] = flux_viscous_y[v, i,
nnodes(dg), k,
element]
end
@@ -304,8 +307,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in +y direction of boundary
for k in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, i, k, boundary] = flux_viscous_y[v, i, 1, k,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, i, k, boundary] = flux_viscous_y[v, i, 1, k,
element]
end
end
@@ -315,8 +318,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in -z direction of boundary
for j in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[1, v, i, j, boundary] = flux_viscous_z[v, i, j,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[1, v, i, j, boundary] = flux_viscous_z[v, i, j,
nnodes(dg),
element]
end
@@ -324,8 +327,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous,
# element in +z direction of boundary
for j in eachnode(dg), i in eachnode(dg),
v in eachvariable(equations_parabolic)
- # OBS! `boundaries.u` stores the interpolated *fluxes* and *not the solution*!
- boundaries.u[2, v, i, j, boundary] = flux_viscous_z[v, i, j, 1,
+ # OBS! `boundaries_u` stores the interpolated *fluxes* and *not the solution*!
+ boundaries_u[2, v, i, j, boundary] = flux_viscous_z[v, i, j, 1,
element]
end
end
@@ -820,7 +823,7 @@ function prolong2mortars!(cache,
end
# NOTE: Use analogy to "calc_mortar_flux!" for hyperbolic eqs with no nonconservative terms.
-# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
+# Reasoning: "calc_interface_flux!" for parabolic part is implemented as the version for
# hyperbolic terms with conserved terms only, i.e., no nonconservative terms.
function calc_mortar_flux!(surface_flux_values,
mesh::TreeMesh{3},
@@ -1122,10 +1125,13 @@ end
# This is because the parabolic fluxes are assumed to be of the form
# `du/dt + df/dx = dg/dx + source(x,t)`,
# where f(u) is the inviscid flux and g(u) is the viscous flux.
-function apply_jacobian_parabolic!(du, mesh::Union{TreeMesh{3}, P4estMesh{3}},
- equations::AbstractEquationsParabolic, dg::DG, cache)
+function apply_jacobian_parabolic!(du, mesh::TreeMesh{3},
+ equations::AbstractEquationsParabolic,
+ dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
@threaded for element in eachelement(dg, cache)
- factor = cache.elements.inverse_jacobian[element]
+ factor = inverse_jacobian[element]
for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
for v in eachvariable(equations)
@@ -1136,4 +1142,22 @@ function apply_jacobian_parabolic!(du, mesh::Union{TreeMesh{3}, P4estMesh{3}},
return nothing
end
+
+function apply_jacobian_parabolic!(du, mesh::P4estMesh{3},
+ equations::AbstractEquationsParabolic,
+ dg::DG, cache)
+ @unpack inverse_jacobian = cache.elements
+
+ @threaded for element in eachelement(dg, cache)
+ for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
+ factor = inverse_jacobian[i, j, k, element]
+
+ for v in eachvariable(equations)
+ du[v, i, j, k, element] *= factor
+ end
+ end
+ end
+
+ return nothing
+end
end # @muladd
diff --git a/src/solvers/dgsem_tree/subcell_limiters.jl b/src/solvers/dgsem_tree/subcell_limiters.jl
index 3a707de3bc7..55d402954bf 100644
--- a/src/solvers/dgsem_tree/subcell_limiters.jl
+++ b/src/solvers/dgsem_tree/subcell_limiters.jl
@@ -26,7 +26,7 @@ The bounds are calculated using the low-order FV solution. The positivity limite
!!! note
This limiter and the correction callback [`SubcellLimiterIDPCorrection`](@ref) only work together.
- Without the callback, no limiting takes place, leading to a standard flux-differencing DGSEM scheme.
+    Without the callback, no correction takes place, and the scheme reduces to a standard low-order FV scheme.
## References
@@ -52,9 +52,10 @@ function SubcellLimiterIDP(equations::AbstractEquations, basis;
positivity_variables_cons = [],
positivity_correction_factor = 0.1)
positivity = (length(positivity_variables_cons) > 0)
- number_bounds = length(positivity_variables_cons)
- cache = create_cache(SubcellLimiterIDP, equations, basis, number_bounds)
+ bound_keys = Tuple(Symbol(string(v), "_min") for v in positivity_variables_cons)
+
+ cache = create_cache(SubcellLimiterIDP, equations, basis, bound_keys)
SubcellLimiterIDP{typeof(positivity_correction_factor), typeof(cache)}(positivity,
positivity_variables_cons,
@@ -100,4 +101,11 @@ function Base.show(io::IO, ::MIME"text/plain", limiter::SubcellLimiterIDP)
summary_box(io, "SubcellLimiterIDP", setup)
end
end
+
+function get_node_variables!(node_variables, limiter::SubcellLimiterIDP,
+ ::VolumeIntegralSubcellLimiting, equations)
+ node_variables[:limiting_coefficient] = limiter.cache.subcell_limiter_coefficients.alpha
+
+ return nothing
+end
end # @muladd
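Note on the `bound_keys` construction above: a standalone sketch with made-up indices showing how the keys are built and how they are later used for the lookup in `idp_positivity!`.

positivity_variables_cons = [1, 4]   # e.g. indices of density and energy
bound_keys = Tuple(Symbol(string(v), "_min") for v in positivity_variables_cons)
# bound_keys == (Symbol("1_min"), Symbol("4_min"))

n_nodes, n_elements = 4, 2
variable_bounds = Dict(key => fill(NaN, n_nodes, n_nodes, n_elements) for key in bound_keys)

variable = 1
var_min = variable_bounds[Symbol(string(variable), "_min")]   # lookup as in idp_positivity!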
diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
index 09ab84ed11a..5e00ab4e903 100644
--- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl
+++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl
@@ -6,17 +6,22 @@
#! format: noindent
# this method is used when the limiter is constructed as for shock-capturing volume integrals
-function create_cache(indicator::Type{SubcellLimiterIDP},
- equations::AbstractEquations{2},
- basis::LobattoLegendreBasis, number_bounds)
+function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquations{2},
+ basis::LobattoLegendreBasis, bound_keys)
subcell_limiter_coefficients = Trixi.ContainerSubcellLimiterIDP2D{real(basis)
}(0,
nnodes(basis),
- number_bounds)
-
- cache = (; subcell_limiter_coefficients)
+ bound_keys)
+
+ # Memory for the bounds checking routine used with the `BoundsCheckCallback`.
+ # The first entry of each vector contains the maximum deviation since the last export.
+ # The second one contains the total maximum deviation.
+ idp_bounds_delta = Dict{Symbol, Vector{real(basis)}}()
+ for key in bound_keys
+ idp_bounds_delta[key] = zeros(real(basis), 2)
+ end
- return cache
+ return (; subcell_limiter_coefficients, idp_bounds_delta)
end
function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSEM, t,
@@ -26,8 +31,7 @@ function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSE
alpha .= zero(eltype(alpha))
if limiter.positivity
- @trixi_timeit timer() "positivity" idp_positivity!(alpha, limiter, u, dt,
- semi)
+ @trixi_timeit timer() "positivity" idp_positivity!(alpha, limiter, u, dt, semi)
end
# Calculate alpha1 and alpha2
@@ -50,22 +54,21 @@ end
@inline function idp_positivity!(alpha, limiter, u, dt, semi)
# Conservative variables
- for (index, variable) in enumerate(limiter.positivity_variables_cons)
- idp_positivity!(alpha, limiter, u, dt, semi, variable, index)
+ for variable in limiter.positivity_variables_cons
+ idp_positivity!(alpha, limiter, u, dt, semi, variable)
end
return nothing
end
-@inline function idp_positivity!(alpha, limiter, u, dt, semi, variable, index)
+@inline function idp_positivity!(alpha, limiter, u, dt, semi, variable)
mesh, equations, dg, cache = mesh_equations_solver_cache(semi)
- @unpack antidiffusive_flux1, antidiffusive_flux2 = cache.antidiffusive_fluxes
- @unpack inverse_weights = dg.basis
- @unpack positivity_correction_factor = limiter
-
- @unpack variable_bounds = limiter.cache.subcell_limiter_coefficients
+ (; antidiffusive_flux1, antidiffusive_flux2) = cache.antidiffusive_fluxes
+ (; inverse_weights) = dg.basis
+ (; positivity_correction_factor) = limiter
- var_min = variable_bounds[index]
+ (; variable_bounds) = limiter.cache.subcell_limiter_coefficients
+ var_min = variable_bounds[Symbol(string(variable), "_min")]
@threaded for element in eachelement(dg, semi.cache)
inverse_jacobian = cache.elements.inverse_jacobian[element]
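Note on the `idp_bounds_delta` dictionary introduced above: one plausible update pattern consistent with the comment on its two entries; the actual bookkeeping lives in the `BoundsCheckCallback`, which is not part of this diff.

bound_keys = (Symbol("1_min"),)
idp_bounds_delta = Dict(key => zeros(2) for key in bound_keys)

deviation = 1.0e-12                    # hypothetical bound violation at some node
delta = idp_bounds_delta[Symbol("1_min")]
delta[1] = max(delta[1], deviation)    # maximum deviation since the last export
delta[2] = max(delta[2], delta[1])     # total maximum deviation
delta[1] = 0.0                         # reset when the deviations are exported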
diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl
index 7b8dafdddd2..b12a96c4c31 100644
--- a/src/solvers/dgsem_unstructured/dg_2d.jl
+++ b/src/solvers/dgsem_unstructured/dg_2d.jl
@@ -357,9 +357,9 @@ function calc_boundary_flux_by_type!(cache, t, BCs::Tuple{}, BC_indices::Tuple{}
nothing
end
-function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing,
+function calc_boundary_flux!(cache, t, boundary_condition::BC, boundary_indexing,
mesh::UnstructuredMesh2D, equations,
- surface_integral, dg::DG)
+ surface_integral, dg::DG) where {BC}
@unpack surface_flux_values = cache.elements
@unpack element_id, element_side_id = cache.boundaries
diff --git a/src/time_integration/methods_SSP.jl b/src/time_integration/methods_SSP.jl
index 8ecad69748b..5b72682d48e 100644
--- a/src/time_integration/methods_SSP.jl
+++ b/src/time_integration/methods_SSP.jl
@@ -24,14 +24,21 @@ The third-order SSP Runge-Kutta method of Shu and Osher.
This is an experimental feature and may change in future releases.
"""
struct SimpleSSPRK33{StageCallbacks} <: SimpleAlgorithmSSP
- a::SVector{3, Float64}
- b::SVector{3, Float64}
+ numerator_a::SVector{3, Float64}
+ numerator_b::SVector{3, Float64}
+ denominator::SVector{3, Float64}
c::SVector{3, Float64}
stage_callbacks::StageCallbacks
function SimpleSSPRK33(; stage_callbacks = ())
- a = SVector(0.0, 3 / 4, 1 / 3)
- b = SVector(1.0, 1 / 4, 2 / 3)
+ # Mathematically speaking, it is not necessary for the algorithm to split the factors
+ # into numerator and denominator. Without the split, however, rounding errors on the
+ # order of machine precision occur, which accumulate over time and thus endanger the
+ # conservation properties of the simulation.
+ # See also https://github.com/trixi-framework/Trixi.jl/pull/1640.
+ numerator_a = SVector(0.0, 3.0, 1.0) # a = numerator_a / denominator
+ numerator_b = SVector(1.0, 1.0, 2.0) # b = numerator_b / denominator
+ denominator = SVector(1.0, 4.0, 3.0)
c = SVector(0.0, 1.0, 1 / 2)
# Butcher tableau
@@ -42,7 +49,8 @@ struct SimpleSSPRK33{StageCallbacks} <: SimpleAlgorithmSSP
# --------------------
# b | 1/6 1/6 2/3
- new{typeof(stage_callbacks)}(a, b, c, stage_callbacks)
+ new{typeof(stage_callbacks)}(numerator_a, numerator_b, denominator, c,
+ stage_callbacks)
end
end
@@ -166,7 +174,9 @@ function solve!(integrator::SimpleIntegratorSSP)
end
# perform convex combination
- @. integrator.u = alg.a[stage] * integrator.r0 + alg.b[stage] * integrator.u
+ @. integrator.u = (alg.numerator_a[stage] * integrator.r0 +
+ alg.numerator_b[stage] * integrator.u) /
+ alg.denominator[stage]
end
integrator.iter += 1
@@ -221,7 +231,9 @@ function Base.resize!(integrator::SimpleIntegratorSSP, new_size)
resize!(integrator.r0, new_size)
# Resize container
- resize!(integrator.p, new_size)
+ # new_size = n_variables * n_nodes^n_dims * n_elements
+ n_elements = nelements(integrator.p.solver, integrator.p.cache)
+ resize!(integrator.p, n_elements)
end
function Base.resize!(semi::AbstractSemidiscretization, new_size)
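Note on the coefficient split above: a standalone sketch (not the Trixi integrator types) relating the old and new storage. The split form reproduces `a = numerator_a ./ denominator` and `b = numerator_b ./ denominator` but defers the division to the update itself; the claim that this improves long-time conservation comes from the referenced PR discussion, not from this snippet.

numerator_a = [0.0, 3.0, 1.0]
numerator_b = [1.0, 1.0, 2.0]
denominator = [1.0, 4.0, 3.0]

a = numerator_a ./ denominator   # ≈ [0.0, 3/4, 1/3], the coefficients stored previously
b = numerator_b ./ denominator   # ≈ [1.0, 1/4, 2/3]

r0, u = 0.3, 0.7
stage = 3
old_update = a[stage] * r0 + b[stage] * u
new_update = (numerator_a[stage] * r0 + numerator_b[stage] * u) / denominator[stage]
@show old_update
@show new_update
@show old_update - new_update   # zero or on the order of machine precision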
diff --git a/test/Project.toml b/test/Project.toml
index 7115a19b441..c45be49a5d0 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,13 +1,5 @@
-[compat]
-BSON = "0.3.3"
-CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10"
-Flux = "0.13 - 0.13.12" # TODO: Return to "0.13" once https://github.com/FluxML/Flux.jl/issues/2204 is resolved
-ForwardDiff = "0.10"
-MPI = "0.20"
-OrdinaryDiffEq = "6.49.1"
-Plots = "1.16"
-
[deps]
+Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
@@ -21,6 +13,16 @@ Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+[compat]
+Aqua = "0.7"
+BSON = "0.3.3"
+CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10"
+Flux = "0.13.15, 0.14"
+ForwardDiff = "0.10"
+MPI = "0.20"
+OrdinaryDiffEq = "6.49.1"
+Plots = "1.16"
+
[preferences.OrdinaryDiffEq]
PrecompileAutoSpecialize = false
PrecompileAutoSwitch = false
diff --git a/test/runtests.jl b/test/runtests.jl
index f1adbaaf1df..7e195fe7402 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -109,6 +109,7 @@ const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3)
@time if TRIXI_TEST == "all" || TRIXI_TEST == "misc_part2"
include("test_special_elixirs.jl")
+ include("test_aqua.jl")
end
@time if TRIXI_TEST == "all" || TRIXI_TEST == "performance_specializations_part1"
diff --git a/test/test_aqua.jl b/test/test_aqua.jl
new file mode 100644
index 00000000000..f7ab4f545d0
--- /dev/null
+++ b/test/test_aqua.jl
@@ -0,0 +1,18 @@
+module TestAqua
+
+using Aqua
+using Test
+using Trixi
+
+include("test_trixi.jl")
+
+@timed_testset "Aqua.jl" begin
+ Aqua.test_all(Trixi,
+ ambiguities = false,
+ # exceptions necessary for adding a new method `StartUpDG.estimate_h`
+ # in src/solvers/dgmulti/sbp.jl
+ piracy = (treat_as_own = [Trixi.StartUpDG.RefElemData,
+ Trixi.StartUpDG.MeshData],))
+end
+
+end #module
diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl
index 8403fcf1b04..8f08a9d72e7 100644
--- a/test/test_mpi_tree.jl
+++ b/test/test_mpi_tree.jl
@@ -23,10 +23,22 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows()
end
@trixi_testset "elixir_advection_restart.jl" begin
- @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
- # Expected errors are exactly the same as in the serial test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ using OrdinaryDiffEq: RDPK3SpFSAL49
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"))
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
+ alg = RDPK3SpFSAL49(), tspan = (0.0, 10.0))
+ l2_expected, linf_expected = analysis_callback(sol)
+
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"))
+ # Errors are exactly the same as in the elixir_advection_extended.jl
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
+ alg = RDPK3SpFSAL49())
+ l2_actual, linf_actual = analysis_callback(sol)
+
+ Trixi.mpi_isroot() && @test l2_actual == l2_expected
+ Trixi.mpi_isroot() && @test linf_actual == linf_expected
end
@trixi_testset "elixir_advection_mortar.jl" begin
diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl
index c4ce2619e15..31dfe1d35a5 100644
--- a/test/test_p4est_2d.jl
+++ b/test/test_p4est_2d.jl
@@ -24,7 +24,7 @@ isdir(outdir) && rm(outdir, recursive=true)
l2 = [3.198940059144588e-5],
linf = [0.00030636069494005547])
- # Ensure that we do not have excessive memory allocations
+ # Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
let
t = sol.t[end]
@@ -102,6 +102,15 @@ isdir(outdir) && rm(outdir, recursive=true)
l2 = [0.020291447969983396, 0.017479614254319948, 0.011387644425613437, 0.0514420126021293],
linf = [0.3582779022370579, 0.32073537890751663, 0.221818049107692, 0.9209559420400415],
tspan = (0.0, 0.15))
+
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
+ end
end
@trixi_testset "elixir_euler_forward_step_amr.jl" begin
diff --git a/test/test_parabolic_1d.jl b/test/test_parabolic_1d.jl
index 06a55100d62..f00138c698c 100644
--- a/test/test_parabolic_1d.jl
+++ b/test/test_parabolic_1d.jl
@@ -20,6 +20,28 @@ isdir(outdir) && rm(outdir, recursive=true)
)
end
+ @trixi_testset "TreeMesh1D: elixir_advection_diffusion.jl (AMR)" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_advection_diffusion.jl"),
+ tspan=(0.0, 0.0), initial_refinement_level = 5)
+ tspan=(0.0, 1.0)
+ ode = semidiscretize(semi, tspan)
+ amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable=first),
+ base_level=4,
+ med_level=5, med_threshold=0.1,
+ max_level=6, max_threshold=0.6)
+ amr_callback = AMRCallback(semi, amr_controller,
+ interval=5,
+ adapt_initial_condition=true)
+
+ # Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver
+ callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback)
+ sol = solve(ode, KenCarp4(autodiff=false), abstol=time_abs_tol, reltol=time_int_tol,
+ save_everystep=false, callback=callbacks)
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [6.4878111416468355e-6]
+ @test linf_error ≈ [3.258075790424364e-5]
+ end
+
@trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_periodic.jl" begin
@test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_navierstokes_convergence_periodic.jl"),
l2 = [0.0001133835907077494, 6.226282245610444e-5, 0.0002820171699999139],
@@ -53,6 +75,25 @@ isdir(outdir) && rm(outdir, recursive=true)
linf = [0.002754803146635787, 0.0028567714697580906, 0.012941794048176192]
)
end
+
+ @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls_amr.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_navierstokes_convergence_walls_amr.jl"),
+ equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu=mu(),
+ Prandtl=prandtl_number()),
+ l2 = [2.5278824700860636e-5, 2.5540078777006958e-5, 0.00012118655083858043],
+ linf = [0.0001466387075579334, 0.00019422427462629705, 0.0009556446847707178]
+ )
+ end
+
+ @trixi_testset "TreeMesh1D: elixir_navierstokes_convergence_walls_amr.jl: GradientVariablesEntropy" begin
+ @test_trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_navierstokes_convergence_walls_amr.jl"),
+ equations_parabolic = CompressibleNavierStokesDiffusion1D(equations, mu=mu(),
+ Prandtl=prandtl_number(),
+ gradient_variables = GradientVariablesEntropy()),
+ l2 = [2.459359632523962e-5, 2.3928390718460263e-5, 0.00011252414117082376],
+ linf = [0.0001185052018830568, 0.00018987717854305393, 0.0009597503607920999]
+ )
+ end
end
# Clean up afterwards: delete Trixi output directory
diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl
index 1564a33dc41..98aa337afb4 100644
--- a/test/test_parabolic_2d.jl
+++ b/test/test_parabolic_2d.jl
@@ -143,9 +143,9 @@ isdir(outdir) && rm(outdir, recursive=true)
callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol,
ode_default_options()..., callback=callbacks)
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2[1] ≈ 1.67452550744728e-6
- @test ac_sol.linf[1] ≈ 7.905059166368744e-6
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [1.67452550744728e-6]
+ @test linf_error ≈ [7.905059166368744e-6]
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
@@ -229,9 +229,9 @@ isdir(outdir) && rm(outdir, recursive=true)
callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
ode_default_options()..., callback=callbacks)
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2 ≈ [0.00024296959173852447; 0.0002093263158670915; 0.0005390572390977262; 0.00026753561392341537]
- @test ac_sol.linf ≈ [0.0016210102053424436; 0.002593287648655501; 0.002953907343823712; 0.002077119120180271]
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [0.00024296959173852447; 0.0002093263158670915; 0.0005390572390977262; 0.00026753561392341537]
+ @test linf_error ≈ [0.0016210102053424436; 0.002593287648655501; 0.002953907343823712; 0.002077119120180271]
end
@trixi_testset "TreeMesh2D: elixir_navierstokes_lid_driven_cavity.jl" begin
@@ -260,24 +260,16 @@ isdir(outdir) && rm(outdir, recursive=true)
@trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic_curved.jl" begin
@test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_periodic_curved.jl"),
trees_per_dimension = (1, 1), initial_refinement_level = 2, tspan=(0.0, 0.5),
- l2 = [0.012380458938507371],
- linf = [0.10860506906472567]
- )
- end
-
- @trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic_curved.jl" begin
- @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_periodic_curved.jl"),
- trees_per_dimension = (1, 1), initial_refinement_level = 2, tspan=(0.0, 0.5),
- l2 = [0.012380458938507371],
- linf = [0.10860506906472567]
+ l2 = [0.006708147442490916],
+ linf = [0.04807038397976693]
)
end
@trixi_testset "P4estMesh2D: elixir_advection_diffusion_nonperiodic_curved.jl" begin
@test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_nonperiodic_curved.jl"),
trees_per_dimension = (1, 1), initial_refinement_level = 2, tspan=(0.0, 0.5),
- l2 = [0.04933902988507035],
- linf = [0.2550261714590271]
+ l2 = [0.00919917034843865],
+ linf = [0.14186297438393505]
)
end
@@ -289,6 +281,14 @@ isdir(outdir) && rm(outdir, recursive=true)
)
end
+ @trixi_testset "P4estMesh2D: elixir_navierstokes_convergence_nonperiodic.jl" begin
+ @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_navierstokes_convergence_nonperiodic.jl"),
+ initial_refinement_level = 1, tspan=(0.0, 0.2),
+ l2 = [0.00040364962558511795, 0.0005869762481506936, 0.00091488537427274, 0.0011984191566376762],
+ linf = [0.0024993634941723464, 0.009487866203944725, 0.004505829506628117, 0.011634902776245681]
+ )
+ end
+
@trixi_testset "P4estMesh2D: elixir_navierstokes_lid_driven_cavity.jl" begin
@test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_navierstokes_lid_driven_cavity.jl"),
initial_refinement_level = 2, tspan=(0.0, 0.5),
diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl
index d607962afa0..86076460294 100644
--- a/test/test_parabolic_3d.jl
+++ b/test/test_parabolic_3d.jl
@@ -85,7 +85,7 @@ isdir(outdir) && rm(outdir, recursive=true)
num_leafs = length(LLID)
@assert num_leafs % 16 == 0
Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/16)])
- tspan=(0.0, 1.0)
+ tspan=(0.0, 0.25)
semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver;
boundary_conditions=(boundary_conditions, boundary_conditions_parabolic),
source_terms=source_terms_navier_stokes_convergence_test)
@@ -94,9 +94,9 @@ isdir(outdir) && rm(outdir, recursive=true)
callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback)
sol = solve(ode, RDPK3SpFSAL49(); abstol=time_int_tol, reltol=time_int_tol, dt = 1e-5,
ode_default_options()..., callback=callbacks)
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2 ≈ [0.0003991794175622818; 0.0008853745163670504; 0.0010658655552066817; 0.0008785559918324284; 0.001403163458422815]
- @test ac_sol.linf ≈ [0.0035306410538458177; 0.01505692306169911; 0.008862444161110705; 0.015065647972869856; 0.030402714743065218]
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [0.0003109336253407314, 0.0006473493036803503, 0.0007705277238213672, 0.0006280517917198335, 0.000903927789884075]
+ @test linf_error ≈ [0.0023694155365339142, 0.010634932622402863, 0.006772070862236412, 0.010640551561726901, 0.019256819038719897]
end
@trixi_testset "TreeMesh3D: elixir_navierstokes_taylor_green_vortex.jl" begin
@@ -114,7 +114,7 @@ isdir(outdir) && rm(outdir, recursive=true)
num_leafs = length(LLID)
@assert num_leafs % 32 == 0
Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs/32)])
- tspan=(0.0, 10.0)
+ tspan=(0.0, 0.1)
semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic),
initial_condition, solver)
ode = semidiscretize(semi, tspan)
@@ -127,9 +127,9 @@ isdir(outdir) && rm(outdir, recursive=true)
sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false),
dt=5e-3,
save_everystep=false, callback=callbacks);
- ac_sol = analysis_callback(sol)
- @test ac_sol.l2 ≈ [0.0013666103707729502; 0.2313581629543744; 0.2308164306264533; 0.17460246787819503; 0.28121914446544005]
- @test ac_sol.linf ≈ [0.006938093883741336; 1.028235074139312; 1.0345438209717241; 1.0821111605203542; 1.2669636522564645]
+ l2_error, linf_error = analysis_callback(sol)
+ @test l2_error ≈ [7.314319856736271e-5, 0.006266480163542894, 0.006266489911815533, 0.008829222305770226, 0.0032859166842329228]
+ @test linf_error ≈ [0.0002943968186086554, 0.013876261980614757, 0.013883619864959451, 0.025201279960491936, 0.018679364985388247]
# Ensure that we do not have excessive memory allocations
# (e.g., from type instabilities)
diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl
index 75937ba82ad..6d528abc7af 100644
--- a/test/test_structured_2d.jl
+++ b/test/test_structured_2d.jl
@@ -220,6 +220,30 @@ isdir(outdir) && rm(outdir, recursive=true)
tspan = (0.0, 0.3))
end
+ @trixi_testset "elixir_eulerpolytropic_convergence.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"),
+ l2 = [0.0016688820596537988, 0.0025921681885685425, 0.003280950351435014],
+ linf = [0.010994679664394269, 0.01331197845637, 0.020080117011346488])
+ end
+
+ @trixi_testset "elixir_eulerpolytropic_ec.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_ec.jl"),
+ l2 = [0.03647890611450939, 0.025284915444045052, 0.025340697771609126],
+ linf = [0.32516731565355583, 0.37509762516540046, 0.29812843284727336])
+ end
+
+ @trixi_testset "elixir_eulerpolytropic_isothermal_wave.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_isothermal_wave.jl"),
+ l2 = [0.004998778491726366, 0.004998916000294425, 9.259136963058664e-17],
+ linf = [0.010001103673834888, 0.010051165098399503, 7.623942913643681e-16])
+ end
+
+ @trixi_testset "elixir_eulerpolytropic_wave.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_wave.jl"),
+ l2 = [0.23642682112204072, 0.20904264390331334, 8.174982691297391e-17],
+ linf = [0.4848250368349989, 0.253350873815695, 4.984552457753618e-16])
+ end
+
@trixi_testset "elixir_hypdiff_nonperiodic.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_hypdiff_nonperiodic.jl"),
l2 = [0.8799744480157664, 0.8535008397034816, 0.7851383019164209],
diff --git a/test/test_threaded.jl b/test/test_threaded.jl
index 9b30836d0ed..b13b5d0f5fc 100644
--- a/test/test_threaded.jl
+++ b/test/test_threaded.jl
@@ -12,27 +12,38 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
@testset "Threaded tests" begin
@testset "TreeMesh" begin
@trixi_testset "elixir_advection_restart.jl" begin
- @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_restart.jl"),
- # Expected errors are exactly the same as in the serial test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ elixir = joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_extended.jl")
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(elixir)
+ trixi_include(@__MODULE__, elixir, tspan = (0.0, 10.0))
+ l2_expected, linf_expected = analysis_callback(sol)
+
+ elixir = joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_restart.jl")
+ Trixi.mpi_isroot() && println("═"^100)
+ Trixi.mpi_isroot() && println(elixir)
+     # Expected errors are exactly the same as in elixir_advection_extended.jl
+ trixi_include(@__MODULE__, elixir)
+ l2_actual, linf_actual = analysis_callback(sol)
+
+ Trixi.mpi_isroot() && @test l2_actual == l2_expected
+ Trixi.mpi_isroot() && @test linf_actual == linf_expected
- # Ensure that we do not have excessive memory allocations
- # (e.g., from type instabilities)
- let
- t = sol.t[end]
- u_ode = sol.u[end]
- du_ode = similar(u_ode)
- @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
- end
+ # Ensure that we do not have excessive memory allocations
+ # (e.g., from type instabilities)
+ let
+ t = sol.t[end]
+ u_ode = sol.u[end]
+ du_ode = similar(u_ode)
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
+ end
end
@trixi_testset "elixir_advection_restart.jl with threaded time integration" begin
@test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_restart.jl"),
alg = CarpenterKennedy2N54(williamson_condition = false, thread = OrdinaryDiffEq.True()),
# Expected errors are exactly the same as in the serial test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ l2 = [8.005068880114254e-6],
+ linf = [6.39093577996519e-5])
end
@trixi_testset "elixir_advection_amr_refine_twice.jl" begin
@@ -301,11 +312,7 @@ Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive=true)
t = sol.t[end]
u_ode = sol.u[end]
du_ode = similar(u_ode)
- if (Threads.nthreads() < 2) || (VERSION < v"1.9")
- @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
- else
- @test_broken (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
- end
+ @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 5000
end
end
diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl
index cafa17edd4c..09fb2d9e432 100644
--- a/test/test_tree_1d_shallowwater.jl
+++ b/test/test_tree_1d_shallowwater.jl
@@ -102,7 +102,8 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem")
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_beach.jl"),
l2 = [0.17979210479598923, 1.2377495706611434, 6.289818963361573e-8],
linf = [0.845938394800688, 3.3740800777086575, 4.4541473087633676e-7],
- tspan = (0.0, 0.05))
+ tspan = (0.0, 0.05),
+ atol = 3e-10) # see https://github.com/trixi-framework/Trixi.jl/issues/1617
end
@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin
@@ -111,6 +112,20 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem")
linf = [0.00041080213807871235, 0.00014823261488938177, 2.220446049250313e-16],
tspan = (0.0, 0.05))
end
+
+ @trixi_testset "elixir_shallow_water_quasi_1d_source_terms.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallow_water_quasi_1d_source_terms.jl"),
+ l2 = [6.37048760275098e-5, 0.0002745658116815704, 4.436491725647962e-6, 8.872983451152218e-6],
+ linf = [0.00026747526881631956, 0.0012106730729152249, 9.098379777500165e-6, 1.8196759554278685e-5],
+ tspan = (0.0, 0.05))
+ end
+
+ @trixi_testset "elixir_shallowwater_quasi_1d_well_balanced.jl" begin
+ @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_quasi_1d_well_balanced.jl"),
+ l2 = [1.4250229186905198e-14, 2.495109919406496e-12, 7.408599286788738e-17, 2.7205812409138776e-16],
+ linf = [5.284661597215745e-14, 2.74056233065078e-12, 2.220446049250313e-16, 8.881784197001252e-16],
+ tspan = (0.0, 100.0))
+ end
end
end # module
diff --git a/test/test_tree_2d_advection.jl b/test/test_tree_2d_advection.jl
index 973d0caf88b..36cb1e882cc 100644
--- a/test/test_tree_2d_advection.jl
+++ b/test/test_tree_2d_advection.jl
@@ -25,10 +25,22 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
end
@trixi_testset "elixir_advection_restart.jl" begin
- @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
- # Expected errors are exactly the same as in the parallel test!
- l2 = [7.81674284320524e-6],
- linf = [6.314906965243505e-5])
+ using OrdinaryDiffEq: SSPRK43
+ println("═"^100)
+ println(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"))
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"),
+ alg = SSPRK43(), tspan = (0.0, 10.0))
+ l2_expected, linf_expected = analysis_callback(sol)
+
+ println("═"^100)
+ println(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"))
+   # Expected errors are exactly the same as in elixir_advection_extended.jl
+ trixi_include(@__MODULE__, joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"),
+ alg = SSPRK43())
+ l2_actual, linf_actual = analysis_callback(sol)
+
+ @test l2_actual == l2_expected
+ @test linf_actual == linf_expected
end
@trixi_testset "elixir_advection_mortar.jl" begin
diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl
index e1e3ad32e7d..1b8a261a60d 100644
--- a/test/test_tree_2d_euler.jl
+++ b/test/test_tree_2d_euler.jl
@@ -140,7 +140,7 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
@trixi_testset "elixir_euler_sedov_blast_wave_neuralnetwork_perssonperaire.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov_blast_wave_neuralnetwork_perssonperaire.jl"),
l2 = [0.0845430093623868, 0.09271459184623232, 0.09271459184623232, 0.4377291875101709],
- linf = [1.3608553480069898, 1.6822884847136004, 1.6822884847135997, 4.220147414536653],
+ linf = [1.3608553480069898, 1.6822884847136004, 1.6822884847135997, 4.2201475428867035],
maxiters = 30,
coverage_override = (maxiters=6,))
end
diff --git a/test/test_tree_2d_eulermulti.jl b/test/test_tree_2d_eulermulti.jl
index 606afca1034..0ae46a92ef8 100644
--- a/test/test_tree_2d_eulermulti.jl
+++ b/test/test_tree_2d_eulermulti.jl
@@ -20,11 +20,16 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem")
end
@trixi_testset "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl" begin
+ rm("out/deviations.txt", force=true)
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl"),
l2 = [81.52845664909304, 2.5455678559421346, 63229.190712645846, 0.19929478404550321, 0.011068604228443425],
linf = [249.21708417382013, 40.33299887640794, 174205.0118831558, 0.6881458768113586, 0.11274401158173972],
initial_refinement_level = 3,
- tspan = (0.0, 0.001))
+ tspan = (0.0, 0.001),
+ output_directory="out")
+ lines = readlines("out/deviations.txt")
+ @test lines[1] == "# iter, simu_time, rho1_min, rho2_min"
+ @test startswith(lines[end], "1")
end
@trixi_testset "elixir_eulermulti_ec.jl" begin
diff --git a/test/test_trixi.jl b/test/test_trixi.jl
index ddace6b4fbe..f2cd0cab94d 100644
--- a/test/test_trixi.jl
+++ b/test/test_trixi.jl
@@ -5,7 +5,7 @@ import Trixi
# inside an elixir.
"""
@test_trixi_include(elixir; l2=nothing, linf=nothing,
- atol=10*eps(), rtol=0.001,
+ atol=500*eps(), rtol=sqrt(eps()),
parameters...)
Test Trixi by calling `trixi_include(elixir; parameters...)`.
diff --git a/utils/trixi2txt.jl b/utils/trixi2txt.jl
index b386f150da4..12a3d46760e 100644
--- a/utils/trixi2txt.jl
+++ b/utils/trixi2txt.jl
@@ -70,7 +70,7 @@ function trixi2txt(filename::AbstractString...;
center_level_0, length_level_0, leaf_cells, coordinates, levels = read_meshfile(meshfile)
# Read data
- labels, data, n_elements, n_nodes, element_variables, time = read_datafile(filename)
+ labels, data, n_elements, n_nodes, element_variables, node_variables, time = read_datafile(filename)
# Check if dimensions match
if length(leaf_cells) != n_elements
@@ -263,7 +263,16 @@ function read_datafile(filename::String)
index += 1
end
- return labels, data, n_elements, n_nodes, element_variables, time
+ # Extract node variable arrays
+ node_variables = Dict{String, Union{Vector{Float64}, Vector{Int}}}()
+ index = 1
+ while haskey(file, "node_variables_$index")
+ varname = read(attributes(file["node_variables_$index"])["name"])
+ node_variables[varname] = read(file["node_variables_$index"])
+ index += 1
+ end
+
+ return labels, data, n_elements, n_nodes, element_variables, node_variables, time
end
end