diff --git a/Docs/sphinx/manual/LMeXControls.rst b/Docs/sphinx/manual/LMeXControls.rst
index c7adda03..3c9346d0 100644
--- a/Docs/sphinx/manual/LMeXControls.rst
+++ b/Docs/sphinx/manual/LMeXControls.rst
@@ -246,6 +246,8 @@ PeleLMeX algorithm
    peleLM.num_divu_iter = 1                   # [OPT, DEF=1] Number of divU iterations to get initial dt estimate
    peleLM.do_init_proj = 1                    # [OPT, DEF=1] Control over initial projection
    peleLM.advection_scheme = Godunov_BDS      # [OPT, DEF=Godunov_PLM] Advection scheme: Godunov_PLM, Godunov_PPM or Godunov_BDS
+   peleLM.chi_correction_type = DivuFirstIter # [OPT, DEF=DivuEveryIter] When to compute divu for MAC proj divu constraint [DivuEveryIter, DivuFirstIter, NoDivu]
+   peleLM.print_chi_convergence = 1           # [OPT, DEF=(peleLM.v > 1)] Boolean flag on whether to print size of chi correction on each SDC iter
    peleLM.incompressible = 0                  # [OPT, DEF=0] Enable to run fully incompressible, scalar advance is bypassed
    peleLM.m_rho = 1.17                        # [OPT, DEF=-1] If incompressible, density value [MKS]
    peleLM.m_mu = 1.8e-5                       # [OPT, DEF=-1] If incompressible, kinematic visc. value [MKS]
diff --git a/Docs/sphinx/manual/Model.rst b/Docs/sphinx/manual/Model.rst
index 9ef481cb..e4910077 100644
--- a/Docs/sphinx/manual/Model.rst
+++ b/Docs/sphinx/manual/Model.rst
@@ -237,6 +237,7 @@ the time-explicit advective fluxes for :math:`U`, :math:`\rho h`, and :math:`\rho Y_m`:
 
       S^{MAC} = \frac{1}{2}(\widehat S^n + \widehat S^{n+1,(k)}) + \sum_{i=0}^k \frac{1}{p_{therm}^{n+1,(i)}}\frac{p_{therm}^{n+1,(i)}-p_0}{\Delta t}
 
+   In this update, it is optional whether to update the :math:`\widehat S^{n+1}` term on every SDC iteration, or to simply compute it for :math:`k = 0` and then hold it constant, with the :math:`\chi` correction iterations accounting for changes during the SDC iterations. The latter strategy has been observed to improve convergence in some cases.
 
 #. Perform **Step 1** to obtain the time-centered, staggered :math:`U^{ADV}`
 
diff --git a/Source/PeleLMeX.H b/Source/PeleLMeX.H
index eb5bd3c8..c0c13f67 100644
--- a/Source/PeleLMeX.H
+++ b/Source/PeleLMeX.H
@@ -1906,6 +1906,8 @@ public:
   // SDC
   int m_nSDCmax = 1;
   int m_sdcIter = 0;
+  bool m_print_chi_convergence = false;
+  int m_chi_correction_type{ChiCorrectionType::DivuEveryIter};
 
   // DeltaT iterations
   int m_deltaT_verbose = 0;
diff --git a/Source/PeleLMeX_Setup.cpp b/Source/PeleLMeX_Setup.cpp
index 8430a812..1a76b595 100644
--- a/Source/PeleLMeX_Setup.cpp
+++ b/Source/PeleLMeX_Setup.cpp
@@ -466,6 +466,9 @@ PeleLM::readParameters()
   // advance
   // -----------------------------------------
   pp.query("sdc_iterMax", m_nSDCmax);
+  m_print_chi_convergence = m_verbose > 1;
+  pp.query("print_chi_convergence", m_print_chi_convergence);
+  parseUserKey(pp, "chi_correction_type", chicorr, m_chi_correction_type);
   pp.query("floor_species", m_floor_species);
   pp.query("dPdt_factor", m_dpdtFactor);
   pp.query("memory_checks", m_checkMem);
diff --git a/Source/PeleLMeX_UMac.cpp b/Source/PeleLMeX_UMac.cpp
index fd1c17ce..629f75df 100644
--- a/Source/PeleLMeX_UMac.cpp
+++ b/Source/PeleLMeX_UMac.cpp
@@ -114,18 +114,48 @@ PeleLM::addChiIncrement(
       auto const& chiInc_ar = chiIncr[lev].const_array(mfi);
       auto const& chi_ar = advData->chi[lev].array(mfi);
       auto const& mac_divu_ar = advData->mac_divu[lev].array(mfi);
-      amrex::ParallelFor(
-        gbx, [chi_ar, chiInc_ar, mac_divu_ar,
-              a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
-          if (a_sdcIter == 1) {
-            chi_ar(i, j, k) = chiInc_ar(i, j, k);
-          } else {
-            chi_ar(i, j, k) += chiInc_ar(i, j, k);
-          }
-          mac_divu_ar(i, j, k) += chi_ar(i, j, k);
-        });
+      if (m_chi_correction_type == ChiCorrectionType::DivuFirstIter) {
+        amrex::ParallelFor(
+          gbx, [chi_ar, chiInc_ar, mac_divu_ar,
+                a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
+            if (a_sdcIter == 1) {
+              chi_ar(i, j, k) = chiInc_ar(i, j, k) + mac_divu_ar(i, j, k);
+            } else {
+              chi_ar(i, j, k) += chiInc_ar(i, j, k);
+            }
+            mac_divu_ar(i, j, k) = chi_ar(i, j, k);
+          });
+      } else if (m_chi_correction_type == ChiCorrectionType::NoDivu) {
+        amrex::ParallelFor(
+          gbx, [chi_ar, chiInc_ar, mac_divu_ar,
+                a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
+            if (a_sdcIter == 1) {
+              chi_ar(i, j, k) = chiInc_ar(i, j, k);
+            } else {
+              chi_ar(i, j, k) += chiInc_ar(i, j, k);
+            }
+            mac_divu_ar(i, j, k) = chi_ar(i, j, k);
+          });
+      } else { // Default: use updated divu every iteration
+        amrex::ParallelFor(
+          gbx, [chi_ar, chiInc_ar, mac_divu_ar,
+                a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
+            if (a_sdcIter == 1) {
+              chi_ar(i, j, k) = chiInc_ar(i, j, k);
+            } else {
+              chi_ar(i, j, k) += chiInc_ar(i, j, k);
+            }
+            mac_divu_ar(i, j, k) += chi_ar(i, j, k);
+          });
+      }
     }
   }
+  if (m_print_chi_convergence) {
+    amrex::Real max_corr =
+      MLNorm0(GetVecOfConstPtrs(chiIncr)) * m_dt / m_dpdtFactor;
+    amrex::Print() << " Before SDC " << a_sdcIter
+                   << ": max relative P mismatch is " << max_corr << std::endl;
+  }
 }
 
 void
diff --git a/Source/PeleLMeX_UserKeys.H b/Source/PeleLMeX_UserKeys.H
index 1e311d72..09790800 100644
--- a/Source/PeleLMeX_UserKeys.H
+++ b/Source/PeleLMeX_UserKeys.H
@@ -160,4 +160,22 @@ struct LoadBalanceMethod
     "load_balancing_method", "chem_load_balancing_method"};
 };
 const LoadBalanceMethod lbmethod;
+
+/**
+ * \brief struct holding PeleLMeX DivU/chi constraint options
+          default is DivuEveryIter
+ */
+struct ChiCorrectionType
+{
+  ChiCorrectionType() = default;
+  enum { DivuEveryIter = 0, DivuFirstIter, NoDivu };
+  const std::map<std::string, int> str2int = {
+    {"divueveryiter", DivuEveryIter},
+    {"divufirstiter", DivuFirstIter},
+    {"nodivu", NoDivu},
+    {"default", DivuEveryIter}};
+  const amrex::Array<std::string, 1> searchKey{"chi_correction_type"};
+};
+const ChiCorrectionType chicorr;
+
 #endif