Skip to content

Commit

Permalink
New Options for Chi Correction in MAC Projections (#401)
Browse files Browse the repository at this point in the history
* document new chi correction stuff

* add new options for chi correction

* fix typo in docs

* clang-format
  • Loading branch information
baperry2 authored Sep 11, 2024
1 parent a802fd9 commit 04d0221
Show file tree
Hide file tree
Showing 6 changed files with 66 additions and 10 deletions.
2 changes: 2 additions & 0 deletions Docs/sphinx/manual/LMeXControls.rst
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,8 @@ PeleLMeX algorithm
peleLM.num_divu_iter = 1 # [OPT, DEF=1] Number of divU iterations to get initial dt estimate
peleLM.do_init_proj = 1 # [OPT, DEF=1] Control over initial projection
peleLM.advection_scheme = Godunov_BDS # [OPT, DEF=Godunov_PLM] Advection scheme: Godunov_PLM, Godunov_PPM or Godunov_BDS
peleLM.chi_correction_type = DivuFirstIter # [OPT, DEF=DivuEveryIter] When to compute divu for MAC proj divu constraint [DivuEveryIter, DivuFirstIter, NoDivu]
peleLM.print_chi_convergence = 1 # [OPT, DEF=(peleLM.v > 1)] Boolean flag controlling whether to print the size of the chi correction on each SDC iteration
peleLM.incompressible = 0 # [OPT, DEF=0] Enable to run fully incompressible, scalar advance is bypassed
peleLM.m_rho = 1.17 # [OPT, DEF=-1] If incompressible, density value [MKS]
peleLM.m_mu = 1.8e-5 # [OPT, DEF=-1] If incompressible, kinematic visc. value [MKS]
Expand Down
1 change: 1 addition & 0 deletions Docs/sphinx/manual/Model.rst
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,7 @@ the time-explicit advective fluxes for :math:`U`, :math:`\rho h`, and :math:`\rh
S^{MAC} = \frac{1}{2}(\widehat S^n + \widehat S^{n+1,(k)}) + \sum_{i=0}^k \frac{1}{p_{therm}^{n+1,(i)}}\frac{p_{therm}^{n+1,(i)}-p_0}{\Delta t}
In this update, it is optional whether to update the :math:`\widehat S^{n+1}` term on every SDC iteration, or to simply compute it for :math:`k = 0` and then hold it constant, with the :math:`\chi` correction iterations accounting for changes during the SDC iterations. The latter strategy has been observed to improve convergence in some cases.

#. Perform **Step 1** to obtain the time-centered, staggered :math:`U^{ADV}`

Expand Down
2 changes: 2 additions & 0 deletions Source/PeleLMeX.H
Original file line number Diff line number Diff line change
Expand Up @@ -1906,6 +1906,8 @@ public:
// SDC
int m_nSDCmax = 1;
int m_sdcIter = 0;
bool m_print_chi_convergence = false;
int m_chi_correction_type{ChiCorrectionType::DivuEveryIter};

// DeltaT iterations
int m_deltaT_verbose = 0;
Expand Down
3 changes: 3 additions & 0 deletions Source/PeleLMeX_Setup.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -466,6 +466,9 @@ PeleLM::readParameters()
// advance
// -----------------------------------------
pp.query("sdc_iterMax", m_nSDCmax);
m_print_chi_convergence = m_verbose > 1;
pp.query("print_chi_convergence", m_print_chi_convergence);
parseUserKey(pp, "chi_correction_type", chicorr, m_chi_correction_type);
pp.query("floor_species", m_floor_species);
pp.query("dPdt_factor", m_dpdtFactor);
pp.query("memory_checks", m_checkMem);
Expand Down
50 changes: 40 additions & 10 deletions Source/PeleLMeX_UMac.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -114,18 +114,48 @@ PeleLM::addChiIncrement(
auto const& chiInc_ar = chiIncr[lev].const_array(mfi);
auto const& chi_ar = advData->chi[lev].array(mfi);
auto const& mac_divu_ar = advData->mac_divu[lev].array(mfi);
amrex::ParallelFor(
gbx, [chi_ar, chiInc_ar, mac_divu_ar,
a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
if (a_sdcIter == 1) {
chi_ar(i, j, k) = chiInc_ar(i, j, k);
} else {
chi_ar(i, j, k) += chiInc_ar(i, j, k);
}
mac_divu_ar(i, j, k) += chi_ar(i, j, k);
});
if (m_chi_correction_type == ChiCorrectionType::DivuFirstIter) {
amrex::ParallelFor(
gbx, [chi_ar, chiInc_ar, mac_divu_ar,
a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
if (a_sdcIter == 1) {
chi_ar(i, j, k) = chiInc_ar(i, j, k) + mac_divu_ar(i, j, k);
} else {
chi_ar(i, j, k) += chiInc_ar(i, j, k);
}
mac_divu_ar(i, j, k) = chi_ar(i, j, k);
});
} else if (m_chi_correction_type == ChiCorrectionType::NoDivu) {
amrex::ParallelFor(
gbx, [chi_ar, chiInc_ar, mac_divu_ar,
a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
if (a_sdcIter == 1) {
chi_ar(i, j, k) = chiInc_ar(i, j, k);
} else {
chi_ar(i, j, k) += chiInc_ar(i, j, k);
}
mac_divu_ar(i, j, k) = chi_ar(i, j, k);
});
} else { // Default: use updated divu every iteration
amrex::ParallelFor(
gbx, [chi_ar, chiInc_ar, mac_divu_ar,
a_sdcIter] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
if (a_sdcIter == 1) {
chi_ar(i, j, k) = chiInc_ar(i, j, k);
} else {
chi_ar(i, j, k) += chiInc_ar(i, j, k);
}
mac_divu_ar(i, j, k) += chi_ar(i, j, k);
});
}
}
}
if (m_print_chi_convergence) {
amrex::Real max_corr =
MLNorm0(GetVecOfConstPtrs(chiIncr)) * m_dt / m_dpdtFactor;
amrex::Print() << " Before SDC " << a_sdcIter
<< ": max relative P mismatch is " << max_corr << std::endl;
}
}

void
Expand Down
18 changes: 18 additions & 0 deletions Source/PeleLMeX_UserKeys.H
Original file line number Diff line number Diff line change
Expand Up @@ -160,4 +160,22 @@ struct LoadBalanceMethod
"load_balancing_method", "chem_load_balancing_method"};
};
const LoadBalanceMethod lbmethod;

/**
 * \brief Struct holding the PeleLMeX DivU/chi constraint options used when
 *        assembling the MAC-projection divergence constraint
 *        (input key: chi_correction_type).
 *
 * Default is DivuEveryIter. The struct is consumed by parseUserKey(), which
 * maps the user-supplied string onto one of the integer enum values below.
 */
struct ChiCorrectionType
{
  ChiCorrectionType() = default;
  // DivuEveryIter: recompute divu on every SDC iteration (default behavior)
  // DivuFirstIter: compute divu on the first SDC iteration only, then let the
  //                chi correction account for changes on later iterations
  // NoDivu:        use only the accumulated chi correction (no divu term)
  enum { DivuEveryIter = 0, DivuFirstIter, NoDivu };
  // Map of lower-cased user input strings to enum values;
  // "default" is an alias for DivuEveryIter.
  const std::map<const std::string, int> str2int = {
    {"divueveryiter", DivuEveryIter},
    {"divufirstiter", DivuFirstIter},
    {"nodivu", NoDivu},
    {"default", DivuEveryIter}};
  // ParmParse key(s) searched for this option.
  const amrex::Array<std::string, 1> searchKey{"chi_correction_type"};
};
// File-scope instance handed to parseUserKey() when reading run-time inputs.
const ChiCorrectionType chicorr;

#endif

0 comments on commit 04d0221

Please sign in to comment.