From 1447a9c66b9d1455e7eaec1447677d3c6787169d Mon Sep 17 00:00:00 2001
From: asalmgren
Date: Thu, 21 Nov 2024 17:11:17 +0000
Subject: [PATCH] Deployed from erf-model/ERF

---
 ERF__make__buoyancy_8cpp.html | 460 +++++++++++++++++-----------------
 1 file changed, 233 insertions(+), 227 deletions(-)

diff --git a/ERF__make__buoyancy_8cpp.html b/ERF__make__buoyancy_8cpp.html
index 035611240..d77b30028 100644
--- a/ERF__make__buoyancy_8cpp.html
+++ b/ERF__make__buoyancy_8cpp.html
@@ -266,242 +266,248 @@

76  if (solverChoice.moisture_type == MoistureType::None) {
77  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
78  {
- 79  buoyancy_fab(i, j, k) = buoyancy_dry_anelastic(i,j,k,
- 80  grav_gpu[2],
- 81  r0_arr,p0_arr,cell_data);
- 82  });
- 83  } else {
- 84  // NOTE: For decomposition in the vertical direction, klo may not
- 85  // reside in the valid box and this call will yield an out
- 86  // of bounds error since it depends upon the surface theta_l
- 87  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
- 88  {
- 89  buoyancy_fab(i, j, k) = buoyancy_moist_anelastic(i,j,k,
- 90  grav_gpu[2],rv_over_rd,
- 91  r0_arr,th0_arr,cell_data);
- 92  });
- 93  }
- 94  } // mfi
- 95  }
- 96  else
- 97  {
- 98  // ******************************************************************************************
- 99  // Dry versions of buoyancy expressions (type 1 and type 2/3 -- types 2 and 3 are equivalent)
- 100  // ******************************************************************************************
- 101  if (solverChoice.moisture_type == MoistureType::None)
- 102  {
- 103  int n_q_dry = 0;
- 104  if (solverChoice.buoyancy_type == 1) {
- 105 #ifdef _OPENMP
- 106 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
- 107 #endif
- 108  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
- 109  {
- 110  Box tbz = mfi.tilebox();
- 111 
- 112  // We don't compute a source term for z-momentum on the bottom or top domain boundary
- 113  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
- 114  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
- 115 
- 116  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
- 117  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
- 118 
- 119  // Base state density
- 120  const Array4<const Real>& r0_arr = r0.const_array(mfi);
+ 79  //
+ 80  // Return -rho0 g (thetaprime / theta0)
+ 81  //
+ 82  buoyancy_fab(i, j, k) = buoyancy_dry_anelastic(i,j,k,
+ 83  grav_gpu[2],
+ 84  r0_arr,p0_arr,cell_data);
+ 85  });
+ 86  } else {
+ 87  // NOTE: For decomposition in the vertical direction, klo may not
+ 88  // reside in the valid box and this call will yield an out
+ 89  // of bounds error since it depends upon the surface theta_l
+ 90  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
+ 91  {
+ 92  //
+ 93  // Return -rho0 g (thetaprime / theta0)
+ 94  //
+ 95  buoyancy_fab(i, j, k) = buoyancy_moist_anelastic(i,j,k,
+ 96  grav_gpu[2],rv_over_rd,
+ 97  r0_arr,th0_arr,cell_data);
+ 98  });
+ 99  }
+ 100  } // mfi
+ 101  }
+ 102  else
+ 103  {
+ 104  // ******************************************************************************************
+ 105  // Dry versions of buoyancy expressions (type 1 and type 2/3 -- types 2 and 3 are equivalent)
+ 106  // ******************************************************************************************
+ 107  if (solverChoice.moisture_type == MoistureType::None)
+ 108  {
+ 109  int n_q_dry = 0;
+ 110  if (solverChoice.buoyancy_type == 1) {
+ 111 #ifdef _OPENMP
+ 112 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+ 113 #endif
+ 114  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
+ 115  {
+ 116  Box tbz = mfi.tilebox();
+ 117 
+ 118  // We don't compute a source term for z-momentum on the bottom or top domain boundary
+ 119  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
+ 120  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
121 
- 122  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
- 123  {
- 124  buoyancy_fab(i, j, k) = buoyancy_type1(i,j,k,n_q_dry,grav_gpu[2],r0_arr,cell_data);
- 125  });
- 126  } // mfi
+ 122  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
+ 123  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
+ 124 
+ 125  // Base state density
+ 126  const Array4<const Real>& r0_arr = r0.const_array(mfi);
127 
- 128  }
- 129  else // (buoyancy_type != 1)
- 130  {
- 131  // We now use the base state rather than planar average because
- 132  // 1) we don't want to average over the limited region of the fine level if doing multilevel.
- 133  // 2) it's cheaper to use the base state than to compute the horizontal averages
- 134  // 3) when running in a smallish domain, the horizontal average may evolve over time,
- 135  // which is not necessarily the intended behavior
- 136  //
- 137 #ifdef _OPENMP
- 138 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
- 139 #endif
- 140  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
- 141  {
- 142  Box tbz = mfi.tilebox();
- 143 
- 144  // We don't compute a source term for z-momentum on the bottom or top boundary
- 145  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
- 146  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
- 147 
- 148  // Base state density and pressure
- 149  const Array4<const Real>& r0_arr = r0.const_array(mfi);
- 150  const Array4<const Real>& p0_arr = p0.const_array(mfi);
- 151  const Array4<const Real>& th0_arr = th0.const_array(mfi);
- 152 
- 153  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
- 154  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
- 155 
- 156  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
- 157  {
- 158  buoyancy_fab(i, j, k) = buoyancy_dry_default(i,j,k,
- 159  grav_gpu[2],rd_over_cp,
- 160  r0_arr,p0_arr,th0_arr,cell_data);
- 161  });
- 162  } // mfi
- 163  } // buoyancy_type
- 164  } // moisture type
- 165  else
- 166  {
- 167  // ******************************************************************************************
- 168  // Moist versions of buoyancy expressions
- 169  // ******************************************************************************************
- 170 
- 171  if ( (solverChoice.moisture_type == MoistureType::Kessler_NoRain) ||
- 172  (solverChoice.moisture_type == MoistureType::SAM) ||
- 173  (solverChoice.moisture_type == MoistureType::SAM_NoPrecip_NoIce) )
- 174  {
- 175  AMREX_ALWAYS_ASSERT(solverChoice.buoyancy_type == 1);
- 176  }
- 177 
- 178  if (solverChoice.buoyancy_type == 1) {
- 179 
- 180 #ifdef _OPENMP
- 181 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
- 182 #endif
- 183  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
- 184  {
- 185  Box tbz = mfi.tilebox();
- 186 
- 187  // We don't compute a source term for z-momentum on the bottom or top domain boundary
- 188  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
- 189  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
- 190 
- 191  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
- 192  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
- 193 
- 194  // Base state density
- 195  const Array4<const Real>& r0_arr = r0.const_array(mfi);
+ 128  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
+ 129  {
+ 130  buoyancy_fab(i, j, k) = buoyancy_type1(i,j,k,n_q_dry,grav_gpu[2],r0_arr,cell_data);
+ 131  });
+ 132  } // mfi
+ 133 
+ 134  }
+ 135  else // (buoyancy_type != 1)
+ 136  {
+ 137  // We now use the base state rather than planar average because
+ 138  // 1) we don't want to average over the limited region of the fine level if doing multilevel.
+ 139  // 2) it's cheaper to use the base state than to compute the horizontal averages
+ 140  // 3) when running in a smallish domain, the horizontal average may evolve over time,
+ 141  // which is not necessarily the intended behavior
+ 142  //
+ 143 #ifdef _OPENMP
+ 144 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+ 145 #endif
+ 146  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
+ 147  {
+ 148  Box tbz = mfi.tilebox();
+ 149 
+ 150  // We don't compute a source term for z-momentum on the bottom or top boundary
+ 151  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
+ 152  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
+ 153 
+ 154  // Base state density and pressure
+ 155  const Array4<const Real>& r0_arr = r0.const_array(mfi);
+ 156  const Array4<const Real>& p0_arr = p0.const_array(mfi);
+ 157  const Array4<const Real>& th0_arr = th0.const_array(mfi);
+ 158 
+ 159  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
+ 160  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
+ 161 
+ 162  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
+ 163  {
+ 164  buoyancy_fab(i, j, k) = buoyancy_dry_default(i,j,k,
+ 165  grav_gpu[2],rd_over_cp,
+ 166  r0_arr,p0_arr,th0_arr,cell_data);
+ 167  });
+ 168  } // mfi
+ 169  } // buoyancy_type
+ 170  } // moisture type
+ 171  else
+ 172  {
+ 173  // ******************************************************************************************
+ 174  // Moist versions of buoyancy expressions
+ 175  // ******************************************************************************************
+ 176 
+ 177  if ( (solverChoice.moisture_type == MoistureType::Kessler_NoRain) ||
+ 178  (solverChoice.moisture_type == MoistureType::SAM) ||
+ 179  (solverChoice.moisture_type == MoistureType::SAM_NoPrecip_NoIce) )
+ 180  {
+ 181  AMREX_ALWAYS_ASSERT(solverChoice.buoyancy_type == 1);
+ 182  }
+ 183 
+ 184  if (solverChoice.buoyancy_type == 1) {
+ 185 
+ 186 #ifdef _OPENMP
+ 187 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+ 188 #endif
+ 189  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
+ 190  {
+ 191  Box tbz = mfi.tilebox();
+ 192 
+ 193  // We don't compute a source term for z-momentum on the bottom or top domain boundary
+ 194  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
+ 195  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
196 
- 197  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
- 198  {
- 199  buoyancy_fab(i, j, k) = buoyancy_type1(i,j,k,n_qstate,
- 200  grav_gpu[2],r0_arr,cell_data);
- 201  });
- 202  } // mfi
- 203 
- 204  } else {
- 205 
- 206  PlaneAverage state_ave(&(S_data[IntVars::cons]), geom, solverChoice.ave_plane);
- 207  PlaneAverage prim_ave(&S_prim , geom, solverChoice.ave_plane);
- 208 
- 209  // Compute horizontal averages of all components of each field
- 210  state_ave.compute_averages(ZDir(), state_ave.field());
- 211  prim_ave.compute_averages(ZDir(), prim_ave.field());
- 212 
- 213  int ncell = state_ave.ncell_line();
+ 197  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
+ 198  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
+ 199 
+ 200  // Base state density
+ 201  const Array4<const Real>& r0_arr = r0.const_array(mfi);
+ 202 
+ 203  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
+ 204  {
+ 205  buoyancy_fab(i, j, k) = buoyancy_type1(i,j,k,n_qstate,
+ 206  grav_gpu[2],r0_arr,cell_data);
+ 207  });
+ 208  } // mfi
+ 209 
+ 210  } else {
+ 211 
+ 212  PlaneAverage state_ave(&(S_data[IntVars::cons]), geom, solverChoice.ave_plane);
+ 213  PlaneAverage prim_ave(&S_prim , geom, solverChoice.ave_plane);
214 
- 215  Gpu::HostVector <Real> rho_h(ncell), theta_h(ncell);
- 216  Gpu::DeviceVector<Real> rho_d(ncell), theta_d(ncell);
- 217 
- 218  state_ave.line_average(Rho_comp, rho_h);
- 219  Gpu::copyAsync(Gpu::hostToDevice, rho_h.begin(), rho_h.end(), rho_d.begin());
+ 215  // Compute horizontal averages of all components of each field
+ 216  state_ave.compute_averages(ZDir(), state_ave.field());
+ 217  prim_ave.compute_averages(ZDir(), prim_ave.field());
+ 218 
+ 219  int ncell = state_ave.ncell_line();
220 
- 221  prim_ave.line_average(PrimTheta_comp, theta_h);
- 222  Gpu::copyAsync(Gpu::hostToDevice, theta_h.begin(), theta_h.end(), theta_d.begin());
+ 221  Gpu::HostVector <Real> rho_h(ncell), theta_h(ncell);
+ 222  Gpu::DeviceVector<Real> rho_d(ncell), theta_d(ncell);
223 
- 224  Real* rho_d_ptr = rho_d.data();
- 225  Real* theta_d_ptr = theta_d.data();
+ 224  state_ave.line_average(Rho_comp, rho_h);
+ 225  Gpu::copyAsync(Gpu::hostToDevice, rho_h.begin(), rho_h.end(), rho_d.begin());
226 
- 227  // Average valid moisture vars
- 228  Gpu::HostVector <Real> qv_h(ncell) , qc_h(ncell) , qp_h(ncell);
- 229  Gpu::DeviceVector<Real> qv_d(ncell,0.0), qc_d(ncell,0.0), qp_d(ncell,0.0);
- 230  if (n_qstate >=1) {
- 231  prim_ave.line_average(PrimQ1_comp, qv_h);
- 232  Gpu::copyAsync(Gpu::hostToDevice, qv_h.begin(), qv_h.end(), qv_d.begin());
- 233  }
- 234  if (n_qstate >=2) {
- 235  prim_ave.line_average(PrimQ2_comp, qc_h);
- 236  Gpu::copyAsync(Gpu::hostToDevice, qc_h.begin(), qc_h.end(), qc_d.begin());
- 237  }
- 238  if (n_qstate >=3) {
- 239  prim_ave.line_average(PrimQ3_comp, qp_h);
- 240  Gpu::copyAsync(Gpu::hostToDevice, qp_h.begin(), qp_h.end(), qp_d.begin());
- 241  }
- 242  Real* qv_d_ptr = qv_d.data();
- 243  Real* qc_d_ptr = qc_d.data();
- 244  Real* qp_d_ptr = qp_d.data();
- 245 
- 246  if (solverChoice.buoyancy_type == 2 || solverChoice.buoyancy_type == 4 ) {
- 247 
- 248 #ifdef _OPENMP
- 249 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
- 250 #endif
- 251  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
- 252  {
- 253  Box tbz = mfi.tilebox();
- 254 
- 255  // We don't compute a source term for z-momentum on the bottom or top domain boundary
- 256  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
- 257  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
- 258 
- 259  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
+ 227  prim_ave.line_average(PrimTheta_comp, theta_h);
+ 228  Gpu::copyAsync(Gpu::hostToDevice, theta_h.begin(), theta_h.end(), theta_d.begin());
+ 229 
+ 230  Real* rho_d_ptr = rho_d.data();
+ 231  Real* theta_d_ptr = theta_d.data();
+ 232 
+ 233  // Average valid moisture vars
+ 234  Gpu::HostVector <Real> qv_h(ncell) , qc_h(ncell) , qp_h(ncell);
+ 235  Gpu::DeviceVector<Real> qv_d(ncell,0.0), qc_d(ncell,0.0), qp_d(ncell,0.0);
+ 236  if (n_qstate >=1) {
+ 237  prim_ave.line_average(PrimQ1_comp, qv_h);
+ 238  Gpu::copyAsync(Gpu::hostToDevice, qv_h.begin(), qv_h.end(), qv_d.begin());
+ 239  }
+ 240  if (n_qstate >=2) {
+ 241  prim_ave.line_average(PrimQ2_comp, qc_h);
+ 242  Gpu::copyAsync(Gpu::hostToDevice, qc_h.begin(), qc_h.end(), qc_d.begin());
+ 243  }
+ 244  if (n_qstate >=3) {
+ 245  prim_ave.line_average(PrimQ3_comp, qp_h);
+ 246  Gpu::copyAsync(Gpu::hostToDevice, qp_h.begin(), qp_h.end(), qp_d.begin());
+ 247  }
+ 248  Real* qv_d_ptr = qv_d.data();
+ 249  Real* qc_d_ptr = qc_d.data();
+ 250  Real* qp_d_ptr = qp_d.data();
+ 251 
+ 252  if (solverChoice.buoyancy_type == 2 || solverChoice.buoyancy_type == 4 ) {
+ 253 
+ 254 #ifdef _OPENMP
+ 255 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+ 256 #endif
+ 257  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
+ 258  {
+ 259  Box tbz = mfi.tilebox();
260 
- 261  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
- 262  const Array4<const Real> & cell_prim = S_prim.array(mfi);
- 263 
- 264  // TODO: ice has not been dealt with (q1=qv, q2=qv, q3=qp)
- 265  if (solverChoice.buoyancy_type == 2) {
- 266  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
- 267  {
- 268  buoyancy_fab(i, j, k) = buoyancy_type2(i,j,k,n_qstate,grav_gpu[2],
- 269  rho_d_ptr,theta_d_ptr,
- 270  qv_d_ptr,qc_d_ptr,qp_d_ptr,
- 271  cell_prim,cell_data);
- 272  });
- 273  } else {
- 274  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
- 275  {
- 276  buoyancy_fab(i, j, k) = buoyancy_type4(i,j,k,n_qstate,grav_gpu[2],
- 277  rho_d_ptr,theta_d_ptr,
- 278  qv_d_ptr,qc_d_ptr,qp_d_ptr,
- 279  cell_prim,cell_data);
- 280  });
- 281  }
- 282  } // mfi
- 283 
- 284  } else if (solverChoice.buoyancy_type == 3) {
- 285 #ifdef _OPENMP
- 286 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
- 287 #endif
- 288  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
- 289  {
- 290  Box tbz = mfi.tilebox();
- 291 
- 292  // We don't compute a source term for z-momentum on the bottom or top domain boundary
- 293  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
- 294  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
- 295 
- 296  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
+ 261  // We don't compute a source term for z-momentum on the bottom or top domain boundary
+ 262  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
+ 263  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
+ 264 
+ 265  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
+ 266 
+ 267  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
+ 268  const Array4<const Real> & cell_prim = S_prim.array(mfi);
+ 269 
+ 270  // TODO: ice has not been dealt with (q1=qv, q2=qv, q3=qp)
+ 271  if (solverChoice.buoyancy_type == 2) {
+ 272  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
+ 273  {
+ 274  buoyancy_fab(i, j, k) = buoyancy_type2(i,j,k,n_qstate,grav_gpu[2],
+ 275  rho_d_ptr,theta_d_ptr,
+ 276  qv_d_ptr,qc_d_ptr,qp_d_ptr,
+ 277  cell_prim,cell_data);
+ 278  });
+ 279  } else {
+ 280  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
+ 281  {
+ 282  buoyancy_fab(i, j, k) = buoyancy_type4(i,j,k,n_qstate,grav_gpu[2],
+ 283  rho_d_ptr,theta_d_ptr,
+ 284  qv_d_ptr,qc_d_ptr,qp_d_ptr,
+ 285  cell_prim,cell_data);
+ 286  });
+ 287  }
+ 288  } // mfi
+ 289 
+ 290  } else if (solverChoice.buoyancy_type == 3) {
+ 291 #ifdef _OPENMP
+ 292 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+ 293 #endif
+ 294  for ( MFIter mfi(buoyancy,TilingIfNotGPU()); mfi.isValid(); ++mfi)
+ 295  {
+ 296  Box tbz = mfi.tilebox();
297 
- 298  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
- 299  const Array4<const Real> & cell_prim = S_prim.array(mfi);
- 300 
- 301  // TODO: ice has not been dealt with (q1=qv, q2=qv, q3=qp)
- 302 
- 303  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
- 304  {
- 305  buoyancy_fab(i, j, k) = buoyancy_type3(i,j,k,n_qstate,grav_gpu[2],
- 306  rho_d_ptr,theta_d_ptr,qv_d_ptr,
- 307  cell_prim,cell_data);
- 308  });
- 309  } // mfi
- 310  } // buoyancy_type
- 311  } // not buoyancy_type == 1
- 312  } // has moisture
- 313  } // anelastic?
- 314 }
+ 298  // We don't compute a source term for z-momentum on the bottom or top domain boundary
+ 299  if (tbz.smallEnd(2) == klo) tbz.growLo(2,-1);
+ 300  if (tbz.bigEnd(2) == khi) tbz.growHi(2,-1);
+ 301 
+ 302  const Array4< Real> & buoyancy_fab = buoyancy.array(mfi);
+ 303 
+ 304  const Array4<const Real> & cell_data = S_data[IntVars::cons].array(mfi);
+ 305  const Array4<const Real> & cell_prim = S_prim.array(mfi);
+ 306 
+ 307  // TODO: ice has not been dealt with (q1=qv, q2=qv, q3=qp)
+ 308 
+ 309  ParallelFor(tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k)
+ 310  {
+ 311  buoyancy_fab(i, j, k) = buoyancy_type3(i,j,k,n_qstate,grav_gpu[2],
+ 312  rho_d_ptr,theta_d_ptr,qv_d_ptr,
+ 313  cell_prim,cell_data);
+ 314  });
+ 315  } // mfi
+ 316  } // buoyancy_type
+ 317  } // not buoyancy_type == 1
+ 318  } // has moisture
+ 319  } // anelastic?
+ 320 }
Cross-references on this page (unchanged Doxygen tooltip context):
constexpr amrex::Real R_v   (Definition: ERF_Constants.H:11)
constexpr amrex::Real R_d   (Definition: ERF_Constants.H:10)
DirectionSelector< 2 > ZDir (Definition: ERF_DirectionSelector.H:38)
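
The comments introduced by this patch describe the value returned in the anelastic branches as "-rho0 g (thetaprime / theta0)". Written out as a formula (a sketch based only on those comments; taking theta' = theta - theta0 as the perturbation from the base state is an inference from the base-state arrays r0_arr and th0_arr, not something stated in the diff):

    % anelastic buoyancy as documented by the new inline comments
    B_{i,j,k} \;=\; -\,\rho_0 \, g \, \frac{\theta'}{\theta_0},
    \qquad
    \theta' \;=\; \theta - \theta_0 .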