diff --git a/docs/user/next/exercises/2_divergence_exercise_solution.ipynb b/docs/user/next/exercises/2_divergence_exercise_solution.ipynb
index ef093a374c..8422b8dabd 100644
--- a/docs/user/next/exercises/2_divergence_exercise_solution.ipynb
+++ b/docs/user/next/exercises/2_divergence_exercise_solution.ipynb
@@ -1,230 +1,230 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "c841c53b",
- "metadata": {},
- "source": [
- "# 3. Divergence"
- ]
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "c841c53b",
+ "metadata": {},
+ "source": [
+ "# 3. Divergence"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bcac0f9b",
+ "metadata": {},
+ "source": [
+     "Next we will translate a divergence stencil. We approximate the divergence of a vector field $\\mathbf{v}$ at the middle point of a cell $\\mathbf{P}$ in the following way: We take the dot product of the normal velocity $\\mathbf{n}_e$ of each direct neighbor edge of $\\mathbf{P}$ with $\\mathbf{v}_e$ which is multiplied with the edge length $L_e$. The contributions from all three edges of a cell are summed up and then divided by the area of the cell $A_P$. In the next pictures we can see a graphical representation of all of the quantities involved:\n",
+ "\n",
+ "![](../divergence_picture.png \"Divergence\")\n",
+ "\n",
+ "And the equation:\n",
+ "\n",
+ "![](../divergence_formula.png \"Divergence\")\n",
+ "\n",
+ "The orientation of the edge has to factor in, since we do not know, in general, if the normal of an edge is pointed inwards or outwards of any cell we are looking at. We cannot have only outwards pointing edge normals, because if we look at two neighboring cells, the normal of their shared edge has to point outwards for one of the cells, but inwards for the other.\n",
+ "\n",
+ "![](../edge_orientation.png \"Edge Orientation\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4eba62c1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from helpers import *"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0cb870eb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def divergence_numpy(\n",
+ " c2e: np.array,\n",
+ " u: np.array,\n",
+ " v: np.array,\n",
+ " nx: np.array,\n",
+ " ny: np.array,\n",
+ " L: np.array,\n",
+ " A: np.array,\n",
+ " edge_orientation: np.array,\n",
+ ") -> np.array:\n",
+ " uv_div = (\n",
+ " np.sum(\n",
+ " (u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1\n",
+ " )\n",
+ " / A\n",
+ " )\n",
+ " return uv_div"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8fc6416f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@gtx.field_operator\n",
+ "def divergence(\n",
+ " u: gtx.Field[[E], float],\n",
+ " v: gtx.Field[[E], float],\n",
+ " nx: gtx.Field[[E], float],\n",
+ " ny: gtx.Field[[E], float],\n",
+ " L: gtx.Field[[E], float],\n",
+ " A: gtx.Field[[C], float],\n",
+ " edge_orientation: gtx.Field[[C, C2EDim], float],\n",
+ ") -> gtx.Field[[C], float]:\n",
+ " uv_div = (\n",
+ " neighbor_sum(\n",
+ " (u(C2E) * nx(C2E) + v(C2E) * ny(C2E)) * L(C2E) * edge_orientation,\n",
+ " axis=C2EDim,\n",
+ " )\n",
+ " / A\n",
+ " )\n",
+ " return uv_div"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5dbd2f62",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def test_divergence():\n",
+ " backend = None\n",
+ " # backend = gtfn_cpu\n",
+ " # backend = gtfn_gpu\n",
+ "\n",
+ " cell_domain = gtx.domain({C: n_cells})\n",
+ " edge_domain = gtx.domain({E: n_edges})\n",
+ "\n",
+ " u = random_field(edge_domain, allocator=backend)\n",
+ " v = random_field(edge_domain, allocator=backend)\n",
+ " nx = random_field(edge_domain, allocator=backend)\n",
+ " ny = random_field(edge_domain, allocator=backend)\n",
+ " L = random_field(edge_domain, allocator=backend)\n",
+ " A = random_field(cell_domain, allocator=backend)\n",
+ " edge_orientation = random_sign(\n",
+ " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n",
+ " )\n",
+ "\n",
+ " divergence_ref = divergence_numpy(\n",
+ " c2e_table,\n",
+ " u.asnumpy(),\n",
+ " v.asnumpy(),\n",
+ " nx.asnumpy(),\n",
+ " ny.asnumpy(),\n",
+ " L.asnumpy(),\n",
+ " A.asnumpy(),\n",
+ " edge_orientation.asnumpy(),\n",
+ " )\n",
+ "\n",
+ " c2e_connectivity = gtx.NeighborTableOffsetProvider(\n",
+ " c2e_table, C, E, 3, has_skip_values=False\n",
+ " )\n",
+ "\n",
+ " divergence_gt4py = gtx.zeros(cell_domain, allocator=backend)\n",
+ "\n",
+ " divergence(\n",
+ " u,\n",
+ " v,\n",
+ " nx,\n",
+ " ny,\n",
+ " L,\n",
+ " A,\n",
+ " edge_orientation,\n",
+ " out=divergence_gt4py,\n",
+ " offset_provider={C2E.value: c2e_connectivity},\n",
+ " )\n",
+ "\n",
+ " assert np.allclose(divergence_gt4py.asnumpy(), divergence_ref)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bbcb9bf5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "test_divergence()\n",
+ "print(\"Test successful\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5cd78463",
+ "metadata": {},
+ "source": [
+ "## 3. Divergence in ICON"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b9a35e28",
+ "metadata": {},
+ "source": [
+ "In ICON we can find a divergence in diffusion which looks somewhat like this, but also quite a bit different:\n",
+ "\n",
+ "```fortran\n",
+ " DO jb = i_startblk,i_endblk\n",
+ "\n",
+ " CALL get_indices_c(p_patch, jb, i_startblk, i_endblk, &\n",
+ " i_startidx, i_endidx, rl_start, rl_end)\n",
+ " DO jk = 1, nlev\n",
+ " DO jc = i_startidx, i_endidx\n",
+ "\n",
+ " div(jc,jk) = p_nh_prog%vn(ieidx(jc,jb,1),jk,ieblk(jc,jb,1))*p_int%geofac_div(jc,1,jb) + &\n",
+ " p_nh_prog%vn(ieidx(jc,jb,2),jk,ieblk(jc,jb,2))*p_int%geofac_div(jc,2,jb) + &\n",
+ " p_nh_prog%vn(ieidx(jc,jb,3),jk,ieblk(jc,jb,3))*p_int%geofac_div(jc,3,jb)\n",
+ " ENDDO\n",
+ " ENDDO\n",
+ " ENDDO\n",
+ "```\n",
+ "\n",
+ "Two assumptions are necessary to derive the ICON version of the divergence starting from our version above:\n",
+     "* Assume that the velocity component $u$ is always orthogonal and the velocity component $v$ is always parallel to the edge, in ICON these are called $vn$ and $vt$ where the n stands for normal and the t for tangential.\n",
+ "* At ICON startup time merge all constants (such as cell area $A_P$ and edge length $L_e$) into one array of geometrical factors `p_int%geofac_div`, which are constant during time stepping:\n",
+ "\n",
+ "```fortran\n",
+ " DO jb = i_startblk, i_endblk\n",
+ "\n",
+ " CALL get_indices_c(ptr_patch, jb, i_startblk, i_endblk, &\n",
+ " & i_startidx, i_endidx, rl_start, rl_end)\n",
+ "\n",
+ " DO je = 1, ptr_patch%geometry_info%cell_type\n",
+ " DO jc = i_startidx, i_endidx\n",
+ "\n",
+ " ile = ptr_patch%cells%edge_idx(jc,jb,je)\n",
+ " ibe = ptr_patch%cells%edge_blk(jc,jb,je)\n",
+ "\n",
+ " ptr_int%geofac_div(jc,je,jb) = &\n",
+ " & ptr_patch%edges%primal_edge_length(ile,ibe) * &\n",
+ " & ptr_patch%cells%edge_orientation(jc,jb,je) / &\n",
+ " & ptr_patch%cells%area(jc,jb)\n",
+ "\n",
+ " ENDDO !cell loop\n",
+ " ENDDO\n",
+ "\n",
+ " END DO !block loop\n",
+ "\n",
+ "```"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ }
},
- {
- "cell_type": "markdown",
- "id": "bcac0f9b",
- "metadata": {},
- "source": [
- "Next we will translate a divergence stencil. We approximate the divergence of a vector field $\\mathbf{v}$ at the middle point of a cell $\\mathbf{P}$ in the following way: We take the dot product of the normal velocity $\\mathbf{n}_e$ of each direct neighbor edge of $\\mathbf{P}$ with $\\mathbf{v}_e$ which is multipled with the edge length $L_e$. The contributions from all three edges of a cell are summed up and then divided by the area of the cell $A_P$. In the next pictures we can see a graphical representation of all of the quantities involved:\n",
- "\n",
- "![](../divergence_picture.png \"Divergence\")\n",
- "\n",
- "And the equation:\n",
- "\n",
- "![](../divergence_formula.png \"Divergence\")\n",
- "\n",
- "The orientation of the edge has to factor in, since we do not know, in general, if the normal of an edge is pointed inwards or outwards of any cell we are looking at. We cannot have only outwards pointing edge normals, because if we look at two neighboring cells, the normal of their shared edge has to point outwards for one of the cells, but inwards for the other.\n",
- "\n",
- "![](../edge_orientation.png \"Edge Orientation\")\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "4eba62c1",
- "metadata": {},
- "outputs": [],
- "source": [
- "from helpers import *"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "0cb870eb",
- "metadata": {},
- "outputs": [],
- "source": [
- "def divergence_numpy(\n",
- " c2e: np.array,\n",
- " u: np.array,\n",
- " v: np.array,\n",
- " nx: np.array,\n",
- " ny: np.array,\n",
- " L: np.array,\n",
- " A: np.array,\n",
- " edge_orientation: np.array,\n",
- ") -> np.array:\n",
- " uv_div = (\n",
- " np.sum(\n",
- " (u[c2e] * nx[c2e] + v[c2e] * ny[c2e]) * L[c2e] * edge_orientation, axis=1\n",
- " )\n",
- " / A\n",
- " )\n",
- " return uv_div"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "8fc6416f",
- "metadata": {},
- "outputs": [],
- "source": [
- "@gtx.field_operator\n",
- "def divergence(\n",
- " u: gtx.Field[[E], float],\n",
- " v: gtx.Field[[E], float],\n",
- " nx: gtx.Field[[E], float],\n",
- " ny: gtx.Field[[E], float],\n",
- " L: gtx.Field[[E], float],\n",
- " A: gtx.Field[[C], float],\n",
- " edge_orientation: gtx.Field[[C, C2EDim], float],\n",
- ") -> gtx.Field[[C], float]:\n",
- " uv_div = (\n",
- " neighbor_sum(\n",
- " (u(C2E) * nx(C2E) + v(C2E) * ny(C2E)) * L(C2E) * edge_orientation,\n",
- " axis=C2EDim,\n",
- " )\n",
- " / A\n",
- " )\n",
- " return uv_div"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "5dbd2f62",
- "metadata": {},
- "outputs": [],
- "source": [
- "def test_divergence():\n",
- " backend = None\n",
- " # backend = gtfn_cpu\n",
- " # backend = gtfn_gpu\n",
- "\n",
- " cell_domain = gtx.domain({C: n_cells})\n",
- " edge_domain = gtx.domain({E: n_edges})\n",
- "\n",
- " u = random_field_new(edge_domain, allocator=backend)\n",
- " v = random_field_new(edge_domain, allocator=backend)\n",
- " nx = random_field_new(edge_domain, allocator=backend)\n",
- " ny = random_field_new(edge_domain, allocator=backend)\n",
- " L = random_field_new(edge_domain, allocator=backend)\n",
- " A = random_field_new(cell_domain, allocator=backend)\n",
- " edge_orientation = random_sign(\n",
- " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n",
- " )\n",
- "\n",
- " divergence_ref = divergence_numpy(\n",
- " c2e_table,\n",
- " u.asnumpy(),\n",
- " v.asnumpy(),\n",
- " nx.asnumpy(),\n",
- " ny.asnumpy(),\n",
- " L.asnumpy(),\n",
- " A.asnumpy(),\n",
- " edge_orientation.asnumpy(),\n",
- " )\n",
- "\n",
- " c2e_connectivity = gtx.NeighborTableOffsetProvider(\n",
- " c2e_table, C, E, 3, has_skip_values=False\n",
- " )\n",
- "\n",
- " divergence_gt4py = gtx.zeros(cell_domain, allocator=backend)\n",
- "\n",
- " divergence(\n",
- " u,\n",
- " v,\n",
- " nx,\n",
- " ny,\n",
- " L,\n",
- " A,\n",
- " edge_orientation,\n",
- " out=divergence_gt4py,\n",
- " offset_provider={C2E.value: c2e_connectivity},\n",
- " )\n",
- "\n",
- " assert np.allclose(divergence_gt4py.asnumpy(), divergence_ref)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "bbcb9bf5",
- "metadata": {},
- "outputs": [],
- "source": [
- "test_divergence()\n",
- "print(\"Test successful\")"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "5cd78463",
- "metadata": {},
- "source": [
- "## 3. Divergence in ICON"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "b9a35e28",
- "metadata": {},
- "source": [
- "In ICON we can find a divergence in diffusion which looks somewhat like this, but also quite a bit different:\n",
- "\n",
- "```fortran\n",
- " DO jb = i_startblk,i_endblk\n",
- "\n",
- " CALL get_indices_c(p_patch, jb, i_startblk, i_endblk, &\n",
- " i_startidx, i_endidx, rl_start, rl_end)\n",
- " DO jk = 1, nlev\n",
- " DO jc = i_startidx, i_endidx\n",
- "\n",
- " div(jc,jk) = p_nh_prog%vn(ieidx(jc,jb,1),jk,ieblk(jc,jb,1))*p_int%geofac_div(jc,1,jb) + &\n",
- " p_nh_prog%vn(ieidx(jc,jb,2),jk,ieblk(jc,jb,2))*p_int%geofac_div(jc,2,jb) + &\n",
- " p_nh_prog%vn(ieidx(jc,jb,3),jk,ieblk(jc,jb,3))*p_int%geofac_div(jc,3,jb)\n",
- " ENDDO\n",
- " ENDDO\n",
- " ENDDO\n",
- "```\n",
- "\n",
- "Two assumptions are necessary to derive the ICON version of the divergence starting from our version above:\n",
- "* Assume that the velocity components $u$ is always orthogonal and the velocity component $v$ is always parallel to the edge, in ICON these are called $vn$ and $vt$ where the n stands for normal and the t for tangential.\n",
- "* At ICON startup time merge all constants (such as cell area $A_P$ and edge length $L_e$) into one array of geometrical factors `p_int%geofac_div`, which are constant during time stepping:\n",
- "\n",
- "```fortran\n",
- " DO jb = i_startblk, i_endblk\n",
- "\n",
- " CALL get_indices_c(ptr_patch, jb, i_startblk, i_endblk, &\n",
- " & i_startidx, i_endidx, rl_start, rl_end)\n",
- "\n",
- " DO je = 1, ptr_patch%geometry_info%cell_type\n",
- " DO jc = i_startidx, i_endidx\n",
- "\n",
- " ile = ptr_patch%cells%edge_idx(jc,jb,je)\n",
- " ibe = ptr_patch%cells%edge_blk(jc,jb,je)\n",
- "\n",
- " ptr_int%geofac_div(jc,je,jb) = &\n",
- " & ptr_patch%edges%primal_edge_length(ile,ibe) * &\n",
- " & ptr_patch%cells%edge_orientation(jc,jb,je) / &\n",
- " & ptr_patch%cells%area(jc,jb)\n",
- "\n",
- " ENDDO !cell loop\n",
- " ENDDO\n",
- "\n",
- " END DO !block loop\n",
- "\n",
- "```"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ "nbformat": 4,
+ "nbformat_minor": 5
}
diff --git a/docs/user/next/exercises/3_gradient_exercise_solution.ipynb b/docs/user/next/exercises/3_gradient_exercise_solution.ipynb
index 5544bc3aab..69e587a1d6 100644
--- a/docs/user/next/exercises/3_gradient_exercise_solution.ipynb
+++ b/docs/user/next/exercises/3_gradient_exercise_solution.ipynb
@@ -1,158 +1,158 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "8cfd1e20",
- "metadata": {},
- "source": [
- "# 4. Gradient"
- ]
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "8cfd1e20",
+ "metadata": {},
+ "source": [
+ "# 4. Gradient"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "72aa4c96",
+ "metadata": {},
+ "source": [
+ "Another example is the gradient defined at the center of a Cell $\\mathbf{P}$ of a scalar function $f$. We approximate this by taking the sum over the three edges and multiplying $f(e)$ with the edge normal $\\mathbf{n}_e$ and the edge length $L_e$ and dividing the resulting sum with the cell area $A_P$.\n",
+ "The result will be the two components of the gradient vector.\n",
+ "\n",
+     "![](../gradient_picture.png \"Gradient\")\n",
+ "\n",
+     "![](../gradient_formula.png \"Gradient\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1bddcf59",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from helpers import *\n",
+ "\n",
+ "\n",
+ "import gt4py.next as gtx"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "37b0bc43",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def gradient_numpy(\n",
+ " c2e: np.array,\n",
+ " f: np.array,\n",
+ " nx: np.array,\n",
+ " ny: np.array,\n",
+ " L: np.array,\n",
+ " A: np.array,\n",
+ " edge_orientation: np.array,\n",
+ ") -> gtx.tuple[np.array, np.array]:\n",
+ " # edge_orientation = np.expand_dims(edge_orientation, axis=-1)\n",
+ " f_x = np.sum(f[c2e] * nx[c2e] * L[c2e] * edge_orientation, axis=1) / A\n",
+ " f_y = np.sum(f[c2e] * ny[c2e] * L[c2e] * edge_orientation, axis=1) / A\n",
+ " return f_x, f_y"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "afa80e49",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@gtx.field_operator\n",
+ "def gradient(\n",
+ " f: gtx.Field[[E], float],\n",
+ " nx: gtx.Field[[E], float],\n",
+ " ny: gtx.Field[[E], float],\n",
+ " L: gtx.Field[[E], float],\n",
+ " A: gtx.Field[[C], float],\n",
+ " edge_orientation: gtx.Field[[C, C2EDim], float],\n",
+ ") -> gtx.tuple[gtx.Field[[C], float], gtx.Field[[C], float]]:\n",
+ " f_x = neighbor_sum(f(C2E) * nx(C2E) * L(C2E) * edge_orientation, axis=C2EDim) / A\n",
+ " f_y = neighbor_sum(f(C2E) * ny(C2E) * L(C2E) * edge_orientation, axis=C2EDim) / A\n",
+ " return f_x, f_y"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "84b02762",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def test_gradient():\n",
+ " backend = None\n",
+ " # backend = gtfn_cpu\n",
+ " # backend = gtfn_gpu\n",
+ "\n",
+ " cell_domain = gtx.domain({C: n_cells})\n",
+ " edge_domain = gtx.domain({E: n_edges})\n",
+ " \n",
+ " f = random_field(edge_domain, allocator=backend)\n",
+ " nx = random_field(edge_domain, allocator=backend)\n",
+ " ny = random_field(edge_domain, allocator=backend)\n",
+ " L = random_field(edge_domain, allocator=backend)\n",
+ " A = random_field(cell_domain, allocator=backend)\n",
+ " edge_orientation = random_sign(\n",
+ " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n",
+ " )\n",
+ "\n",
+ " gradient_numpy_x, gradient_numpy_y = gradient_numpy(\n",
+ " c2e_table,\n",
+ " f.asnumpy(),\n",
+ " nx.asnumpy(),\n",
+ " ny.asnumpy(),\n",
+ " L.asnumpy(),\n",
+ " A.asnumpy(),\n",
+ " edge_orientation.asnumpy(),\n",
+ " )\n",
+ "\n",
+ " c2e_connectivity = gtx.NeighborTableOffsetProvider(c2e_table, C, E, 3, has_skip_values=False)\n",
+ "\n",
+ " gradient_gt4py_x = gtx.zeros(cell_domain, allocator=backend) \n",
+ " gradient_gt4py_y = gtx.zeros(cell_domain, allocator=backend) \n",
+ "\n",
+ " gradient(\n",
+ " f,\n",
+ " nx,\n",
+ " ny,\n",
+ " L,\n",
+ " A,\n",
+ " edge_orientation,\n",
+ " out=(gradient_gt4py_x, gradient_gt4py_y),\n",
+ " offset_provider={C2E.value: c2e_connectivity},\n",
+ " )\n",
+ "\n",
+ " assert np.allclose(gradient_gt4py_x.asnumpy(), gradient_numpy_x)\n",
+ " assert np.allclose(gradient_gt4py_y.asnumpy(), gradient_numpy_y)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9267fe99",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "test_gradient()\n",
+ "print(\"Test successful\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "formats": "ipynb,md:myst"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ }
},
- {
- "cell_type": "markdown",
- "id": "72aa4c96",
- "metadata": {},
- "source": [
- "Another example is the gradient defined at the center of a Cell $\\mathbf{P}$ of a scalar function $f$. We approximate this by taking the sum over the three edges and multiplying $f(e)$ with the edge normal $\\mathbf{n}_e$ and the edge length $L_e$ and dividing the resulting sum with the cell area $A_P$.\n",
- "The result will be the two components of the gradient vector.\n",
- "\n",
- "![](../gradient_picture.png \"Divergence\")\n",
- "\n",
- "![](../gradient_formula.png \"Divergence\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1bddcf59",
- "metadata": {},
- "outputs": [],
- "source": [
- "from helpers import *\n",
- "\n",
- "\n",
- "import gt4py.next as gtx"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "37b0bc43",
- "metadata": {},
- "outputs": [],
- "source": [
- "def gradient_numpy(\n",
- " c2e: np.array,\n",
- " f: np.array,\n",
- " nx: np.array,\n",
- " ny: np.array,\n",
- " L: np.array,\n",
- " A: np.array,\n",
- " edge_orientation: np.array,\n",
- ") -> gtx.tuple[np.array, np.array]:\n",
- " # edge_orientation = np.expand_dims(edge_orientation, axis=-1)\n",
- " f_x = np.sum(f[c2e] * nx[c2e] * L[c2e] * edge_orientation, axis=1) / A\n",
- " f_y = np.sum(f[c2e] * ny[c2e] * L[c2e] * edge_orientation, axis=1) / A\n",
- " return f_x, f_y"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "afa80e49",
- "metadata": {},
- "outputs": [],
- "source": [
- "@gtx.field_operator\n",
- "def gradient(\n",
- " f: gtx.Field[[E], float],\n",
- " nx: gtx.Field[[E], float],\n",
- " ny: gtx.Field[[E], float],\n",
- " L: gtx.Field[[E], float],\n",
- " A: gtx.Field[[C], float],\n",
- " edge_orientation: gtx.Field[[C, C2EDim], float],\n",
- ") -> gtx.tuple[gtx.Field[[C], float], gtx.Field[[C], float]]:\n",
- " f_x = neighbor_sum(f(C2E) * nx(C2E) * L(C2E) * edge_orientation, axis=C2EDim) / A\n",
- " f_y = neighbor_sum(f(C2E) * ny(C2E) * L(C2E) * edge_orientation, axis=C2EDim) / A\n",
- " return f_x, f_y"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "84b02762",
- "metadata": {},
- "outputs": [],
- "source": [
- "def test_gradient():\n",
- " backend = None\n",
- " # backend = gtfn_cpu\n",
- " # backend = gtfn_gpu\n",
- "\n",
- " cell_domain = gtx.domain({C: n_cells})\n",
- " edge_domain = gtx.domain({E: n_edges})\n",
- " \n",
- " f = random_field_new(edge_domain, allocator=backend)\n",
- " nx = random_field_new(edge_domain, allocator=backend)\n",
- " ny = random_field_new(edge_domain, allocator=backend)\n",
- " L = random_field_new(edge_domain, allocator=backend)\n",
- " A = random_field_new(cell_domain, allocator=backend)\n",
- " edge_orientation = random_sign(\n",
- " gtx.domain({C: n_cells, C2EDim: 3}), allocator=backend\n",
- " )\n",
- "\n",
- " gradient_numpy_x, gradient_numpy_y = gradient_numpy(\n",
- " c2e_table,\n",
- " f.asnumpy(),\n",
- " nx.asnumpy(),\n",
- " ny.asnumpy(),\n",
- " L.asnumpy(),\n",
- " A.asnumpy(),\n",
- " edge_orientation.asnumpy(),\n",
- " )\n",
- "\n",
- " c2e_connectivity = gtx.NeighborTableOffsetProvider(c2e_table, C, E, 3, has_skip_values=False)\n",
- "\n",
- " gradient_gt4py_x = gtx.zeros(cell_domain, allocator=backend) \n",
- " gradient_gt4py_y = gtx.zeros(cell_domain, allocator=backend) \n",
- "\n",
- " gradient(\n",
- " f,\n",
- " nx,\n",
- " ny,\n",
- " L,\n",
- " A,\n",
- " edge_orientation,\n",
- " out=(gradient_gt4py_x, gradient_gt4py_y),\n",
- " offset_provider={C2E.value: c2e_connectivity},\n",
- " )\n",
- "\n",
- " assert np.allclose(gradient_gt4py_x.asnumpy(), gradient_numpy_x)\n",
- " assert np.allclose(gradient_gt4py_y.asnumpy(), gradient_numpy_y)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "9267fe99",
- "metadata": {},
- "outputs": [],
- "source": [
- "test_gradient()\n",
- "print(\"Test successful\")"
- ]
- }
- ],
- "metadata": {
- "jupytext": {
- "formats": "ipynb,md:myst"
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ "nbformat": 4,
+ "nbformat_minor": 5
}
diff --git a/docs/user/next/exercises/4_curl_exercise_solution.ipynb b/docs/user/next/exercises/4_curl_exercise_solution.ipynb
index d8a12e496f..1211ba6a94 100644
--- a/docs/user/next/exercises/4_curl_exercise_solution.ipynb
+++ b/docs/user/next/exercises/4_curl_exercise_solution.ipynb
@@ -1,165 +1,165 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "9e5abeda",
- "metadata": {},
- "source": [
- "# 5. Curl"
- ]
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "9e5abeda",
+ "metadata": {},
+ "source": [
+ "# 5. Curl"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0bc751b1",
+ "metadata": {},
+ "source": [
+ "As the last example of the easier operations, we take a look at the curl of a vector field $\\mathbf{v}$ defined at a vertex $\\mathbf{N}$.\n",
+     "To approximate this, we once again iterate over all of the direct neighboring edges of the vertex in the center and for each edge take the dot product of the vector field $\\mathbf{v}_e$ with the edge normals $\\mathbf{n}_e$ and multiply that by the dual edge length $\\hat{L}_e$. The resulting neighbor sum is then divided by the dual area $\\hat{A}_N$, which is the area of the Voronoi cell around the Vertex $\\mathbf{N}$.\n",
+ "\n",
+ "\n",
+     "![](../curl_picture.png \"Curl\")\n",
+ "\n",
+ "\n",
+     "![](../curl_formula.png \"Curl\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1c1af88b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from helpers import *\n",
+ "\n",
+ "import gt4py.next as gtx"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ce333ad2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def curl_numpy(\n",
+ " v2e: np.array,\n",
+ " u: np.array,\n",
+ " v: np.array,\n",
+ " nx: np.array,\n",
+ " ny: np.array,\n",
+ " dualL: np.array,\n",
+ " dualA: np.array,\n",
+ " edge_orientation: np.array,\n",
+ ") -> np.array:\n",
+ " uv_curl = (\n",
+ " np.sum(\n",
+ " (u[v2e] * nx[v2e] + v[v2e] * ny[v2e]) * dualL[v2e] * edge_orientation,\n",
+ " axis=1,\n",
+ " )\n",
+ " / dualA\n",
+ " )\n",
+ "\n",
+ " return uv_curl"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0925bdb0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@gtx.field_operator\n",
+ "def curl(\n",
+ " u: gtx.Field[[E], float],\n",
+ " v: gtx.Field[[E], float],\n",
+ " nx: gtx.Field[[E], float],\n",
+ " ny: gtx.Field[[E], float],\n",
+ " dualL: gtx.Field[[E], float],\n",
+ " dualA: gtx.Field[[V], float],\n",
+ " edge_orientation: gtx.Field[[V, V2EDim], float],\n",
+ ") -> gtx.Field[[V], float]:\n",
+ " uv_curl = (\n",
+ " neighbor_sum(\n",
+ " (u(V2E) * nx(V2E) + v(V2E) * ny(V2E)) * dualL(V2E) * edge_orientation,\n",
+ " axis=V2EDim,\n",
+ " )\n",
+ " / dualA\n",
+ " )\n",
+ "\n",
+ " return uv_curl"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5b6ffc9e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def test_curl():\n",
+ " backend = None\n",
+ " # backend = gtfn_cpu\n",
+ " # backend = gtfn_gpu\n",
+ "\n",
+ " edge_domain = gtx.domain({E: n_edges})\n",
+ " vertex_domain = gtx.domain({V: n_vertices})\n",
+ " \n",
+ " u = random_field(edge_domain, allocator=backend)\n",
+ " v = random_field(edge_domain, allocator=backend)\n",
+ " nx = random_field(edge_domain, allocator=backend)\n",
+ " ny = random_field(edge_domain, allocator=backend)\n",
+ " dualL = random_field(edge_domain, allocator=backend)\n",
+ " dualA = random_field(vertex_domain, allocator=backend)\n",
+ " edge_orientation = random_sign(\n",
+ " gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend\n",
+ " )\n",
+ "\n",
+ " divergence_ref = curl_numpy(\n",
+ " v2e_table,\n",
+ " u.asnumpy(),\n",
+ " v.asnumpy(),\n",
+ " nx.asnumpy(),\n",
+ " ny.asnumpy(),\n",
+ " dualL.asnumpy(),\n",
+ " dualA.asnumpy(),\n",
+ " edge_orientation.asnumpy(),\n",
+ " )\n",
+ "\n",
+ " v2e_connectivity = gtx.NeighborTableOffsetProvider(v2e_table, V, E, 6, has_skip_values=False)\n",
+ "\n",
+ " curl_gt4py = gtx.zeros(vertex_domain, allocator=backend) \n",
+ "\n",
+ " curl(\n",
+ " u, v, nx, ny, dualL, dualA, edge_orientation, out = curl_gt4py, offset_provider = {V2E.value: v2e_connectivity}\n",
+ " )\n",
+ " \n",
+ " assert np.allclose(curl_gt4py.asnumpy(), divergence_ref)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ae651445",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "test_curl()\n",
+ "print(\"Test successful\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "formats": "ipynb,md:myst"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ }
},
- {
- "cell_type": "markdown",
- "id": "0bc751b1",
- "metadata": {},
- "source": [
- "As the last example of the easier operations, we take a look at the curl of a vector field $\\mathbf{v}$ defined at a vertex $\\mathbf{N}$.\n",
- "To approximate this, we once again iterate over all of the direct neighboring edges of the vertex in the center and for each edge take the dot product of the vector field $\\mathbf{v}_e$ with the edge normals $\\mathbf{n}_f$ and multiply that by the dual edge length $\\hat{L}_e$. The resulting neighbor sum is then divided by the dual area $\\hat{A}_N$, which is the area of the Voronoi cell around the Vertex $\\mathbf{N}$.\n",
- "\n",
- "\n",
- "![](../curl_picture.png \"Divergence\")\n",
- "\n",
- "\n",
- "![](../curl_formula.png \"Divergence\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1c1af88b",
- "metadata": {},
- "outputs": [],
- "source": [
- "from helpers import *\n",
- "\n",
- "import gt4py.next as gtx"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "ce333ad2",
- "metadata": {},
- "outputs": [],
- "source": [
- "def curl_numpy(\n",
- " v2e: np.array,\n",
- " u: np.array,\n",
- " v: np.array,\n",
- " nx: np.array,\n",
- " ny: np.array,\n",
- " dualL: np.array,\n",
- " dualA: np.array,\n",
- " edge_orientation: np.array,\n",
- ") -> np.array:\n",
- " uv_curl = (\n",
- " np.sum(\n",
- " (u[v2e] * nx[v2e] + v[v2e] * ny[v2e]) * dualL[v2e] * edge_orientation,\n",
- " axis=1,\n",
- " )\n",
- " / dualA\n",
- " )\n",
- "\n",
- " return uv_curl"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "0925bdb0",
- "metadata": {},
- "outputs": [],
- "source": [
- "@gtx.field_operator\n",
- "def curl(\n",
- " u: gtx.Field[[E], float],\n",
- " v: gtx.Field[[E], float],\n",
- " nx: gtx.Field[[E], float],\n",
- " ny: gtx.Field[[E], float],\n",
- " dualL: gtx.Field[[E], float],\n",
- " dualA: gtx.Field[[V], float],\n",
- " edge_orientation: gtx.Field[[V, V2EDim], float],\n",
- ") -> gtx.Field[[V], float]:\n",
- " uv_curl = (\n",
- " neighbor_sum(\n",
- " (u(V2E) * nx(V2E) + v(V2E) * ny(V2E)) * dualL(V2E) * edge_orientation,\n",
- " axis=V2EDim,\n",
- " )\n",
- " / dualA\n",
- " )\n",
- "\n",
- " return uv_curl"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "5b6ffc9e",
- "metadata": {},
- "outputs": [],
- "source": [
- "def test_curl():\n",
- " backend = None\n",
- " # backend = gtfn_cpu\n",
- " # backend = gtfn_gpu\n",
- "\n",
- " edge_domain = gtx.domain({E: n_edges})\n",
- " vertex_domain = gtx.domain({V: n_vertices})\n",
- " \n",
- " u = random_field_new(edge_domain, allocator=backend)\n",
- " v = random_field_new(edge_domain, allocator=backend)\n",
- " nx = random_field_new(edge_domain, allocator=backend)\n",
- " ny = random_field_new(edge_domain, allocator=backend)\n",
- " dualL = random_field_new(edge_domain, allocator=backend)\n",
- " dualA = random_field_new(vertex_domain, allocator=backend)\n",
- " edge_orientation = random_sign(\n",
- " gtx.domain({V: n_vertices, V2EDim: 6}), allocator=backend\n",
- " )\n",
- "\n",
- " divergence_ref = curl_numpy(\n",
- " v2e_table,\n",
- " u.asnumpy(),\n",
- " v.asnumpy(),\n",
- " nx.asnumpy(),\n",
- " ny.asnumpy(),\n",
- " dualL.asnumpy(),\n",
- " dualA.asnumpy(),\n",
- " edge_orientation.asnumpy(),\n",
- " )\n",
- "\n",
- " v2e_connectivity = gtx.NeighborTableOffsetProvider(v2e_table, V, E, 6, has_skip_values=False)\n",
- "\n",
- " curl_gt4py = gtx.zeros(vertex_domain, allocator=backend) \n",
- "\n",
- " curl(\n",
- " u, v, nx, ny, dualL, dualA, edge_orientation, out = curl_gt4py, offset_provider = {V2E.value: v2e_connectivity}\n",
- " )\n",
- " \n",
- " assert np.allclose(curl_gt4py.asnumpy(), divergence_ref)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "ae651445",
- "metadata": {},
- "outputs": [],
- "source": [
- "test_curl()\n",
- "print(\"Test successful\")"
- ]
- }
- ],
- "metadata": {
- "jupytext": {
- "formats": "ipynb,md:myst"
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ "nbformat": 4,
+ "nbformat_minor": 5
}
diff --git a/docs/user/next/exercises/7_scan_operator.ipynb b/docs/user/next/exercises/7_scan_operator.ipynb
index d3cc623b73..4e77064dc7 100644
--- a/docs/user/next/exercises/7_scan_operator.ipynb
+++ b/docs/user/next/exercises/7_scan_operator.ipynb
@@ -1,272 +1,272 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "9ba87bfb",
- "metadata": {},
- "source": [
- "## Scan operator"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "2ee9d989",
- "metadata": {},
- "source": [
- "The unique feature of this operator is that it provides the return state of the previous iteration as its first argument (i.e., the result from the previous grid point). In other words, all the arguments of the current `return` will be available (as a tuple) in the next iteration from the first argument of the defined function. \n",
- "\n",
- "Example: A FORTRAN pseudocode for integrating a moisture variable (e.g., cloud water or water vapour) over a column could look as follows:\n",
- "\n",
- "\n",
- "```FORTRAN\n",
- "SUBROUTINE column_integral( var_in, rho, dz, var_out, ie, je, ke )\n",
- " ! Return the column integral of a moist species.\n",
- " INTEGER, INTENT (IN) :: &\n",
- " ie, je, ke ! array dimensions of the I/O-fields (horizontal, horizontal, vertical)\n",
- "\n",
- " REAL (KIND=wp), INTENT (OUT) :: &\n",
- " q_colsum (ie,je) ! Vertically-integrated mass of water species\n",
- "\n",
- " REAL (KIND=wp), INTENT (IN) :: &\n",
- " rho (ie,je,ke), & \n",
- " dz (ie,je,ke), & ! height of model half levels\n",
- " var_in (ie,je,ke) ! humidity mass concentration at time-level nnow\n",
- " \n",
- " !$acc parallel present( iq ) if (lzacc)\n",
- " !$acc loop gang\n",
- " DO j=1,je\n",
- " !$acc loop vector\n",
- " DO i=1,ie\n",
- " q_sum(i,j) = 0.0\n",
- " END DO\n",
- " END DO\n",
- " !$acc end parallel\n",
- " \n",
- " \n",
- " !$acc parallel present( iq, rho, hhl, q ) if (lzacc)\n",
- " DO k = 1, ke ! Vertical loop\n",
- " !$acc loop gang\n",
- " DO j=1,je\n",
- " !$acc loop vector\n",
- " DO i=1,ie\n",
- " q_colsum(i,j) = q_colsum(i,j) + var_in(i,j,k) * rho(i,j,k)* dz(i,j,k)\n",
- " END DO\n",
- " END DO\n",
- " END DO\n",
- " !$acc end parallel\n",
- "END SUBROUTINE column_integral\n",
- "```\n",
- "\n",
- "Where:\n",
- "- `var_in` is the 3D variable that will be summed up\n",
- "- `q_colsum` is the resulting 2D variable\n",
- "- `rho` the air density\n",
- "- `dz`the thickness of the vertical layers\n",
- "\n",
- "In the first loop nest, `column_sum` is set to zero for all grid columns. The vertical dependency enters on the RHS of the second loop nest `q_colsum(i,j) = q_colsum(i,j) + ...`\n",
- "\n",
- "Using the `scan_operator` this operation would be written like this:\n",
- "\n",
- "```python\n",
- "@scan_operator(axis=KDim, forward=True, init=0.0)\n",
- "def column_integral(float: state, float: var, float: rho, float: dz)\n",
- " \"\"\"Return the column integral of a moist species.\"\"\"\n",
- " return var * rho * dz + state\n",
- "```\n",
- "\n",
- "Here the vertical dependency is expressed by the first function argument (`state`). This argument carries the return from the previous k-level and does not need to be specified when the function is called (similar to the `self` argument of Python classes). The argument is intialized to `init=0.0` in the function decorator (first loop nest above) and the dimension of the integral is specified with `axis=KDim`.\n",
- "\n",
- "\n",
- "```python\n",
- "q_colsum = column_integral(qv, rho, dz)\n",
- "```"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "c9e31bff",
- "metadata": {},
- "source": [
- "#### Exercise: port a toy cloud microphysics scheme from python/numpy using the template of a `scan_operator` below"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "bd2fd309",
- "metadata": {},
- "outputs": [
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "9ba87bfb",
+ "metadata": {},
+ "source": [
+ "## Scan operator"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2ee9d989",
+ "metadata": {},
+ "source": [
+ "The unique feature of this operator is that it provides the return state of the previous iteration as its first argument (i.e., the result from the previous grid point). In other words, all the arguments of the current `return` will be available (as a tuple) in the next iteration from the first argument of the defined function. \n",
+ "\n",
+ "Example: A FORTRAN pseudocode for integrating a moisture variable (e.g., cloud water or water vapour) over a column could look as follows:\n",
+ "\n",
+ "\n",
+ "```FORTRAN\n",
+ "SUBROUTINE column_integral( var_in, rho, dz, var_out, ie, je, ke )\n",
+ " ! Return the column integral of a moist species.\n",
+ " INTEGER, INTENT (IN) :: &\n",
+ " ie, je, ke ! array dimensions of the I/O-fields (horizontal, horizontal, vertical)\n",
+ "\n",
+ " REAL (KIND=wp), INTENT (OUT) :: &\n",
+ " q_colsum (ie,je) ! Vertically-integrated mass of water species\n",
+ "\n",
+ " REAL (KIND=wp), INTENT (IN) :: &\n",
+ " rho (ie,je,ke), & \n",
+ " dz (ie,je,ke), & ! height of model half levels\n",
+ " var_in (ie,je,ke) ! humidity mass concentration at time-level nnow\n",
+ " \n",
+ " !$acc parallel present( iq ) if (lzacc)\n",
+ " !$acc loop gang\n",
+ " DO j=1,je\n",
+ " !$acc loop vector\n",
+ " DO i=1,ie\n",
+ " q_sum(i,j) = 0.0\n",
+ " END DO\n",
+ " END DO\n",
+ " !$acc end parallel\n",
+ " \n",
+ " \n",
+ " !$acc parallel present( iq, rho, hhl, q ) if (lzacc)\n",
+ " DO k = 1, ke ! Vertical loop\n",
+ " !$acc loop gang\n",
+ " DO j=1,je\n",
+ " !$acc loop vector\n",
+ " DO i=1,ie\n",
+ " q_colsum(i,j) = q_colsum(i,j) + var_in(i,j,k) * rho(i,j,k)* dz(i,j,k)\n",
+ " END DO\n",
+ " END DO\n",
+ " END DO\n",
+ " !$acc end parallel\n",
+ "END SUBROUTINE column_integral\n",
+ "```\n",
+ "\n",
+ "Where:\n",
+ "- `var_in` is the 3D variable that will be summed up\n",
+ "- `q_colsum` is the resulting 2D variable\n",
+ "- `rho` the air density\n",
 + "- `dz` the thickness of the vertical layers\n",
+ "\n",
+ "In the first loop nest, `column_sum` is set to zero for all grid columns. The vertical dependency enters on the RHS of the second loop nest `q_colsum(i,j) = q_colsum(i,j) + ...`\n",
+ "\n",
+ "Using the `scan_operator` this operation would be written like this:\n",
+ "\n",
+ "```python\n",
+ "@scan_operator(axis=KDim, forward=True, init=0.0)\n",
+ "def column_integral(float: state, float: var, float: rho, float: dz)\n",
+ " \"\"\"Return the column integral of a moist species.\"\"\"\n",
+ " return var * rho * dz + state\n",
+ "```\n",
+ "\n",
 + "Here the vertical dependency is expressed by the first function argument (`state`). This argument carries the return from the previous k-level and does not need to be specified when the function is called (similar to the `self` argument of Python classes). The argument is initialized to `init=0.0` in the function decorator (first loop nest above) and the dimension of the integral is specified with `axis=KDim`.\n",
+ "\n",
+ "\n",
+ "```python\n",
+ "q_colsum = column_integral(qv, rho, dz)\n",
+ "```"
+ ]
+ },
{
- "data": {
- "text/html": [
- "\n",
- "\n"
+ "cell_type": "markdown",
+ "id": "c9e31bff",
+ "metadata": {},
+ "source": [
+ "#### Exercise: port a toy cloud microphysics scheme from python/numpy using the template of a `scan_operator` below"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bd2fd309",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
],
- "text/plain": [
- ""
+ "source": [
+ "from helpers import *\n",
+ "\n",
+ "import gt4py.next as gtx\n",
+ "\n",
+ "backend = None\n",
+ "# backend = gtfn_cpu\n",
+ "# backend = gtfn_gpu"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "74338168",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def toy_microphysics_numpy(qc, qr, autoconversion_rate=0.1, sedimentaion_constant=0.05):\n",
 + "    \"\"\"A toy model of a microphysics scheme containing autoconversion and scavenging\"\"\"\n",
+ "\n",
+ " sedimentation_flux = 0.0\n",
+ "\n",
+ " for cell, k in np.ndindex(qc.shape):\n",
+ " # Autoconversion: Cloud Drops -> Rain Drops\n",
+ " autoconversion_tendency = qc[cell, k] * autoconversion_rate\n",
+ "\n",
+ " qc[cell, k] -= autoconversion_tendency\n",
+ " qr[cell, k] += autoconversion_tendency\n",
+ "\n",
+ " ## Apply sedimentation flux from level above\n",
+ " qr[cell, k] += sedimentation_flux\n",
+ "\n",
+ " ## Remove mass due to sedimentation flux from the current cell\n",
+ " qr[cell, k] -= sedimentation_flux"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "69bf6022",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@gtx.scan_operator(axis=K, forward=True, init=(0.0, 0.0, 0.0))\n",
+ "def _graupel_toy_scan(\n",
+ " state: tuple[float, float, float], qc_in: float, qr_in: float\n",
+ ") -> tuple[float, float, float]:\n",
+ " autoconversion_rate = 0.1\n",
+ " sedimentaion_constant = 0.05\n",
+ "\n",
+ " # unpack state of previous iteration\n",
+ " # TODO\n",
+ " \n",
+ " # Autoconversion: Cloud Drops -> Rain Drops\n",
+ " # TODO\n",
+ " \n",
+ " ## Add sedimentation flux from level above\n",
+ " # TODO\n",
+ "\n",
+ " # Remove mass due to sedimentation flux\n",
+ " # TODO\n",
+ "\n",
+ " return # TODO"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0de41cb8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@gtx.field_operator(backend=backend)\n",
+ "def graupel_toy_scan(\n",
+ " qc: gtx.Field[[C, K], float], qr: gtx.Field[[C, K], float]\n",
+ ") -> tuple[\n",
+ " gtx.Field[[C, K], float],\n",
+ " gtx.Field[[C, K], float]\n",
+ "]:\n",
+ " qc, qr, _ = _graupel_toy_scan(qc, qr)\n",
+ "\n",
+ " return qc, qr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4e0dc8d2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def test_scan_operator():\n",
+ " cell_k_domain = gtx.domain({C: n_cells, K: n_levels})\n",
+ " \n",
+ " qc = random_field(cell_k_domain, allocator=backend)\n",
+ " qr = random_field(cell_k_domain, allocator=backend)\n",
+ "\n",
+ " qc_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
+ " qr_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
+ "\n",
+ " # Initialize Numpy fields from GT4Py fields\n",
+ " qc_numpy = qc.asnumpy().copy()\n",
+ " qr_numpy = qr.asnumpy().copy()\n",
+ "\n",
+ " # Execute the Numpy version of scheme\n",
+ " toy_microphysics_numpy(qc_numpy, qr_numpy)\n",
+ "\n",
+ " # Execute the GT4Py version of scheme\n",
+ " graupel_toy_scan(qc, qr, out=(qc_new, qr_new), offset_provider={})\n",
+ "\n",
+ " # Compare results\n",
+ " assert np.allclose(qc_new.asnumpy(), qc_numpy)\n",
+ " assert np.allclose(qr_new.asnumpy(), qr_numpy)"
]
- },
- "metadata": {},
- "output_type": "display_data"
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a76a6be7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "test_scan_operator()\n",
+ "print(\"Test successful\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "db590d8a",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "formats": "ipynb,md:myst"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.2"
}
- ],
- "source": [
- "from helpers import *\n",
- "\n",
- "import gt4py.next as gtx\n",
- "\n",
- "backend = None\n",
- "# backend = gtfn_cpu\n",
- "# backend = gtfn_gpu"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "74338168",
- "metadata": {},
- "outputs": [],
- "source": [
- "def toy_microphysics_numpy(qc, qr, autoconversion_rate=0.1, sedimentaion_constant=0.05):\n",
- " \"\"\"A toy model of a microphysics scheme contaning autoconversion and scavenging\"\"\"\n",
- "\n",
- " sedimentation_flux = 0.0\n",
- "\n",
- " for cell, k in np.ndindex(qc.shape):\n",
- " # Autoconversion: Cloud Drops -> Rain Drops\n",
- " autoconversion_tendency = qc[cell, k] * autoconversion_rate\n",
- "\n",
- " qc[cell, k] -= autoconversion_tendency\n",
- " qr[cell, k] += autoconversion_tendency\n",
- "\n",
- " ## Apply sedimentation flux from level above\n",
- " qr[cell, k] += sedimentation_flux\n",
- "\n",
- " ## Remove mass due to sedimentation flux from the current cell\n",
- " qr[cell, k] -= sedimentation_flux"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "69bf6022",
- "metadata": {},
- "outputs": [],
- "source": [
- "@gtx.scan_operator(axis=K, forward=True, init=(0.0, 0.0, 0.0))\n",
- "def _graupel_toy_scan(\n",
- " state: tuple[float, float, float], qc_in: float, qr_in: float\n",
- ") -> tuple[float, float, float]:\n",
- " autoconversion_rate = 0.1\n",
- " sedimentaion_constant = 0.05\n",
- "\n",
- " # unpack state of previous iteration\n",
- " # TODO\n",
- " \n",
- " # Autoconversion: Cloud Drops -> Rain Drops\n",
- " # TODO\n",
- " \n",
- " ## Add sedimentation flux from level above\n",
- " # TODO\n",
- "\n",
- " # Remove mass due to sedimentation flux\n",
- " # TODO\n",
- "\n",
- " return # TODO"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "0de41cb8",
- "metadata": {},
- "outputs": [],
- "source": [
- "@gtx.field_operator(backend=backend)\n",
- "def graupel_toy_scan(\n",
- " qc: gtx.Field[[C, K], float], qr: gtx.Field[[C, K], float]\n",
- ") -> tuple[\n",
- " gtx.Field[[C, K], float],\n",
- " gtx.Field[[C, K], float]\n",
- "]:\n",
- " qc, qr, _ = _graupel_toy_scan(qc, qr)\n",
- "\n",
- " return qc, qr"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "4e0dc8d2",
- "metadata": {},
- "outputs": [],
- "source": [
- "def test_scan_operator():\n",
- " cell_k_domain = gtx.domain({C: n_cells, K: n_levels})\n",
- " \n",
- " qc = random_field_new(cell_k_domain, allocator=backend)\n",
- " qr = random_field_new(cell_k_domain, allocator=backend)\n",
- "\n",
- " qc_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
- " qr_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
- "\n",
- " # Initialize Numpy fields from GT4Py fields\n",
- " qc_numpy = qc.asnumpy().copy()\n",
- " qr_numpy = qr.asnumpy().copy()\n",
- "\n",
- " # Execute the Numpy version of scheme\n",
- " toy_microphysics_numpy(qc_numpy, qr_numpy)\n",
- "\n",
- " # Execute the GT4Py version of scheme\n",
- " graupel_toy_scan(qc, qr, out=(qc_new, qr_new), offset_provider={})\n",
- "\n",
- " # Compare results\n",
- " assert np.allclose(qc_new.asnumpy(), qc_numpy)\n",
- " assert np.allclose(qr_new.asnumpy(), qr_numpy)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "a76a6be7",
- "metadata": {},
- "outputs": [],
- "source": [
- "test_scan_operator()\n",
- "print(\"Test successful\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "db590d8a",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "jupytext": {
- "formats": "ipynb,md:myst"
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
},
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.2"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ "nbformat": 4,
+ "nbformat_minor": 5
}
diff --git a/docs/user/next/exercises/7_scan_operator_solutions.ipynb b/docs/user/next/exercises/7_scan_operator_solutions.ipynb
index 0014b35e0e..3963f5d6d3 100644
--- a/docs/user/next/exercises/7_scan_operator_solutions.ipynb
+++ b/docs/user/next/exercises/7_scan_operator_solutions.ipynb
@@ -1,282 +1,282 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "id": "9ba87bfb",
- "metadata": {},
- "source": [
- "## Scan operator"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "2ee9d989",
- "metadata": {},
- "source": [
- "The unique feature of this operator is that it provides the return state of the previous iteration as its first argument (i.e., the result from the previous grid point). In other words, all the arguments of the current `return` will be available (as a tuple) in the next iteration from the first argument of the defined function. \n",
- "\n",
- "Example: A FORTRAN pseudocode for integrating a moisture variable (e.g., cloud water or water vapour) over a column could look as follows:\n",
- "\n",
- "\n",
- "```FORTRAN\n",
- "SUBROUTINE column_integral( var_in, rho, dz, var_out, ie, je, ke )\n",
- " ! Return the column integral of a moist species.\n",
- " INTEGER, INTENT (IN) :: &\n",
- " ie, je, ke ! array dimensions of the I/O-fields (horizontal, horizontal, vertical)\n",
- "\n",
- " REAL (KIND=wp), INTENT (OUT) :: &\n",
- " q_colsum (ie,je) ! Vertically-integrated mass of water species\n",
- "\n",
- " REAL (KIND=wp), INTENT (IN) :: &\n",
- " rho (ie,je,ke), & \n",
- " dz (ie,je,ke), & ! height of model half levels\n",
- " var_in (ie,je,ke) ! humidity mass concentration at time-level nnow\n",
- " \n",
- " !$acc parallel present( iq ) if (lzacc)\n",
- " !$acc loop gang\n",
- " DO j=1,je\n",
- " !$acc loop vector\n",
- " DO i=1,ie\n",
- " q_sum(i,j) = 0.0\n",
- " END DO\n",
- " END DO\n",
- " !$acc end parallel\n",
- " \n",
- " \n",
- " !$acc parallel present( iq, rho, hhl, q ) if (lzacc)\n",
- " DO k = 1, ke ! Vertical loop\n",
- " !$acc loop gang\n",
- " DO j=1,je\n",
- " !$acc loop vector\n",
- " DO i=1,ie\n",
- " q_colsum(i,j) = q_colsum(i,j) + var_in(i,j,k) * rho(i,j,k)* dz(i,j,k)\n",
- " END DO\n",
- " END DO\n",
- " END DO\n",
- " !$acc end parallel\n",
- "END SUBROUTINE column_integral\n",
- "```\n",
- "\n",
- "Where:\n",
- "- `var_in` is the 3D variable that will be summed up\n",
- "- `q_colsum` is the resulting 2D variable\n",
- "- `rho` the air density\n",
- "- `dz`the thickness of the vertical layers\n",
- "\n",
- "In the first loop nest, `column_sum` is set to zero for all grid columns. The vertical dependency enters on the RHS of the second loop nest `q_colsum(i,j) = q_colsum(i,j) + ...`\n",
- "\n",
- "Using the `scan_operator` this operation would be written like this:\n",
- "\n",
- "```python\n",
- "@scan_operator(axis=KDim, forward=True, init=0.0)\n",
- "def column_integral(float: state, float: var, float: rho, float: dz)\n",
- " \"\"\"Return the column integral of a moist species.\"\"\"\n",
- " return var * rho * dz + state\n",
- "```\n",
- "\n",
- "Here the vertical dependency is expressed by the first function argument (`state`). This argument carries the return from the previous k-level and does not need to be specified when the function is called (similar to the `self` argument of Python classes). The argument is intialized to `init=0.0` in the function decorator (first loop nest above) and the dimension of the integral is specified with `axis=KDim`.\n",
- "\n",
- "\n",
- "```python\n",
- "q_colsum = column_integral(qv, rho, dz)\n",
- "```"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "c9e31bff",
- "metadata": {},
- "source": [
- "#### Exercise: port a toy cloud microphysics scheme from python/numpy using the template of a `scan_operator` below"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "bd2fd309",
- "metadata": {},
- "outputs": [
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "9ba87bfb",
+ "metadata": {},
+ "source": [
+ "## Scan operator"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2ee9d989",
+ "metadata": {},
+ "source": [
+ "The unique feature of this operator is that it provides the return state of the previous iteration as its first argument (i.e., the result from the previous grid point). In other words, all the arguments of the current `return` will be available (as a tuple) in the next iteration from the first argument of the defined function. \n",
+ "\n",
+ "Example: A FORTRAN pseudocode for integrating a moisture variable (e.g., cloud water or water vapour) over a column could look as follows:\n",
+ "\n",
+ "\n",
+ "```FORTRAN\n",
+ "SUBROUTINE column_integral( var_in, rho, dz, var_out, ie, je, ke )\n",
+ " ! Return the column integral of a moist species.\n",
+ " INTEGER, INTENT (IN) :: &\n",
+ " ie, je, ke ! array dimensions of the I/O-fields (horizontal, horizontal, vertical)\n",
+ "\n",
+ " REAL (KIND=wp), INTENT (OUT) :: &\n",
+ " q_colsum (ie,je) ! Vertically-integrated mass of water species\n",
+ "\n",
+ " REAL (KIND=wp), INTENT (IN) :: &\n",
+ " rho (ie,je,ke), & \n",
+ " dz (ie,je,ke), & ! height of model half levels\n",
+ " var_in (ie,je,ke) ! humidity mass concentration at time-level nnow\n",
+ " \n",
+ " !$acc parallel present( iq ) if (lzacc)\n",
+ " !$acc loop gang\n",
+ " DO j=1,je\n",
+ " !$acc loop vector\n",
+ " DO i=1,ie\n",
+ " q_sum(i,j) = 0.0\n",
+ " END DO\n",
+ " END DO\n",
+ " !$acc end parallel\n",
+ " \n",
+ " \n",
+ " !$acc parallel present( iq, rho, hhl, q ) if (lzacc)\n",
+ " DO k = 1, ke ! Vertical loop\n",
+ " !$acc loop gang\n",
+ " DO j=1,je\n",
+ " !$acc loop vector\n",
+ " DO i=1,ie\n",
+ " q_colsum(i,j) = q_colsum(i,j) + var_in(i,j,k) * rho(i,j,k)* dz(i,j,k)\n",
+ " END DO\n",
+ " END DO\n",
+ " END DO\n",
+ " !$acc end parallel\n",
+ "END SUBROUTINE column_integral\n",
+ "```\n",
+ "\n",
+ "Where:\n",
+ "- `var_in` is the 3D variable that will be summed up\n",
+ "- `q_colsum` is the resulting 2D variable\n",
+ "- `rho` the air density\n",
+ "- `dz`the thickness of the vertical layers\n",
+ "\n",
+ "In the first loop nest, `column_sum` is set to zero for all grid columns. The vertical dependency enters on the RHS of the second loop nest `q_colsum(i,j) = q_colsum(i,j) + ...`\n",
+ "\n",
+ "Using the `scan_operator` this operation would be written like this:\n",
+ "\n",
+ "```python\n",
+ "@scan_operator(axis=KDim, forward=True, init=0.0)\n",
+ "def column_integral(float: state, float: var, float: rho, float: dz)\n",
+ " \"\"\"Return the column integral of a moist species.\"\"\"\n",
+ " return var * rho * dz + state\n",
+ "```\n",
+ "\n",
 + "Here the vertical dependency is expressed by the first function argument (`state`). This argument carries the return from the previous k-level and does not need to be specified when the function is called (similar to the `self` argument of Python classes). The argument is initialized to `init=0.0` in the function decorator (first loop nest above) and the dimension of the integral is specified with `axis=KDim`.\n",
+ "\n",
+ "\n",
+ "```python\n",
+ "q_colsum = column_integral(qv, rho, dz)\n",
+ "```"
+ ]
+ },
{
- "data": {
- "text/html": [
- "\n",
- "\n"
+ "cell_type": "markdown",
+ "id": "c9e31bff",
+ "metadata": {},
+ "source": [
+ "#### Exercise: port a toy cloud microphysics scheme from python/numpy using the template of a `scan_operator` below"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "bd2fd309",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
],
- "text/plain": [
- ""
+ "source": [
+ "from helpers import *\n",
+ "\n",
+ "import gt4py.next as gtx\n",
+ "\n",
+ "backend = None\n",
+ "# backend = gtfn_cpu\n",
+ "# backend = gtfn_gpu"
]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "from helpers import *\n",
- "\n",
- "import gt4py.next as gtx\n",
- "\n",
- "backend = None\n",
- "# backend = gtfn_cpu\n",
- "# backend = gtfn_gpu"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "74338168",
- "metadata": {},
- "outputs": [],
- "source": [
- "def toy_microphysics_numpy(qc, qr, autoconversion_rate=0.1, sedimentaion_constant=0.05):\n",
- " \"\"\"A toy model of a microphysics scheme contaning autoconversion and scavenging\"\"\"\n",
- "\n",
- " sedimentation_flux = 0.0\n",
- "\n",
- " for cell, k in np.ndindex(qc.shape):\n",
- " # Autoconversion: Cloud Drops -> Rain Drops\n",
- " autoconversion_tendency = qc[cell, k] * autoconversion_rate\n",
- "\n",
- " qc[cell, k] -= autoconversion_tendency\n",
- " qr[cell, k] += autoconversion_tendency\n",
- "\n",
- " ## Apply sedimentation flux from level above\n",
- " qr[cell, k] += sedimentation_flux\n",
- "\n",
- " ## Remove mass due to sedimentation flux from the current cell\n",
- " qr[cell, k] -= sedimentation_flux"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "69bf6022",
- "metadata": {},
- "outputs": [],
- "source": [
- "@gtx.scan_operator(axis=K, forward=True, init=(0.0, 0.0, 0.0))\n",
- "def _graupel_toy_scan(\n",
- " state: tuple[float, float, float], qc_in: float, qr_in: float\n",
- ") -> tuple[float, float, float]:\n",
- " autoconversion_rate = 0.1\n",
- " sedimentaion_constant = 0.05\n",
- "\n",
- " # unpack state of previous iteration\n",
- " _, _, sedimentation_flux = state\n",
- "\n",
- " # Autoconversion: Cloud Drops -> Rain Drops\n",
- " autoconv_t = qc_in * autoconversion_rate\n",
- " qc = qc_in - autoconv_t\n",
- " qr = qr_in + autoconv_t\n",
- "\n",
- " ## Add sedimentation flux from level above\n",
- " qr = qr + sedimentation_flux\n",
- "\n",
- " # Remove mass due to sedimentation flux\n",
- " qr = qr - sedimentation_flux\n",
- "\n",
- " return qc, qr, sedimentation_flux"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "0de41cb8",
- "metadata": {},
- "outputs": [],
- "source": [
- "@gtx.field_operator(backend=backend)\n",
- "def graupel_toy_scan(\n",
- " qc: gtx.Field[[C, K], float], qr: gtx.Field[[C, K], float]\n",
- ") -> tuple[\n",
- " gtx.Field[[C, K], float],\n",
- " gtx.Field[[C, K], float]\n",
- "]:\n",
- " qc, qr, _ = _graupel_toy_scan(qc, qr)\n",
- "\n",
- " return qc, qr"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "4e0dc8d2",
- "metadata": {},
- "outputs": [],
- "source": [
- "def test_scan_operator():\n",
- " cell_k_domain = gtx.domain({C: n_cells, K: n_levels})\n",
- " \n",
- " qc = random_field_new(cell_k_domain, allocator=backend)\n",
- " qr = random_field_new(cell_k_domain, allocator=backend)\n",
- "\n",
- " qc_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
- " qr_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
- "\n",
- " # Initialize Numpy fields from GT4Py fields\n",
- " qc_numpy = qc.asnumpy().copy()\n",
- " qr_numpy = qr.asnumpy().copy()\n",
- "\n",
- " # Execute the Numpy version of scheme\n",
- " toy_microphysics_numpy(qc_numpy, qr_numpy)\n",
- "\n",
- " # Execute the GT4Py version of scheme\n",
- " graupel_toy_scan(qc, qr, out=(qc_new, qr_new), offset_provider={})\n",
- "\n",
- " # Compare results\n",
- " assert np.allclose(qc_new.asnumpy(), qc_numpy)\n",
- " assert np.allclose(qr_new.asnumpy(), qr_numpy)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "a76a6be7",
- "metadata": {},
- "outputs": [
+ },
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Test successful\n"
- ]
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "74338168",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def toy_microphysics_numpy(qc, qr, autoconversion_rate=0.1, sedimentaion_constant=0.05):\n",
 + "    \"\"\"A toy model of a microphysics scheme containing autoconversion and scavenging\"\"\"\n",
+ "\n",
+ " sedimentation_flux = 0.0\n",
+ "\n",
+ " for cell, k in np.ndindex(qc.shape):\n",
+ " # Autoconversion: Cloud Drops -> Rain Drops\n",
+ " autoconversion_tendency = qc[cell, k] * autoconversion_rate\n",
+ "\n",
+ " qc[cell, k] -= autoconversion_tendency\n",
+ " qr[cell, k] += autoconversion_tendency\n",
+ "\n",
+ " ## Apply sedimentation flux from level above\n",
+ " qr[cell, k] += sedimentation_flux\n",
+ "\n",
+ " ## Remove mass due to sedimentation flux from the current cell\n",
+ " qr[cell, k] -= sedimentation_flux"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "69bf6022",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@gtx.scan_operator(axis=K, forward=True, init=(0.0, 0.0, 0.0))\n",
+ "def _graupel_toy_scan(\n",
+ " state: tuple[float, float, float], qc_in: float, qr_in: float\n",
+ ") -> tuple[float, float, float]:\n",
+ " autoconversion_rate = 0.1\n",
+ " sedimentaion_constant = 0.05\n",
+ "\n",
+ " # unpack state of previous iteration\n",
+ " _, _, sedimentation_flux = state\n",
+ "\n",
+ " # Autoconversion: Cloud Drops -> Rain Drops\n",
+ " autoconv_t = qc_in * autoconversion_rate\n",
+ " qc = qc_in - autoconv_t\n",
+ " qr = qr_in + autoconv_t\n",
+ "\n",
+ " ## Add sedimentation flux from level above\n",
+ " qr = qr + sedimentation_flux\n",
+ "\n",
+ " # Remove mass due to sedimentation flux\n",
+ " qr = qr - sedimentation_flux\n",
+ "\n",
+ " return qc, qr, sedimentation_flux"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "0de41cb8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@gtx.field_operator(backend=backend)\n",
+ "def graupel_toy_scan(\n",
+ " qc: gtx.Field[[C, K], float], qr: gtx.Field[[C, K], float]\n",
+ ") -> tuple[\n",
+ " gtx.Field[[C, K], float],\n",
+ " gtx.Field[[C, K], float]\n",
+ "]:\n",
+ " qc, qr, _ = _graupel_toy_scan(qc, qr)\n",
+ "\n",
+ " return qc, qr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "4e0dc8d2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def test_scan_operator():\n",
+ " cell_k_domain = gtx.domain({C: n_cells, K: n_levels})\n",
+ " \n",
+ " qc = random_field(cell_k_domain, allocator=backend)\n",
+ " qr = random_field(cell_k_domain, allocator=backend)\n",
+ "\n",
+ " qc_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
+ " qr_new = gtx.zeros(cell_k_domain, allocator=backend)\n",
+ "\n",
+ " # Initialize Numpy fields from GT4Py fields\n",
+ " qc_numpy = qc.asnumpy().copy()\n",
+ " qr_numpy = qr.asnumpy().copy()\n",
+ "\n",
+ " # Execute the Numpy version of scheme\n",
+ " toy_microphysics_numpy(qc_numpy, qr_numpy)\n",
+ "\n",
+ " # Execute the GT4Py version of scheme\n",
+ " graupel_toy_scan(qc, qr, out=(qc_new, qr_new), offset_provider={})\n",
+ "\n",
+ " # Compare results\n",
+ " assert np.allclose(qc_new.asnumpy(), qc_numpy)\n",
+ " assert np.allclose(qr_new.asnumpy(), qr_numpy)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "a76a6be7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Test successful\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_scan_operator()\n",
+ "print(\"Test successful\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "db590d8a",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "formats": "ipynb,md:myst"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.2"
}
- ],
- "source": [
- "test_scan_operator()\n",
- "print(\"Test successful\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "db590d8a",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "jupytext": {
- "formats": "ipynb,md:myst"
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
},
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.2"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ "nbformat": 4,
+ "nbformat_minor": 5
}
diff --git a/docs/user/next/exercises/helpers.py b/docs/user/next/exercises/helpers.py
index fbec624d2e..1a448d479e 100644
--- a/docs/user/next/exercises/helpers.py
+++ b/docs/user/next/exercises/helpers.py
@@ -25,11 +25,7 @@ def random_mask(
return gtx.as_field([*dims], (arr))
-def random_field(sizes, *dims, low: float = -1.0, high: float = 1.0) -> MutableLocatedField:
- return gtx.as_field([*dims], np.random.default_rng().uniform(low=low, high=high, size=sizes))
-
-
-def random_field_new(
+def random_field(
domain: gtx.Domain, low: float = -1.0, high: float = 1.0, *, allocator=None
) -> MutableLocatedField:
return gtx.as_field(
@@ -47,14 +43,6 @@ def random_sign(domain: gtx.Domain, allocator=None, dtype=float) -> MutableLocat
)
-def zero_field(sizes, *dims: Dimension, dtype=float) -> MutableLocatedField:
- return gtx.as_field([*dims], np.zeros(shape=sizes, dtype=dtype))
-
-
-def constant_field(value, sizes, *dims, dtype=float) -> MutableLocatedField:
- return gtx.as_field([*dims], value * np.ones(shape=sizes), dtype=dtype)
-
-
def ripple_field(domain: gtx.Domain, *, allocator=None) -> MutableLocatedField:
assert domain.ndim == 2
nx, ny = domain.shape