-
Notifications
You must be signed in to change notification settings - Fork 17
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Tensor product differentiation operators #313
Changes from 6 commits
b1be58a
8fa3321
3778edf
45e859e
22ffacd
d0bd17e
ef667bb
eed2516
1e40a11
f2b0275
036681c
7ad9017
c645dbe
dcd7ca0
683cdd8
1563950
e1380fe
33a54e4
e446bb7
264192c
ba03b3f
8263333
6b50028
5d36bfb
92991a3
49116ab
d87c19f
aec8bdb
c14f08d
b8e24c4
645a504
9b70d0b
ed8aacf
b1c312d
0cb1830
984a984
77d6470
c3f7543
06f5738
e31a8c4
e293bd0
0792c0b
58c39bd
b8b239c
be8a26b
3f8a886
e136538
82b2c09
2bb9228
7e389a7
9951d65
f722b27
4b1e0b6
47d6674
201c018
b2e909e
220c483
fdcf4b8
d9dbffe
d611023
f9a5330
f216355
90d7092
8ebccc7
29be28d
607d368
f03230d
cadb98a
a662445
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -166,6 +166,38 @@ | |
) | ||
|
||
|
||
# {{{ Temporary tools for tensor product operators | ||
# NOTE: Will possibly be removed in a future version of tensor product operator | ||
# development since (I think) it is not entirely necessary | ||
from pytools.tag import Tag | ||
class OutputIsTensorProductDOFArrayOrdered(Tag): | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The "I'm a tensor product" tags should probably have axis granularity (instead of whole-array). |
||
pass | ||
|
||
|
||
from grudge.array_context import PyOpenCLArrayContext | ||
class TensorProductArrayContext(PyOpenCLArrayContext): | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Make this part of the "normal" |
||
def transform_loopy_program(self, t_unit): | ||
if len(t_unit.callables_table) == 1: | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Could do this for all? |
||
knl = t_unit.default_entrypoint | ||
if knl.tags_of_type(OutputIsTensorProductDOFArrayOrdered): | ||
new_args = [] | ||
for arg in knl.args: | ||
if arg.is_output: | ||
arg = arg.copy(dim_tags=( | ||
f"N{len(arg.shape)-1}," | ||
+ ",".join(f"N{i}" | ||
for i in range(len(arg.shape)-1)) | ||
)) | ||
|
||
new_args.append(arg) | ||
|
||
knl = knl.copy(args=new_args) | ||
t_unit = t_unit.with_kernel(knl) | ||
|
||
return super().transform_loopy_program(t_unit) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This is also going to have a lazy tentacle. |
||
# }}} | ||
|
||
|
||
# {{{ common derivative "kernels" | ||
|
||
def _single_axis_derivative_kernel( | ||
|
@@ -202,19 +234,97 @@ def _gradient_kernel(actx, out_discr, in_discr, get_diff_mat, inv_jac_mat, vec, | |
*, metric_in_matvec): | ||
# See _single_axis_derivative_kernel for comments on the usage scenarios | ||
# (both strong and weak derivative) and their differences. | ||
|
||
def compute_tensor_product_grad(actx, grp, diff_mat, vec, ijm): | ||
"""Exploits tensor product structure to differentiate each coordinate | ||
axis using a single differentiation matrix of shape (nnodes1d, nnodes1d) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Specify what this does with math. :) |
||
""" | ||
|
||
actx_tp = TensorProductArrayContext( | ||
actx.queue, | ||
allocator=actx.allocator, | ||
force_device_scalars=actx._force_device_scalars) | ||
|
||
from modepy.tools import ( | ||
reshape_array_for_tensor_product_space, | ||
unreshape_array_for_tensor_product_space) | ||
|
||
# reshape u to expose tensor product structure | ||
vec = reshape_array_for_tensor_product_space(grp.space, vec) | ||
|
||
# apply differentiation matrix to vec | ||
# check len(vec.shape) since shape is expected to be | ||
# (nelements, nnodes1d, nnodes1d) | ||
if len(vec.shape) == 3: | ||
specs = ["il,elj->eij", | ||
"jl,eil->eij"] | ||
elif len(vec.shape) == 4: | ||
specs = ["il,eljk->eijk", | ||
"jl,eilk->eijk", | ||
"kl,eijl->eijk"] | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Can you make this dimension-independent? |
||
else: | ||
specs = None | ||
assert specs is not None | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Use |
||
|
||
diff_mat = get_diff_mat(actx, grp, grp) | ||
grad = make_obj_array([ | ||
actx_tp.einsum( | ||
spec, | ||
diff_mat, | ||
vec, | ||
arg_names=("diff_mat", "vec"), | ||
tagged=(FirstAxisIsElementsTag(), | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
|
||
OutputIsTensorProductDOFArrayOrdered())) | ||
for spec in specs | ||
]) | ||
|
||
# unreshape grad to apply geometric factors | ||
# NOTE: In a future version, do not reshape before application of | ||
# geometric factors. Can possibly "chain" the einsum. For example, the | ||
# simplicial case below has einsum with spec | ||
# ("xrei,rij,ei->ei") | ||
# for the strong local gradient case | ||
grad = make_obj_array([ | ||
unreshape_array_for_tensor_product_space(grp.space, grad[i]) | ||
for i in range(grad.shape[0]) | ||
]) | ||
|
||
# apply geometric factors to current grad | ||
# FIXME: using einsum spec ("xrei,xei->xei") throws error: | ||
# "Loopy does not directly support object arrays" | ||
grad = make_obj_array([ | ||
actx_tp.einsum( | ||
"rei,ei->ei", | ||
ijm[i], | ||
grad[i], | ||
tagged=(FirstAxisIsElementsTag(),)), | ||
arg_names=("inv_jac_t", "vec") | ||
for i in range(grad.shape[0]) | ||
]) | ||
|
||
return grad | ||
|
||
from meshmode.discretization.poly_element import \ | ||
TensorProductElementGroupBase | ||
per_group_grads = [ | ||
|
||
compute_tensor_product_grad(actx, in_grp, get_diff_mat, vec_i, ijm_i) | ||
if isinstance(in_grp, TensorProductElementGroupBase) | ||
|
||
# r for rst axis | ||
# x for xyz axis | ||
actx.einsum("xrej,rij,ej->xei" if metric_in_matvec else "xrei,rij,ej->xei", | ||
ijm_i, | ||
get_diff_mat( | ||
actx, | ||
out_element_group=out_grp, | ||
in_element_group=in_grp | ||
), | ||
vec_i, | ||
arg_names=("inv_jac_t", "ref_stiffT_mat", "vec"), | ||
tagged=(FirstAxisIsElementsTag(),)) | ||
else actx.einsum( | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Factor this into a function also. |
||
"xrej,rij,ej->xei" if metric_in_matvec else "xrei,rij,ej->xei", | ||
ijm_i, | ||
get_diff_mat( | ||
actx, | ||
out_element_group=out_grp, | ||
in_element_group=in_grp | ||
), | ||
vec_i, | ||
arg_names=("inv_jac_t", "ref_stiffT_mat", "vec"), | ||
tagged=(FirstAxisIsElementsTag(),)) | ||
|
||
for out_grp, in_grp, vec_i, ijm_i in zip( | ||
out_discr.groups, in_discr.groups, vec, | ||
inv_jac_mat)] | ||
|
@@ -259,16 +369,37 @@ def _reference_derivative_matrices(actx: ArrayContext, | |
# _reference_stiffness_transpose_matrices. | ||
assert out_element_group is in_element_group | ||
|
||
from meshmode.mesh import TensorProductElementGroup | ||
|
||
@keyed_memoize_in( | ||
actx, _reference_derivative_matrices, | ||
lambda grp: grp.discretization_key()) | ||
def get_ref_derivative_mats(grp): | ||
from meshmode.discretization.poly_element import diff_matrices | ||
return actx.freeze( | ||
actx.tag_axis( | ||
1, DiscretizationDOFAxisTag(), | ||
actx.from_numpy( | ||
np.asarray(diff_matrices(grp))))) | ||
|
||
from meshmode.discretization.poly_element import \ | ||
TensorProductElementGroupBase | ||
if isinstance(grp, TensorProductElementGroupBase): | ||
import modepy as mp | ||
import numpy.linalg as la | ||
|
||
space1d = grp.space.bases[0] | ||
shape1d = grp.shape.bases[0] | ||
|
||
nodes1d = mp.edge_clustered_nodes_for_space(space1d, shape1d) | ||
basis1d = mp.basis_for_space(space1d, shape1d) | ||
|
||
vdm1d = mp.vandermonde(basis1d.functions, nodes1d) | ||
vdm_p1d = mp.vandermonde(basis1d.gradients, nodes1d)[0] | ||
|
||
return actx.freeze(actx.from_numpy(vdm_p1d @ la.inv(vdm1d))) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. possibly fixed in meshmode PR 384 |
||
|
||
else: | ||
from meshmode.discretization.poly_element import diff_matrices | ||
return actx.freeze( | ||
actx.tag_axis( | ||
1, DiscretizationDOFAxisTag(), | ||
actx.from_numpy( | ||
np.asarray(diff_matrices(grp))))) | ||
return get_ref_derivative_mats(out_element_group) | ||
|
||
|
||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Move this to somewhere near the array context.