Recent work has shown that deep neural networks can be employed to solve partial differential equations, giving rise to the framework of physics-informed neural networks. We introduce a generalization of these methods that manifests as a scaling parameter balancing the relative importance of the different constraints imposed by partial differential equations. We provide a mathematical motivation for these generalized methods, showing that for linear and well-posed partial differential equations the functional form is convex. We then derive a choice of the scaling parameter that is optimal with respect to a measure of relative error. Because this optimal choice relies on full knowledge of the analytical solution, we also propose a heuristic method to approximate it. The proposed methods are compared numerically to the original methods on a variety of model partial differential equations, with the number of data points being updated adaptively. For several problems, including high-dimensional PDEs, the proposed methods are shown to significantly enhance accuracy.
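To make the weighting concrete, the sketch below illustrates the general idea of a scalar weight balancing the PDE-residual and boundary terms in a physics-informed loss. This is a minimal illustration, not the authors' implementation: the toy Poisson problem, network architecture, weight value, and optimizer are all assumptions chosen for brevity.

```python
import torch

# Minimal sketch of a weighted PINN loss for the 1D Poisson problem
# u''(x) = f(x) on (0, 1) with u(0) = u(1) = 0. The scalar `lam` plays
# the role of the scaling parameter described in the abstract; its
# value here is arbitrary, not the paper's optimal or heuristic choice.

net = torch.nn.Sequential(
    torch.nn.Linear(1, 32), torch.nn.Tanh(),
    torch.nn.Linear(32, 32), torch.nn.Tanh(),
    torch.nn.Linear(32, 1),
)

def f(x):
    # Hypothetical forcing term; the exact solution is u(x) = sin(pi x).
    return -torch.pi ** 2 * torch.sin(torch.pi * x)

def weighted_loss(lam, n_interior=128):
    # Interior collocation points for the PDE residual.
    x = torch.rand(n_interior, 1, requires_grad=True)
    u = net(x)
    du = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
    d2u = torch.autograd.grad(du.sum(), x, create_graph=True)[0]
    residual = ((d2u - f(x)) ** 2).mean()   # PDE constraint
    xb = torch.tensor([[0.0], [1.0]])
    boundary = (net(xb) ** 2).mean()        # boundary constraint
    return residual + lam * boundary        # lam balances the two terms

opt = torch.optim.Adam(net.parameters(), lr=1e-3)
for _ in range(5000):
    opt.zero_grad()
    loss = weighted_loss(lam=100.0)
    loss.backward()
    opt.step()
```

With `lam = 1.0` this reduces to the unweighted loss of standard physics-informed neural networks; the paper's contribution is how to choose this weight well, either optimally (given the analytical solution) or via a heuristic approximation.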
@ARTICLE{2020arXiv200206269V,
author = {{van der Meer}, Remco and {Oosterlee}, Cornelis and {Borovykh}, Anastasia},
title = "{Optimally weighted loss functions for solving PDEs with Neural Networks}",
journal = {arXiv e-prints},
keywords = {Mathematics - Numerical Analysis},
year = 2020,
month = feb,
eid = {arXiv:2002.06269},
pages = {arXiv:2002.06269},
archivePrefix = {arXiv},
eprint = {2002.06269},
primaryClass = {math.NA},
adsurl = {https://ui.adsabs.harvard.edu/abs/2020arXiv200206269V},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}