Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Modifications using numpy operations, CI #9

Merged
merged 51 commits into from
Mar 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
3e6c03e
trying to implement auto testing
Mar 13, 2024
2f7b624
trying to implement auto testing
Mar 13, 2024
f90e745
trying to implement auto testing
Mar 13, 2024
75e92b2
trying to implement auto testing
Mar 13, 2024
08b668a
implement CI
Mar 14, 2024
269dcb5
implement CI
Mar 14, 2024
65bd967
implement CI
Mar 14, 2024
3faa6eb
implement CI
Mar 14, 2024
e1228b6
implement CI
Mar 14, 2024
09d1aa0
CI implement
Mar 14, 2024
ab3aaf0
testing script added
Mar 14, 2024
4f7f694
implement CI fix
Mar 14, 2024
3f556ba
mpi hang not observed if plt.show() not last line
Mar 14, 2024
21d5b36
plt.show() not hanging with multiproc
Mar 14, 2024
8e0e656
possible memory issue needs to be fixed
Mar 15, 2024
51ae577
testing suite comments removed
Mar 15, 2024
dee84e5
possible memory issue
Mar 15, 2024
20bec60
investigating memory usage
Mar 15, 2024
9906551
updated testing suite file
Mar 15, 2024
d773a21
unit tests update v_1
Mar 15, 2024
2da1208
updated Dockerfile to test memory usage
Mar 15, 2024
4882861
updated testing file
Mar 16, 2024
6062ee5
CI attempt
Mar 16, 2024
48857d7
removed mem_prof import
Mar 16, 2024
29c38f2
update test file
Mar 16, 2024
94ab33c
update CI from 4 proc to 2 proc
Mar 16, 2024
f80fc13
testing if CI fails
Mar 16, 2024
cfc422a
testing suite working as expected
Mar 16, 2024
086a453
clean up
Mar 16, 2024
6660458
testing CI v_1
Mar 16, 2024
a6743bd
testing CI v_1
Mar 16, 2024
7637683
testing CI v_1
Mar 16, 2024
dd564d7
testing CI - expected to fail
Mar 16, 2024
c5a8b82
checking CI after minor changes to return values from modelVerify
Mar 16, 2024
0c5567e
changes to testing suite
Mar 16, 2024
c9ea067
removed testing folder from example folder
Mar 16, 2024
894a510
final change _v1
Mar 16, 2024
a3ba6ba
merged changes
Mar 18, 2024
00fb8b2
removed files
Mar 18, 2024
8cacc43
minor fixes, MPI hang for multiproc pact with destructor in PDEProblem
Mar 18, 2024
1c390ab
MPI lag fixed
Mar 18, 2024
551bbbb
removing commented code
Mar 18, 2024
776e6e8
prior del method, jacobi pc
Mar 19, 2024
b76c9ce
need to fix Regularization.py
Mar 19, 2024
7a50535
Update Regularization.py
uvilla Mar 19, 2024
5ffc3f8
Update prior.py
uvilla Mar 19, 2024
f83d5d4
Update model.py
uvilla Mar 19, 2024
dc78353
Update prior.py
uvilla Mar 19, 2024
9a0e8c2
Update Regularization.py
uvilla Mar 19, 2024
c56ec63
regularization prior example added
Mar 19, 2024
bf55be1
clean up
Mar 19, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 16 additions & 4 deletions .github/workflows/CI_testing.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,32 @@ on:
push:
branches:
- main
pull_request:

defaults:
run:
shell: bash -l {0}

jobs:
style:
runs-on: ubuntu-22.04
container:
image: dolfinx/dolfinx:stable
options: --user 1001 --privileged
name: CI test

steps:
- name: run code
- name: Checkout code
uses: actions/checkout@v2
- name: run the file
run: cd example
run: mpirun -n 1 python3 -u poisson_example.py

- name: run serial check
run: |
cd ./hippylibX/test &&
mpirun -n 1 python3 testing_suite_file.py

- name: run parallel check
run: |
cd ./hippylibX/test &&
mpirun -n 2 python3 testing_suite_file.py


1 change: 1 addition & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
FROM dolfinx/dolfinx:stable
# MAINTAINER is deprecated since Docker 1.13; LABEL is the supported form.
LABEL maintainer="Venugopal Ranganathan <[email protected]>"
# Install memory-profiling utilities in a single layer; --no-cache-dir keeps
# the image smaller by not persisting pip's download cache.
RUN pip install --no-cache-dir memory_profiler psutil
46 changes: 22 additions & 24 deletions example/poisson_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,9 @@
import os
import dolfinx.fem.petsc


from matplotlib import pyplot as plt

sys.path.append( os.environ.get('HIPPYLIBX_BASE_DIR', "../") )

import hippylibX as hpx

def master_print(comm, *args, **kwargs):
Expand Down Expand Up @@ -79,22 +77,14 @@ def run_inversion(nx : int, ny : int, noise_variance : float, prior_param : dict

pde.solveFwd(u_true,x_true)

xfun = [dlx.fem.Function(Vhi) for Vhi in Vh]

# LIKELIHOOD
hpx.updateFromVector(xfun[hpx.STATE],u_true)
u_fun_true = xfun[hpx.STATE]

hpx.updateFromVector(xfun[hpx.PARAMETER],m_true)
m_fun_true = xfun[hpx.PARAMETER]

d = dlx.fem.Function(Vh[hpx.STATE])
d.x.array[:] = u_true.array[:]
hpx.parRandom.normal_perturb(np.sqrt(noise_variance),d.x)
d.x.scatter_forward()

misfit_form = PoissonMisfitForm(d,noise_variance)
misfit = hpx.NonGaussianContinuousMisfit(msh, Vh, misfit_form)
misfit = hpx.NonGaussianContinuousMisfit(Vh, misfit_form)

prior_mean = dlx.fem.Function(Vh_m)
prior_mean.x.array[:] = 0.01
Expand All @@ -108,17 +98,10 @@ def run_inversion(nx : int, ny : int, noise_variance : float, prior_param : dict
hpx.parRandom.normal(1.,noise)
prior.sample(noise,m0)

hpx.modelVerify(comm,model,m0,is_quadratic=False,misfit_only=True,verbose=(rank == 0))
hpx.modelVerify(comm,model,m0,is_quadratic=False,misfit_only=False,verbose=(rank == 0))


# eps, err_grad, err_H = hpx.modelVerify(comm,model,m0,is_quadratic=False,misfit_only=True,verbose=(rank == 0))

# if(rank == 0):
# print(err_grad,'\n')
# print(err_H)
# plt.show()
data_misfit_True = hpx.modelVerify(model,m0,is_quadratic=False,misfit_only=True,verbose=(rank == 0))

data_misfit_False = hpx.modelVerify(model,m0,is_quadratic=False,misfit_only=False,verbose=(rank == 0))

# # #######################################

prior_mean_copy = prior.generate_parameter(0)
Expand Down Expand Up @@ -151,18 +134,33 @@ def run_inversion(nx : int, ny : int, noise_variance : float, prior_param : dict
master_print (comm, "Termination reason: ", solver.termination_reasons[solver.reason])
master_print (comm, "Final gradient norm: ", solver.final_grad_norm)
master_print (comm, "Final cost: ", solver.final_cost)

optimizer_results = {}
if(solver.termination_reasons[solver.reason] == 'Norm of the gradient less than tolerance'):
optimizer_results['optimizer'] = True
else:
optimizer_results['optimizer'] = False

#######################################

final_results = {"data_misfit_True":data_misfit_True,
"data_misfit_False":data_misfit_False,
"optimizer_results":optimizer_results}


return final_results


#######################################

if __name__ == "__main__":
nx = 64
ny = 64
noise_variance = 1e-4
prior_param = {"gamma": 0.1, "delta": 1.}
run_inversion(nx, ny, noise_variance, prior_param)



comm = MPI.COMM_WORLD
if(comm.rank == 0):
plt.savefig("poisson_result_FD_Gradient_Hessian_Check")
plt.show()

185 changes: 185 additions & 0 deletions example/poisson_example_reg.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,185 @@
# poisson example using VariationalRegularization prior instead
# of BiLaplacian Prior

import ufl
import dolfinx as dlx
from mpi4py import MPI
import numpy as np
import petsc4py

import sys
import os
import dolfinx.fem.petsc

from matplotlib import pyplot as plt

sys.path.append( os.environ.get('HIPPYLIBX_BASE_DIR', "../") )
import hippylibX as hpx

def master_print(comm, *args, **kwargs):
    """Print *args only on the root rank (rank 0) of *comm*.

    Avoids duplicated console output when running under MPI; all
    keyword arguments are forwarded to the built-in print().
    """
    if comm.rank != 0:
        return
    print(*args, **kwargs)

class Poisson_Approximation:
    """Weak-form handler for the forward Poisson problem.

    The residual form is
        exp(m) <grad u, grad p> dx + alpha <u, p> ds - f p dx
    where m is the log-diffusivity parameter, u the state and p the adjoint
    (test) function.
    """

    def __init__(self, alpha : float, f : float):
        self.alpha = alpha  # Robin boundary coefficient
        self.f = f          # constant source term
        # Fixed quadrature degree 4 for cell and facet integrals.
        self.dx = ufl.Measure("dx", metadata={"quadrature_degree": 4})
        self.ds = ufl.Measure("ds", metadata={"quadrature_degree": 4})

    def __call__(self, u: dlx.fem.Function, m : dlx.fem.Function, p : dlx.fem.Function) -> ufl.form.Form:
        diffusion = ufl.exp(m) * ufl.inner(ufl.grad(u), ufl.grad(p)) * self.dx
        boundary = self.alpha * ufl.inner(u, p) * self.ds
        source = self.f * p * self.dx
        return diffusion + boundary - source


class PoissonMisfitForm:
    """Gaussian (least-squares) data-misfit functional.

    Evaluates 0.5 / sigma2 * ||u - d||^2 integrated over the domain, where
    d is the observed data field and sigma2 the noise variance.
    """

    def __init__(self, d : float, sigma2 : float):
        self.d = d            # observed data
        self.sigma2 = sigma2  # noise variance
        self.dx = ufl.Measure("dx", metadata={"quadrature_degree": 4})

    def __call__(self, u : dlx.fem.Function, m: dlx.fem.Function) -> ufl.form.Form:
        # m is unused here but required by the misfit-handler interface.
        residual = u - self.d
        return .5 / self.sigma2 * ufl.inner(residual, residual) * self.dx


# functional handler for prior:
class H1TikhonvFunctional:
    """H1 Tikhonov regularization functional for the VariationalRegularization prior.

    Evaluates <gamma grad m, grad m> dx + <delta m, m> dx.

    NOTE(review): the class name misspells "Tikhonov"; kept as-is for
    backward compatibility with existing callers.
    """

    def __init__(self, gamma, delta):
        # gamma and delta may be dlx Constant, Expression, or Function.
        self.gamma = gamma  # weight of the gradient (smoothness) term
        self.delta = delta  # weight of the mass (magnitude) term
        self.dx = ufl.Measure("dx", metadata={"quadrature_degree": 4})

    def __call__(self, m):
        # m is a dlx Function.
        grad_term = ufl.inner(self.gamma * ufl.grad(m), ufl.grad(m)) * self.dx
        mass_term = ufl.inner(self.delta * m, m) * self.dx
        return grad_term + mass_term

def run_inversion(nx : int, ny : int, noise_variance : float, prior_param : dict) -> dict:
    """Solve a Bayesian inverse problem for the log-diffusivity of a Poisson
    equation, using a VariationalRegularization (H1 Tikhonov) prior.

    Parameters
    ----------
    nx, ny : int
        Number of mesh cells of the unit square in each direction.
    noise_variance : float
        Variance of the additive Gaussian noise used to synthesize the data.
    prior_param : dict
        Regularization weights with keys 'gamma' (gradient term) and
        'delta' (mass term).

    Returns
    -------
    dict
        {"data_misfit_True": <modelVerify output, misfit only>,
         "data_misfit_False": <modelVerify output, full model>,
         "optimizer_results": {"optimizer": bool}} where the boolean records
        whether Newton-CG terminated on the gradient-norm tolerance.
    """
    sep = "\n"+"#"*80+"\n"

    comm = MPI.COMM_WORLD
    rank = comm.rank
    nproc = comm.size  # NOTE(review): unused; kept for parity with the other example

    msh = dlx.mesh.create_unit_square(comm, nx, ny)

    # STATE and ADJOINT live in CG2, the PARAMETER in CG1.
    Vh_phi = dlx.fem.FunctionSpace(msh, ("CG", 2))
    Vh_m = dlx.fem.FunctionSpace(msh, ("CG", 1))
    Vh = [Vh_phi, Vh_m, Vh_phi]

    ndofs = [Vh_phi.dofmap.index_map.size_global * Vh_phi.dofmap.index_map_bs, Vh_m.dofmap.index_map.size_global * Vh_m.dofmap.index_map_bs ]
    master_print (comm, sep, "Set up the mesh and finite element spaces", sep)
    master_print (comm, "Number of dofs: STATE={0}, PARAMETER={1}".format(*ndofs) )

    # FORWARD MODEL: linear Poisson problem with Robin boundary term.
    alpha = 100.
    f = 1.
    pde_handler = Poisson_Approximation(alpha, f)
    pde = hpx.PDEVariationalProblem(Vh, pde_handler, [], [], is_fwd_linear=True)

    # GROUND TRUTH: piecewise log-diffusivity, higher inside a disk of
    # radius 0.2 centered at (0.5, 0.5).
    m_true = dlx.fem.Function(Vh_m)

    m_true.interpolate(lambda x: np.log(2 + 7*( ( (x[0] - 0.5)**2 + (x[1] - 0.5)**2)**0.5 > 0.2)) )
    m_true.x.scatter_forward()

    # Rebind to the underlying dof vector (the hippylibX API works on vectors).
    m_true = m_true.x

    u_true = pde.generate_state()

    # [STATE, PARAMETER, ADJOINT]; the adjoint is not needed for the forward solve.
    x_true = [u_true, m_true, None]

    pde.solveFwd(u_true,x_true)


    # LIKELIHOOD: synthesize noisy observations d = u_true + N(0, noise_variance).
    d = dlx.fem.Function(Vh[hpx.STATE])
    d.x.array[:] = u_true.array[:]
    hpx.parRandom.normal_perturb(np.sqrt(noise_variance),d.x)
    d.x.scatter_forward()

    misfit_form = PoissonMisfitForm(d,noise_variance)
    misfit = hpx.NonGaussianContinuousMisfit(Vh, misfit_form)

    prior_mean = dlx.fem.Function(Vh_m)
    prior_mean.x.array[:] = 0.01
    prior_mean = prior_mean.x

    prior_gamma = prior_param['gamma']
    prior_delta = prior_param['delta']

    # PRIOR: variational H1 Tikhonov regularization instead of BiLaplacian.
    prior_handler = H1TikhonvFunctional(prior_gamma, prior_delta)
    prior = hpx.VariationalRegularization(Vh_m, prior_handler)
    model = hpx.Model(pde, prior, misfit)

    # Random starting guess for the finite-difference checks.
    m0 = pde.generate_parameter()
    hpx.parRandom.normal(1.,m0)

    # FD gradient/Hessian checks, first on the misfit alone, then on the
    # full (misfit + regularization) model.
    data_misfit_True = hpx.modelVerify(model,m0,is_quadratic=False,misfit_only=True,verbose=(rank == 0))

    data_misfit_False = hpx.modelVerify(model,m0,is_quadratic=False,misfit_only=False,verbose=(rank == 0))

    # ---- Find the MAP point via inexact Newton-CG ----

    # NOTE(review): the sibling example builds this with prior.generate_parameter(0);
    # presumably pde.generate_parameter() yields a compatible vector — confirm.
    prior_mean_copy = pde.generate_parameter()
    prior_mean_copy.array[:] = prior_mean.array[:]

    # Initial iterate: zero state/adjoint, parameter at the prior mean.
    x = [model.generate_vector(hpx.STATE), prior_mean_copy, model.generate_vector(hpx.ADJOINT)]

    if rank == 0:
        print( sep, "Find the MAP point", sep)

    parameters = hpx.ReducedSpaceNewtonCG_ParameterList()
    parameters["rel_tolerance"] = 1e-6
    parameters["abs_tolerance"] = 1e-9
    parameters["max_iter"] = 500
    parameters["cg_coarse_tolerance"] = 5e-1
    parameters["globalization"] = "LS"
    parameters["GN_iter"] = 20
    # Only rank 0 prints solver progress.
    if rank != 0:
        parameters["print_level"] = -1

    solver = hpx.ReducedSpaceNewtonCG(model, parameters)

    x = solver.solve(x)

    if solver.converged:
        master_print(comm, "\nConverged in ", solver.it, " iterations.")
    else:
        master_print(comm, "\nNot Converged")

    master_print (comm, "Termination reason: ", solver.termination_reasons[solver.reason])
    master_print (comm, "Final gradient norm: ", solver.final_grad_norm)
    master_print (comm, "Final cost: ", solver.final_cost)

    # Record success only for the gradient-tolerance termination reason;
    # any other reason (max iterations, line-search failure) counts as failure.
    optimizer_results = {}
    if(solver.termination_reasons[solver.reason] == 'Norm of the gradient less than tolerance'):
        optimizer_results['optimizer'] = True
    else:
        optimizer_results['optimizer'] = False


    final_results = {"data_misfit_True":data_misfit_True,
                     "data_misfit_False":data_misfit_False,
                     "optimizer_results":optimizer_results}


    return final_results


#######################################

if __name__ == "__main__":
nx = 64
ny = 64
noise_variance = 1e-4
prior_param = {"gamma": 0.1, "delta": 1.}
run_inversion(nx, ny, noise_variance, prior_param)

comm = MPI.COMM_WORLD
if(comm.rank == 0):
plt.savefig("poisson_result_FD_Gradient_Hessian_Check")
plt.show()

Loading
Loading