LaplacianPAR.edp
/* Example of problem solving in parallel */
// Usage:
// ff-mpirun -np 12 LaplacianPAR.edp (here 12 is the number of MPI processes; use the nproc command to see how many cores are available)
// Need FreeFem++ with PETSc
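// For example, on a 4-core machine (an illustrative invocation; the process count
// and the -wg graphics flag are assumptions, adjust them to your setup):
//   ff-mpirun -np 4 LaplacianPAR.edp -wg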
// Parallel stuff
load "PETSc"
macro partitioner()metis//
macro dimension()2//
include "./macro_ddm.idp"
macro def(i)[i]//
macro init(i)[i]//
//macro meshN()mesh// //these macros are already defined in macro_ddm.idp
//macro intN()int2d//
// Parameters
int nn = 500;
real L = 1.;
real H = 1.;
func f = 1.;
func Pk = P1;
// Mesh
border b1(t=0, L){x=t; y=0; label=1;}
border b2(t=0, H){x=L; y=t; label=2;}
border b3(t=L, 0){x=t; y=H; label=3;}
border b4(t=H, 0){x=0; y=t; label=4;}
meshN Th = buildmesh(b1(1) + b2(1) + b3(1) + b4(1)); //build a very coarse mesh (only used to define the fespace; the build macro below replaces Th with the local partitioned mesh)
//meshN Th = square(1, 1, [L*x, H*y]);
int[int] Wall = [1, 2, 3, 4];
// Fespace
fespace Uh(Th, Pk);
// Mesh partition
int[int] ArrayIntersection;
int[int][int] RestrictionIntersection(0);
real[int] D;
meshN ThBorder;
meshN ThGlobal = buildmesh(b1(nn*L) + b2(nn*H) + b3(nn*L) + b4(nn*H)); //build the mesh to partition
//meshN ThGlobal = square(nn*L, nn*H, [L*x, H*y]);
int InterfaceLabel = 10;
int Split = 1;
int Overlap = 1;
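// (assumed meanings, following macro_ddm.idp: InterfaceLabel is the label put on the
// artificial subdomain interfaces, Split is the local mesh refinement factor, and
// Overlap is the number of overlapping element layers between neighboring subdomains)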
build(Th, ThBorder, ThGlobal, InterfaceLabel, Split, Overlap, D, ArrayIntersection, RestrictionIntersection, Uh, Pk, mpiCommWorld, false); //see macro_ddm.idp for detailed parameters
// Macro
macro grad(u) [dx(u), dy(u)] //
// Problem
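// For reference, the weak form assembled below: find u with u = 0 on the Wall labels
// such that int_Omega grad(u).grad(uh) = int_Omega f*uh for every test function uh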
varf vLaplacian (u, uh) //the problem must be written in varf form to assemble the matrix and right-hand side
= intN(Th)(
grad(u)' * grad(uh)
)
- intN(Th)(
f * uh
)
+ on(Wall, u=0)
;
matrix<real> Laplacian = vLaplacian(Uh, Uh); //build the sequential matrix
real[int] LaplacianBoundary = vLaplacian(0, Uh);// and right hand side
//// In a sequential run, you would normally do this:
//// Solve
//Uh def(u)=init(0);
//u[] = Laplacian^-1 * LaplacianBoundary;
//// Plot
//plot(u);
// In parallel:
// Matrix construction
dmatrix PLaplacian(Laplacian, ArrayIntersection, RestrictionIntersection, D, bs=1); //build the parallel matrix
set(PLaplacian, sparams="-pc_type lu -pc_factor_mat_solver_package mumps"); //LU factorization with the MUMPS solver (see the PETSc documentation for detailed parameters)
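// Note: recent PETSc releases renamed this option to -pc_factor_mat_solver_type.
// A sketch of an iterative alternative (assuming PETSc was built with these components):
//set(PLaplacian, sparams="-ksp_type cg -pc_type gamg -ksp_monitor");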
// Solve
Uh def(u)=init(0); //define the unknown (must be defined after mesh partitioning)
u[] = PLaplacian^-1 * LaplacianBoundary;
// Export results to VTK (there is no plot in parallel)
{
fespace PV(Th, P1);
PV uu=u;
int[int] Order = [1];
export("Result", Th, uu, Order, mpiCommWorld);
}
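// The exported VTK files can be opened in ParaView to inspect the distributed solution
// (the exact file names depend on the FreeFem/macro_ddm version).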