Actual source code: ex38.c
/*
   mpiexec -n 8 ./ex38 -ksp_type fbcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -ksp_monitor -n1 64 -n2 64

   Contributed by Jie Chen for testing the flexible BiCGStab algorithm
*/

static char help[] = "Solves the PDE (in 2D) -laplacian(u) + gamma x dot grad(u) + beta u = 1\n\
with a zero Dirichlet boundary condition. The discretization is the standard centered\n\
difference. Input parameters include:\n\
  -n1    : number of mesh points in 1st dimension (default 64)\n\
  -n2    : number of mesh points in 2nd dimension (default 64)\n\
  -h     : spacing between mesh points (default 1/n1)\n\
  -gamma : gamma (default 4/h)\n\
  -beta  : beta (default 0.01/h^2)\n\n";

/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines   petscvec.h  - vectors
     petscmat.h    - matrices
     petscis.h     - index sets            petscksp.h  - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h   - preconditioners
*/
#include <petscksp.h>

int main(int argc,char **args)
{
  Vec            x,b,u;          /* approx solution, RHS, working vector */
  Mat            A;              /* linear system matrix */
  KSP            ksp;            /* linear solver context */
  PetscInt       n1, n2;         /* parameters */
  PetscReal      h, gamma, beta; /* parameters */
  PetscInt       i,j,Ii,J,Istart,Iend;
  PetscScalar    v, co1, co2;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stage;
#endif

  PetscInitialize(&argc,&args,(char*)0,help);
  n1 = 64;
  n2 = 64;
  PetscOptionsGetInt(NULL,NULL,"-n1",&n1,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n2",&n2,NULL);

  h     = 1.0/n1;
  gamma = 4.0;
  beta  = 0.01;
  PetscOptionsGetReal(NULL,NULL,"-h",&h,NULL);
  PetscOptionsGetReal(NULL,NULL,"-gamma",&gamma,NULL);
  PetscOptionsGetReal(NULL,NULL,"-beta",&beta,NULL);
  gamma = gamma/h;
  beta  = beta/(h*h);
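  /*
     Note: the two rescalings above make the effective defaults gamma = 4/h and
     beta = 0.01/h^2, matching the help string; values supplied with -gamma and
     -beta are likewise interpreted relative to h.
  */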

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and set the right-hand-side vector.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note: For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance.  See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n1*n2,n1*n2);
  MatSetFromOptions(A);
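  /*
     The 2-D five-point stencil used below yields at most 5 nonzeros per row,
     so 5 is a safe per-row upper bound for both the diagonal and off-diagonal
     blocks of the parallel (MPIAIJ) format.
  */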
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
  MatSetUp(A);

  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);

  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
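  /*
     Entry values (a sketch of the scaling used below): multiplying the
     discretized equation by h^2, the centered-difference -laplacian(u)
     contributes 4 to the diagonal and -1 to each neighbor entry, the reaction
     term beta*u adds co2 = beta*h^2 to the diagonal, and the convection term
     gamma x dot grad(u) contributes co1*i (or co1*j) to the corresponding
     neighbor entries as coded below, with co1 = gamma*h^2/2.
  */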
  PetscLogStageRegister("Assembly", &stage);
  PetscLogStagePush(stage);
  co1 = gamma * h * h / 2.0;
  co2 = beta * h * h;
  for (Ii=Istart; Ii<Iend; Ii++) {
    i = Ii/n2; j = Ii - i*n2;
    if (i>0) {
      J = Ii - n2; v = -1.0 + co1*(PetscScalar)i;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (i<n1-1) {
      J = Ii + n2; v = -1.0 + co1*(PetscScalar)i;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (j>0) {
      J = Ii - 1; v = -1.0 + co1*(PetscScalar)j;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (j<n2-1) {
      J = Ii + 1; v = -1.0 + co1*(PetscScalar)j;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    v = 4.0 + co2;
    MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
  }

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();
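  /*
     For illustration only (not done in this example): independent local work,
     such as the vector setup below, could be placed between MatAssemblyBegin()
     and MatAssemblyEnd() to overlap computation with the assembly
     communication:

        MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
        ... independent local computations ...
        MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  */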

  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions() in this
        example, we specify only the vector's global dimension; the parallel
        partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below); see the sketch after the vector creation.
  */
  VecCreate(PETSC_COMM_WORLD,&b);
  VecSetSizes(b,PETSC_DECIDE,n1*n2);
  VecSetFromOptions(b);
  VecDuplicate(b,&x);
  VecDuplicate(b,&u);
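  /*
     A sketch (not used in this example) of specifying the local size explicitly
     instead of PETSC_DECIDE, so that the vector layout matches the matrix rows:

        PetscInt nlocal;
        MatGetLocalSize(A,&nlocal,NULL);
        VecSetSizes(b,nlocal,n1*n2);
  */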

  /*
     Set right-hand side.
  */
  VecSet(b,1.0);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators.  Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);

  /*
     Set linear solver defaults for this problem (optional).
     - By extracting the KSP and PC contexts from the KSP context,
       we can then directly call any KSP and PC routines to set
       various options; a sketch follows the call below.
  */
  KSPSetTolerances(ksp,1.e-6,PETSC_DEFAULT,PETSC_DEFAULT,200);
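  /*
     A sketch (not used in this example) of extracting the PC context and
     setting options on it directly:

        PC pc;
        KSPGetPC(ksp,&pc);
        PCSetType(pc,PCJACOBI);
  */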

  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSolve(ksp,b,x);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u); VecDestroy(&x);
  VecDestroy(&b); MatDestroy(&A);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  PetscFinalize();
  return 0;
}

/*TEST

   test:
      nsize: 8
      args: -ksp_type fbcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -n1 64 -n2 64

   test:
      suffix: 2
      nsize: 8
      args: -ksp_type qmrcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -n1 64 -n2 64
      output_file: output/ex38_1.out

TEST*/