Actual source code: ex13.c
static char help[] = "Solves a variable Poisson problem with KSP.\n\n";

/*T
   Concepts: KSP^basic sequential example
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: 1
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines     petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets              petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                 petscpc.h  - preconditioners
*/
#include <petscksp.h>

/*
  User-defined context that contains all the data structures used
  in the linear solution process.
*/
typedef struct {
  Vec         x,b;     /* solution vector, right-hand-side vector */
  Mat         A;       /* sparse matrix */
  KSP         ksp;     /* linear solver context */
  PetscInt    m,n;     /* grid dimensions */
  PetscScalar hx2,hy2; /* 1/(hx*hx) and 1/(hy*hy), i.e. (m+1)^2 and (n+1)^2 */
} UserCtx;

extern PetscErrorCode UserInitializeLinearSolver(PetscInt,PetscInt,UserCtx*);
extern PetscErrorCode UserFinalizeLinearSolver(UserCtx*);
extern PetscErrorCode UserDoLinearSolver(PetscScalar*,UserCtx*,PetscScalar*,PetscScalar*);

int main(int argc,char **args)
{
  UserCtx     userctx;
  PetscInt    m = 6,n = 7,t,tmax = 2,i,Ii,j,N;
  PetscScalar *userx,*rho,*solution,*userb,hx,hy,x,y;
  PetscReal   enorm;

  /*
     Initialize the PETSc libraries
  */
  PetscInitialize(&argc,&args,(char*)0,help);
  /*
     The next two lines are for testing only; they allow the user to
     set the grid size at runtime.
  */
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);
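
  /*
     For example (a hypothetical invocation; the executable name follows
     the usual PETSc tutorial build), the grid can be enlarged with
         ./ex13 -m 19 -n 20
     which matches the args used in the TEST block at the end of this file.
  */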

  /*
     Create the empty sparse matrix and linear solver data structures
  */
  UserInitializeLinearSolver(m,n,&userctx);
  N = m*n;

  /*
     Allocate arrays to hold the solution to the linear system.  This
     is not normally done in PETSc programs, but in this case, since we
     are calling these routines from a non-PETSc program, we would like
     to reuse the data structures from another code. So in the context
     of a larger application these would be provided by other (non-PETSc)
     parts of the application code.
  */
  PetscMalloc1(N,&userx);
  PetscMalloc1(N,&userb);
  PetscMalloc1(N,&solution);

  /*
     Allocate an array to hold the coefficients in the elliptic operator
  */
  PetscMalloc1(N,&rho);

  /*
     Fill up the array rho[] with the function rho(x,y) = x; fill the
     right-hand side userb[] and the array solution[] with a known
     problem for testing.
  */
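  /*
     Why this right-hand side (a derivation, for reference): with
     rho(x,y) = x and the manufactured solution
         psi(x,y) = sin(2 pi x) sin(2 pi y),
     the product rule gives
         -div (rho grad psi) = -2 pi cos(2 pi x) sin(2 pi y)
                               + 8 pi^2 x sin(2 pi x) sin(2 pi y),
     which is exactly the expression assigned to userb[] below.
  */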
  hx = 1.0/(m+1);
  hy = 1.0/(n+1);
  y  = hy;
  Ii = 0;
  for (j=0; j<n; j++) {
    x = hx;
    for (i=0; i<m; i++) {
      rho[Ii]      = x;
      solution[Ii] = PetscSinScalar(2.*PETSC_PI*x)*PetscSinScalar(2.*PETSC_PI*y);
      userb[Ii]    = -2*PETSC_PI*PetscCosScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y) +
                     8*PETSC_PI*PETSC_PI*x*PetscSinScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y);
      x += hx;
      Ii++;
    }
    y += hy;
  }

  /*
     Loop over a bunch of timesteps, setting up and solving the linear
     system for each time-step.

     Note this is somewhat artificial. It is intended to demonstrate how
     one may reuse the linear solver machinery in each time-step.
  */
  for (t=0; t<tmax; t++) {
    UserDoLinearSolver(rho,&userctx,userb,userx);

    /*
       Compute error: Note that this could (and usually should) all be done
       using the PETSc vector operations. Here we demonstrate using more
       standard programming practices to show how they may be mixed with
       PETSc.
    */
    enorm = 0.0;
    for (i=0; i<N; i++) enorm += PetscRealPart(PetscConj(solution[i]-userx[i])*(solution[i]-userx[i]));
    enorm *= PetscRealPart(hx*hy);
    PetscPrintf(PETSC_COMM_WORLD,"m %D n %D error norm %g\n",m,n,(double)enorm);
  }
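
  /*
     A sketch of the vector-operation version of the error computation
     mentioned above (hypothetical, not compiled here; note that it wraps
     the raw arrays in Vecs and overwrites the contents of solution[]):

         Vec       uexact,ucomp;
         PetscReal nrm;
         VecCreateSeqWithArray(PETSC_COMM_SELF,1,N,solution,&uexact);
         VecCreateSeqWithArray(PETSC_COMM_SELF,1,N,userx,&ucomp);
         VecAXPY(uexact,-1.0,ucomp);            // uexact <- solution - userx
         VecNorm(uexact,NORM_2,&nrm);           // 2-norm of the pointwise error
         enorm = nrm*nrm*PetscRealPart(hx*hy);  // same scaling as above
         VecDestroy(&uexact);
         VecDestroy(&ucomp);
  */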

  /*
     We are all finished solving linear systems, so we clean up the
     data structures.
  */
  PetscFree(rho);
  PetscFree(solution);
  PetscFree(userx);
  PetscFree(userb);
  UserFinalizeLinearSolver(&userctx);
  PetscFinalize();
  return 0;
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserInitializeLinearSolver(PetscInt m,PetscInt n,UserCtx *userctx)
{
  PetscInt N;

  /*
     Here we assume use of a grid of size m x n, with all points on the
     interior of the domain, i.e., we do not include the points corresponding
     to homogeneous Dirichlet boundary conditions. We assume that the domain
     is [0,1]x[0,1].
  */
  userctx->m   = m;
  userctx->n   = n;
  userctx->hx2 = (m+1)*(m+1);
  userctx->hy2 = (n+1)*(n+1);
  N            = m*n;

  /*
     Create the sparse matrix. Preallocate 5 nonzeros per row, enough
     for the five-point stencil assembled in UserDoLinearSolver().
  */
  MatCreateSeqAIJ(PETSC_COMM_SELF,N,N,5,NULL,&userctx->A);
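
  /*
     An equivalent, more option-driven way to create the same matrix
     (a sketch only; this example keeps the single convenience call above):

         MatCreate(PETSC_COMM_SELF,&userctx->A);
         MatSetSizes(userctx->A,N,N,N,N);
         MatSetType(userctx->A,MATSEQAIJ);
         MatSeqAIJSetPreallocation(userctx->A,5,NULL);
  */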

  /*
     Create vectors. Here we create vectors with no memory allocated;
     this way we can reuse the arrays already in the calling program,
     attaching them later with VecPlaceArray() in UserDoLinearSolver().
  */
  VecCreateSeqWithArray(PETSC_COMM_SELF,1,N,NULL,&userctx->b);
  VecDuplicate(userctx->b,&userctx->x);

  /*
     Create linear solver context. This will be used repeatedly for all
     the linear solves needed.
  */
  KSPCreate(PETSC_COMM_SELF,&userctx->ksp);

  return 0;
}

/*
   Solves -div (rho grad psi) = F using finite differences.
   rho is a 2-dimensional array of size m by n, stored in Fortran
   style by columns. userb is a standard one-dimensional array.
*/
/* ------------------------------------------------------------------------*/
PetscErrorCode UserDoLinearSolver(PetscScalar *rho,UserCtx *userctx,PetscScalar *userb,PetscScalar *userx)
{
  PetscInt    i,j,Ii,J,m = userctx->m,n = userctx->n;
  Mat         A = userctx->A;
  PC          pc;
  PetscScalar v,hx2 = userctx->hx2,hy2 = userctx->hy2;

  /*
     This is not the most efficient way of generating the matrix,
     but let's not worry about it. We should have separate code for
     the four corners, each edge, and the interior; then we would not
     need the slow if-tests inside the loop.

     Computes the operator
           -div rho grad
     on an m by n grid with zero Dirichlet boundary conditions. rho is
     assumed to be given at the same grid points at which the finite
     difference stencil is applied. For a staggered grid, one would
     have to change things slightly.
  */
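  /*
     For reference, the 5-point stencil the loop below assembles at an
     interior node I (with hx2 = 1/hx^2 and hy2 = 1/hy^2; west/east
     neighbors are I-1/I+1, south/north neighbors are I-m/I+m):

         diagonal:     A(I,I)             =  2*rho_I*(hx2+hy2)
         east/west:    A(I,I+1), A(I,I-1) = -(rho_I+rho_J)/2 * hx2
         north/south:  A(I,I+m), A(I,I-m) = -(rho_I+rho_J)/2 * hy2

     i.e. rho is averaged onto the cell faces before differencing, and
     stencil legs that would reach the boundary are simply dropped
     (homogeneous Dirichlet conditions).
  */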
  Ii = 0;
  for (j=0; j<n; j++) {
    for (i=0; i<m; i++) {
      if (j>0) {
        J = Ii - m;
        v = -.5*(rho[Ii] + rho[J])*hy2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (j<n-1) {
        J = Ii + m;
        v = -.5*(rho[Ii] + rho[J])*hy2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (i>0) {
        J = Ii - 1;
        v = -.5*(rho[Ii] + rho[J])*hx2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (i<m-1) {
        J = Ii + 1;
        v = -.5*(rho[Ii] + rho[J])*hx2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      v = 2.0*rho[Ii]*(hx2+hy2);
      MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
      Ii++;
    }
  }

  /*
     Assemble matrix
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the matrix from which the preconditioner is
     constructed. All the matrices in this example share the same
     nonzero pattern, which the solvers can exploit.
  */
  KSPSetOperators(userctx->ksp,A,A);

  /*
     Set linear solver defaults for this problem (optional).
     - Here we set it to use direct LU factorization for the solution.
  */
  KSPGetPC(userctx->ksp,&pc);
  PCSetType(pc,PCLU);

  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.

     Run the program with the option -help to see all the possible
     linear solver options.
  */
  KSPSetFromOptions(userctx->ksp);
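
  /*
     For example (hypothetical invocations, built from the options listed
     above), the LU default can be overridden at runtime with:

         ./ex13 -ksp_type gmres -pc_type ilu -ksp_monitor
         ./ex13 -ksp_type cg -pc_type jacobi -ksp_rtol 1e-8
  */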

  /*
     This allows the PETSc linear solvers to compute the solution
     directly in the user's array rather than in the PETSc vector:
     VecPlaceArray() makes the vector use the given array for its
     values, without copying, until VecResetArray() is called below.

     This is essentially a hack and not highly recommended unless you
     are quite comfortable with using PETSc. In general, users should
     write their entire application using PETSc vectors rather than
     arrays.
  */
  VecPlaceArray(userctx->x,userx);
  VecPlaceArray(userctx->b,userb);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(userctx->ksp,userctx->b,userctx->x);

  /*
     Put back the arrays that belong in the vectors userctx->x and
     userctx->b.
  */
  VecResetArray(userctx->x);
  VecResetArray(userctx->b);

  return 0;
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserFinalizeLinearSolver(UserCtx *userctx)
{
  /*
     We are all done and don't need to solve any more linear systems, so
     we free the work space. All PETSc objects should be destroyed when
     they are no longer needed.
  */
  KSPDestroy(&userctx->ksp);
  VecDestroy(&userctx->x);
  VecDestroy(&userctx->b);
  MatDestroy(&userctx->A);
  return 0;
}

/*TEST

   test:
      args: -m 19 -n 20 -ksp_gmres_cgs_refinement_type refine_always

TEST*/