/* Actual source code: rosenbrock2.c */
1: /* Program usage: mpiexec -n 1 rosenbrock2 [-help] [all TAO options] */
3: /* Include "petsctao.h" so we can use TAO solvers. */
4: #include <petsctao.h>
6: static char help[] = "This example demonstrates use of the TAO package to \n\
7: solve an unconstrained minimization problem on a single processor. We \n\
8: minimize the extended Rosenbrock function: \n\
9: sum_{i=0}^{n/2-1} (alpha*(x_{2i+1}-x_{2i}^2)^2 + (1-x_{2i})^2) \n\
10: or the chained Rosenbrock function:\n\
11: sum_{i=0}^{n-1} alpha*(x_{i+1} - x_i^2)^2 + (1 - x_i)^2\n";
13: /*T
14: Concepts: TAO^Solving an unconstrained minimization problem
15: Routines: TaoCreate();
16: Routines: TaoSetType(); TaoSetObjectiveAndGradient();
17: Routines: TaoSetHessian();
18: Routines: TaoSetSolution();
19: Routines: TaoSetFromOptions();
20: Routines: TaoSolve();
21: Routines: TaoDestroy();
22: Processors: 1
23: T*/
25: /*
26: User-defined application context - contains data needed by the
27: application-provided call-back routines that evaluate the function,
28: gradient, and hessian.
29: */
/* User-defined application context passed to every TAO callback. */
typedef struct {
  PetscInt  n;       /* dimension (number of optimization variables) */
  PetscReal alpha;   /* condition parameter of the Rosenbrock function */
  PetscBool chained; /* PETSC_TRUE => chained Rosenbrock; PETSC_FALSE => extended (pairwise) form */
} AppCtx;
36: /* -------------- User-defined routines ---------- */
37: PetscErrorCode FormFunctionGradient(Tao,Vec,PetscReal*,Vec,void*);
38: PetscErrorCode FormHessian(Tao,Vec,Mat,Mat,void*);
40: int main(int argc,char **argv)
41: {
42: PetscReal zero=0.0;
43: Vec x; /* solution vector */
44: Mat H;
45: Tao tao; /* Tao solver context */
46: PetscBool flg, test_lmvm = PETSC_FALSE;
47: PetscMPIInt size; /* number of processes running */
48: AppCtx user; /* user-defined application context */
49: TaoConvergedReason reason;
50: PetscInt its, recycled_its=0, oneshot_its=0;
52: /* Initialize TAO and PETSc */
53: PetscInitialize(&argc,&argv,(char*)0,help);
54: MPI_Comm_size(PETSC_COMM_WORLD,&size);
57: /* Initialize problem parameters */
58: user.n = 2; user.alpha = 99.0; user.chained = PETSC_FALSE;
59: /* Check for command line arguments to override defaults */
60: PetscOptionsGetInt(NULL,NULL,"-n",&user.n,&flg);
61: PetscOptionsGetReal(NULL,NULL,"-alpha",&user.alpha,&flg);
62: PetscOptionsGetBool(NULL,NULL,"-chained",&user.chained,&flg);
63: PetscOptionsGetBool(NULL,NULL,"-test_lmvm",&test_lmvm,&flg);
65: /* Allocate vectors for the solution and gradient */
66: VecCreateSeq(PETSC_COMM_SELF,user.n,&x);
67: MatCreateSeqBAIJ(PETSC_COMM_SELF,2,user.n,user.n,1,NULL,&H);
69: /* The TAO code begins here */
71: /* Create TAO solver with desired solution method */
72: TaoCreate(PETSC_COMM_SELF,&tao);
73: TaoSetType(tao,TAOLMVM);
75: /* Set solution vec and an initial guess */
76: VecSet(x, zero);
77: TaoSetSolution(tao,x);
79: /* Set routines for function, gradient, hessian evaluation */
80: TaoSetObjectiveAndGradient(tao,NULL,FormFunctionGradient,&user);
81: TaoSetHessian(tao,H,H,FormHessian,&user);
83: /* Check for TAO command line options */
84: TaoSetFromOptions(tao);
86: /* Solve the problem */
87: TaoSetTolerances(tao, 1.e-5, 0.0, 0.0);
88: TaoSetMaximumIterations(tao, 5);
89: TaoLMVMRecycle(tao, PETSC_TRUE);
90: reason = TAO_CONTINUE_ITERATING;
91: while (reason != TAO_CONVERGED_GATOL) {
92: TaoSolve(tao);
93: TaoGetConvergedReason(tao, &reason);
94: TaoGetIterationNumber(tao, &its);
95: recycled_its += its;
96: PetscPrintf(PETSC_COMM_SELF, "-----------------------\n");
97: }
99: /* Disable recycling and solve again! */
100: TaoSetMaximumIterations(tao, 100);
101: TaoLMVMRecycle(tao, PETSC_FALSE);
102: VecSet(x, zero);
103: TaoSolve(tao);
104: TaoGetConvergedReason(tao, &reason);
106: TaoGetIterationNumber(tao, &oneshot_its);
107: PetscPrintf(PETSC_COMM_SELF, "-----------------------\n");
108: PetscPrintf(PETSC_COMM_SELF, "recycled its: %D | oneshot its: %D\n", recycled_its, oneshot_its);
111: TaoDestroy(&tao);
112: VecDestroy(&x);
113: MatDestroy(&H);
115: PetscFinalize();
116: return 0;
117: }
119: /* -------------------------------------------------------------------- */
120: /*
121: FormFunctionGradient - Evaluates the function, f(X), and gradient, G(X).
123: Input Parameters:
124: . tao - the Tao context
125: . X - input vector
126: . ptr - optional user-defined context, as set by TaoSetFunctionGradient()
128: Output Parameters:
129: . G - vector containing the newly evaluated gradient
130: . f - function value
132: Note:
133: Some optimization methods ask for the function and the gradient evaluation
134: at the same time. Evaluating both at once may be more efficient that
135: evaluating each separately.
136: */
137: PetscErrorCode FormFunctionGradient(Tao tao,Vec X,PetscReal *f, Vec G,void *ptr)
138: {
139: AppCtx *user = (AppCtx *) ptr;
140: PetscInt i,nn=user->n/2;
141: PetscReal ff=0,t1,t2,alpha=user->alpha;
142: PetscScalar *g;
143: const PetscScalar *x;
146: /* Get pointers to vector data */
147: VecGetArrayRead(X,&x);
148: VecGetArray(G,&g);
150: /* Compute G(X) */
151: if (user->chained) {
152: g[0] = 0;
153: for (i=0; i<user->n-1; i++) {
154: t1 = x[i+1] - x[i]*x[i];
155: ff += PetscSqr(1 - x[i]) + alpha*t1*t1;
156: g[i] += -2*(1 - x[i]) + 2*alpha*t1*(-2*x[i]);
157: g[i+1] = 2*alpha*t1;
158: }
159: } else {
160: for (i=0; i<nn; i++) {
161: t1 = x[2*i+1]-x[2*i]*x[2*i]; t2= 1-x[2*i];
162: ff += alpha*t1*t1 + t2*t2;
163: g[2*i] = -4*alpha*t1*x[2*i]-2.0*t2;
164: g[2*i+1] = 2*alpha*t1;
165: }
166: }
168: /* Restore vectors */
169: VecRestoreArrayRead(X,&x);
170: VecRestoreArray(G,&g);
171: *f = ff;
173: PetscLogFlops(15.0*nn);
174: return 0;
175: }
177: /* ------------------------------------------------------------------- */
178: /*
179: FormHessian - Evaluates Hessian matrix.
181: Input Parameters:
182: . tao - the Tao context
183: . x - input vector
184: . ptr - optional user-defined context, as set by TaoSetHessian()
186: Output Parameters:
187: . H - Hessian matrix
189: Note: Providing the Hessian may not be necessary. Only some solvers
190: require this matrix.
191: */
192: PetscErrorCode FormHessian(Tao tao,Vec X,Mat H, Mat Hpre, void *ptr)
193: {
194: AppCtx *user = (AppCtx*)ptr;
195: PetscInt i, ind[2];
196: PetscReal alpha=user->alpha;
197: PetscReal v[2][2];
198: const PetscScalar *x;
199: PetscBool assembled;
202: /* Zero existing matrix entries */
203: MatAssembled(H,&assembled);
204: if (assembled) MatZeroEntries(H);
206: /* Get a pointer to vector data */
207: VecGetArrayRead(X,&x);
209: /* Compute H(X) entries */
210: if (user->chained) {
211: MatZeroEntries(H);
212: for (i=0; i<user->n-1; i++) {
213: PetscScalar t1 = x[i+1] - x[i]*x[i];
214: v[0][0] = 2 + 2*alpha*(t1*(-2) - 2*x[i]);
215: v[0][1] = 2*alpha*(-2*x[i]);
216: v[1][0] = 2*alpha*(-2*x[i]);
217: v[1][1] = 2*alpha*t1;
218: ind[0] = i; ind[1] = i+1;
219: MatSetValues(H,2,ind,2,ind,v[0],ADD_VALUES);
220: }
221: } else {
222: for (i=0; i<user->n/2; i++) {
223: v[1][1] = 2*alpha;
224: v[0][0] = -4*alpha*(x[2*i+1]-3*x[2*i]*x[2*i]) + 2;
225: v[1][0] = v[0][1] = -4.0*alpha*x[2*i];
226: ind[0]=2*i; ind[1]=2*i+1;
227: MatSetValues(H,2,ind,2,ind,v[0],INSERT_VALUES);
228: }
229: }
230: VecRestoreArrayRead(X,&x);
232: /* Assemble matrix */
233: MatAssemblyBegin(H,MAT_FINAL_ASSEMBLY);
234: MatAssemblyEnd(H,MAT_FINAL_ASSEMBLY);
235: PetscLogFlops(9.0*user->n/2.0);
236: return 0;
237: }
239: /*TEST
241: build:
242: requires: !complex
244: test:
245: args: -tao_type lmvm -tao_monitor
246: requires: !single
248: TEST*/