/* rosenbrock3.c */
/* Program usage: mpiexec -n 1 rosenbrock3 [-help] [all TAO options] */
3: /* Include "petsctao.h" so we can use TAO solvers. */
4: #include <petsctao.h>
/* Command-line help text printed by PetscInitialize() for -help */
static char help[] = "This example demonstrates use of the TAO package to \n\
solve an unconstrained minimization problem on a single processor. We \n\
minimize the extended Rosenbrock function: \n\
sum_{i=0}^{n/2-1} (alpha*(x_{2i+1}-x_{2i}^2)^2 + (1-x_{2i})^2) \n\
or the chained Rosenbrock function:\n\
sum_{i=0}^{n-1} alpha*(x_{i+1} - x_i^2)^2 + (1 - x_i)^2\n";
13: /*T
14: Concepts: TAO^Solving an unconstrained minimization problem
15: Routines: TaoCreate();
16: Routines: TaoSetType(); TaoSetObjectiveAndGradient();
17: Routines: TaoSetHessian();
18: Routines: TaoSetSolution();
19: Routines: TaoSetFromOptions();
20: Routines: TaoSolve();
21: Routines: TaoDestroy();
22: Processors: 1
23: T*/
25: /*
26: User-defined application context - contains data needed by the
27: application-provided call-back routines that evaluate the function,
28: gradient, and hessian.
29: */
typedef struct {
  PetscInt  n;       /* problem dimension (number of unknowns; the extended form assumes n is even) */
  PetscReal alpha;   /* conditioning parameter of the Rosenbrock function */
  PetscBool chained; /* PETSC_TRUE: chained Rosenbrock; PETSC_FALSE: extended (pairwise) Rosenbrock */
} AppCtx;
/* -------------- User-defined routines ---------- */
/* Callbacks registered with TAO below: combined objective/gradient and Hessian evaluation */
PetscErrorCode FormFunctionGradient(Tao,Vec,PetscReal*,Vec,void*);
PetscErrorCode FormHessian(Tao,Vec,Mat,Mat,void*);
40: int main(int argc,char **argv)
41: {
42: PetscReal zero=0.0;
43: Vec x; /* solution vector */
44: Mat H;
45: Tao tao; /* Tao solver context */
46: PetscBool flg, test_lmvm = PETSC_FALSE;
47: PetscMPIInt size; /* number of processes running */
48: AppCtx user; /* user-defined application context */
49: TaoConvergedReason reason;
50: PetscInt its, recycled_its=0, oneshot_its=0;
52: /* Initialize TAO and PETSc */
53: PetscInitialize(&argc,&argv,(char*)0,help);
54: MPI_Comm_size(PETSC_COMM_WORLD,&size);
57: /* Initialize problem parameters */
58: user.n = 2; user.alpha = 99.0; user.chained = PETSC_FALSE;
59: /* Check for command line arguments to override defaults */
60: PetscOptionsGetInt(NULL,NULL,"-n",&user.n,&flg);
61: PetscOptionsGetReal(NULL,NULL,"-alpha",&user.alpha,&flg);
62: PetscOptionsGetBool(NULL,NULL,"-chained",&user.chained,&flg);
63: PetscOptionsGetBool(NULL,NULL,"-test_lmvm",&test_lmvm,&flg);
65: /* Allocate vectors for the solution and gradient */
66: VecCreateSeq(PETSC_COMM_SELF,user.n,&x);
67: MatCreateSeqBAIJ(PETSC_COMM_SELF,2,user.n,user.n,1,NULL,&H);
69: /* The TAO code begins here */
71: /* Create TAO solver with desired solution method */
72: TaoCreate(PETSC_COMM_SELF,&tao);
73: TaoSetType(tao,TAOBQNLS);
75: /* Set solution vec and an initial guess */
76: VecSet(x, zero);
77: TaoSetSolution(tao,x);
79: /* Set routines for function, gradient, hessian evaluation */
80: TaoSetObjectiveAndGradient(tao,NULL,FormFunctionGradient,&user);
81: TaoSetHessian(tao,H,H,FormHessian,&user);
83: /* Check for TAO command line options */
84: TaoSetFromOptions(tao);
86: /* Solve the problem */
87: TaoSetTolerances(tao, 1.e-5, 0.0, 0.0);
88: TaoSetMaximumIterations(tao, 5);
89: TaoSetRecycleHistory(tao, PETSC_TRUE);
90: reason = TAO_CONTINUE_ITERATING;
91: flg = PETSC_FALSE;
92: TaoGetRecycleHistory(tao, &flg);
93: if (flg) PetscPrintf(PETSC_COMM_SELF, "Recycle: enabled\n");
94: while (reason != TAO_CONVERGED_GATOL) {
95: TaoSolve(tao);
96: TaoGetConvergedReason(tao, &reason);
97: TaoGetIterationNumber(tao, &its);
98: recycled_its += its;
99: PetscPrintf(PETSC_COMM_SELF, "-----------------------\n");
100: }
102: /* Disable recycling and solve again! */
103: TaoSetMaximumIterations(tao, 100);
104: TaoSetRecycleHistory(tao, PETSC_FALSE);
105: VecSet(x, zero);
106: TaoGetRecycleHistory(tao, &flg);
107: if (!flg) PetscPrintf(PETSC_COMM_SELF, "Recycle: disabled\n");
108: TaoSolve(tao);
109: TaoGetConvergedReason(tao, &reason);
111: TaoGetIterationNumber(tao, &oneshot_its);
112: PetscPrintf(PETSC_COMM_SELF, "-----------------------\n");
113: PetscPrintf(PETSC_COMM_SELF, "recycled its: %D | oneshot its: %D\n", recycled_its, oneshot_its);
116: TaoDestroy(&tao);
117: VecDestroy(&x);
118: MatDestroy(&H);
120: PetscFinalize();
121: return 0;
122: }
124: /* -------------------------------------------------------------------- */
125: /*
126: FormFunctionGradient - Evaluates the function, f(X), and gradient, G(X).
128: Input Parameters:
129: . tao - the Tao context
130: . X - input vector
. ptr - optional user-defined context, as set by TaoSetObjectiveAndGradient()
133: Output Parameters:
134: . G - vector containing the newly evaluated gradient
135: . f - function value
137: Note:
138: Some optimization methods ask for the function and the gradient evaluation
139: at the same time. Evaluating both at once may be more efficient than
140: evaluating each separately.
141: */
142: PetscErrorCode FormFunctionGradient(Tao tao,Vec X,PetscReal *f, Vec G,void *ptr)
143: {
144: AppCtx *user = (AppCtx *) ptr;
145: PetscInt i,nn=user->n/2;
146: PetscReal ff=0,t1,t2,alpha=user->alpha;
147: PetscScalar *g;
148: const PetscScalar *x;
151: /* Get pointers to vector data */
152: VecGetArrayRead(X,&x);
153: VecGetArrayWrite(G,&g);
155: /* Compute G(X) */
156: if (user->chained) {
157: g[0] = 0;
158: for (i=0; i<user->n-1; i++) {
159: t1 = x[i+1] - x[i]*x[i];
160: ff += PetscSqr(1 - x[i]) + alpha*t1*t1;
161: g[i] += -2*(1 - x[i]) + 2*alpha*t1*(-2*x[i]);
162: g[i+1] = 2*alpha*t1;
163: }
164: } else {
165: for (i=0; i<nn; i++) {
166: t1 = x[2*i+1]-x[2*i]*x[2*i]; t2= 1-x[2*i];
167: ff += alpha*t1*t1 + t2*t2;
168: g[2*i] = -4*alpha*t1*x[2*i]-2.0*t2;
169: g[2*i+1] = 2*alpha*t1;
170: }
171: }
173: /* Restore vectors */
174: VecRestoreArrayRead(X,&x);
175: VecRestoreArrayWrite(G,&g);
176: *f = ff;
178: PetscLogFlops(15.0*nn);
179: return 0;
180: }
182: /* ------------------------------------------------------------------- */
183: /*
184: FormHessian - Evaluates Hessian matrix.
186: Input Parameters:
187: . tao - the Tao context
188: . x - input vector
189: . ptr - optional user-defined context, as set by TaoSetHessian()
191: Output Parameters:
192: . H - Hessian matrix
194: Note: Providing the Hessian may not be necessary. Only some solvers
195: require this matrix.
196: */
197: PetscErrorCode FormHessian(Tao tao,Vec X,Mat H, Mat Hpre, void *ptr)
198: {
199: AppCtx *user = (AppCtx*)ptr;
200: PetscInt i, ind[2];
201: PetscReal alpha=user->alpha;
202: PetscReal v[2][2];
203: const PetscScalar *x;
204: PetscBool assembled;
207: /* Zero existing matrix entries */
208: MatAssembled(H,&assembled);
209: if (assembled || user->chained) MatZeroEntries(H);
211: /* Get a pointer to vector data */
212: VecGetArrayRead(X,&x);
214: /* Compute H(X) entries */
215: if (user->chained) {
216: for (i=0; i<user->n-1; i++) {
217: PetscScalar t1 = x[i+1] - x[i]*x[i];
218: v[0][0] = 2 + 2*alpha*(t1*(-2) - 2*x[i]);
219: v[0][1] = 2*alpha*(-2*x[i]);
220: v[1][0] = 2*alpha*(-2*x[i]);
221: v[1][1] = 2*alpha*t1;
222: ind[0] = i; ind[1] = i+1;
223: MatSetValues(H,2,ind,2,ind,v[0],ADD_VALUES);
224: }
225: } else {
226: for (i=0; i<user->n/2; i++) {
227: v[1][1] = 2*alpha;
228: v[0][0] = -4*alpha*(x[2*i+1]-3*x[2*i]*x[2*i]) + 2;
229: v[1][0] = v[0][1] = -4.0*alpha*x[2*i];
230: ind[0]=2*i; ind[1]=2*i+1;
231: MatSetValues(H,2,ind,2,ind,v[0],INSERT_VALUES);
232: }
233: }
234: VecRestoreArrayRead(X,&x);
236: /* Assemble matrix */
237: MatAssemblyBegin(H,MAT_FINAL_ASSEMBLY);
238: MatAssemblyEnd(H,MAT_FINAL_ASSEMBLY);
239: PetscLogFlops(9.0*user->n/2.0);
240: return 0;
241: }
243: /*TEST
245: build:
246: requires: !complex
248: test:
249: args: -tao_type bqnls -tao_monitor
250: requires: !single
252: TEST*/