Actual source code: ex2.c
1: /*$Id: ex2.c,v 1.41 2001/08/10 03:34:17 bsmith Exp $*/
2: static char help[] ="Solves a time-dependent nonlinear PDE. Uses implicit\n\
3: timestepping. Runtime options include:\n\
4: -M <xg>, where <xg> = number of grid points\n\
5: -debug : Activate debugging printouts\n\
6: -nox : Deactivate x-window graphics\n\n";
8: /*
9: Concepts: TS^time-dependent nonlinear problems
10: Processors: n
11: */
13: /* ------------------------------------------------------------------------
15: This program solves the PDE
17:            u * u_xx
18:    u_t = -------------
19:           2*(t+1)^2
21: on the domain 0 <= x <= 1, with boundary conditions
22: u(t,0) = t + 1, u(t,1) = 2*t + 2,
23: and initial condition
24: u(0,x) = 1 + x*x.
26: The exact solution is:
27: u(t,x) = (1 + x*x) * (1 + t)
29: Note that since the solution is linear in time and quadratic in x,
30: the finite difference scheme actually computes the "exact" solution.
32: We use by default the backward Euler method.
34: ------------------------------------------------------------------------- */
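/*
   Quick verification (added for clarity) that the stated exact solution
   satisfies the PDE: with u(t,x) = (1 + x*x)*(1 + t) we have

      u_t  = 1 + x*x,        u_xx = 2*(1 + t),

   so

       u * u_xx      (1 + x*x)*(1 + t)*2*(1 + t)
      -----------  = -----------------------------  =  1 + x*x  =  u_t.
      2*(t+1)^2               2*(t+1)^2

   The boundary and initial conditions follow by substituting x = 0, x = 1,
   and t = 0 into the same formula.
*/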
36: /*
37: Include "petscts.h" to use the PETSc timestepping routines. Note that
38: this file automatically includes "petsc.h" and other lower-level
39: PETSc include files.
41: Include the "petscda.h" to allow us to use the distributed array data
42: structures to manage the parallel grid.
43: */
44: #include "petscts.h"
45: #include "petscda.h"
47: /*
48: User-defined application context - contains data needed by the
49: application-provided callback routines.
50: */
51: typedef struct {
52: MPI_Comm comm; /* communicator */
53: DA da; /* distributed array data structure */
54: Vec localwork; /* local ghosted work vector */
55: Vec u_local; /* local ghosted approximate solution vector */
56: Vec solution; /* global exact solution vector */
57: int m; /* total number of grid points */
58: PetscReal h; /* mesh width: h = 1/(m-1) */
59: PetscTruth debug; /* flag (1 indicates activation of debugging printouts) */
60: } AppCtx;
62: /*
63: User-defined routines, provided below.
64: */
65: extern int InitialConditions(Vec,AppCtx*);
66: extern int RHSFunction(TS,PetscReal,Vec,Vec,void*);
67: extern int RHSJacobian(TS,PetscReal,Vec,Mat*,Mat*,MatStructure*,void*);
68: extern int Monitor(TS,int,PetscReal,Vec,void*);
69: extern int ExactSolution(PetscReal,Vec,AppCtx*);
71: /*
72: Utility routine for finite difference Jacobian approximation
73: */
74: extern int RHSJacobianFD(TS,PetscReal,Vec,Mat*,Mat*,MatStructure*,void*);
76: int main(int argc,char **argv)
77: {
78: AppCtx appctx; /* user-defined application context */
79: TS ts; /* timestepping context */
80: Mat A; /* Jacobian matrix data structure */
81: Vec u; /* approximate solution vector */
82: int time_steps_max = 1000; /* default max timesteps */
83: int ierr,steps;
84: PetscReal ftime; /* final time */
85: PetscReal dt;
86: PetscReal time_total_max = 100.0; /* default max total time */
87: PetscTruth flg;
89: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
90: Initialize program and set problem parameters
91: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
92:
93: PetscInitialize(&argc,&argv,(char*)0,help);
95: appctx.comm = PETSC_COMM_WORLD;
96: appctx.m = 60;
97: PetscOptionsGetInt(PETSC_NULL,"-M",&appctx.m,PETSC_NULL);
98: PetscOptionsHasName(PETSC_NULL,"-debug",&appctx.debug);
99: appctx.h = 1.0/(appctx.m-1.0);
101: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
102: Create vector data structures
103: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
105: /*
106: Create distributed array (DA) to manage parallel grid and vectors
107: and to set up the ghost point communication pattern. There are M
108: total grid values spread equally among all the processors.
109: */
110: DACreate1d(PETSC_COMM_WORLD,DA_NONPERIODIC,appctx.m,1,1,PETSC_NULL,
111: &appctx.da);
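/*
   For illustration (not part of the original code): with M = 7 grid points
   on 2 processors, PETSC_DECIDE gives process 0 the global indices 0..3 and
   process 1 the indices 4..6.  Because the stencil width passed to
   DACreate1d() is 1, the ghosted local vectors created below additionally
   hold one value from each neighboring process (e.g., process 0 stores
   indices 0..4).
*/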
113: /*
114: Extract global and local vectors from DA; we use these to store the
115: approximate solution. Then duplicate these for remaining vectors that
116: have the same types.
117: */
118: DACreateGlobalVector(appctx.da,&u);
119: DACreateLocalVector(appctx.da,&appctx.u_local);
121: /*
122: Create local work vector for use in evaluating right-hand-side function;
123: create global work vector for storing exact solution.
124: */
125: VecDuplicate(appctx.u_local,&appctx.localwork);
126: VecDuplicate(u,&appctx.solution);
128: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
129: Create timestepping solver context; set callback routine for
130: right-hand-side function evaluation.
131: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
133: TSCreate(PETSC_COMM_WORLD,&ts);
134: TSSetProblemType(ts,TS_NONLINEAR);
135: TSSetRHSFunction(ts,RHSFunction,&appctx);
137: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
138: Set optional user-defined monitoring routine
139: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
141: TSSetMonitor(ts,Monitor,&appctx,PETSC_NULL);
143: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
144: For nonlinear problems, the user can provide a Jacobian evaluation
145: routine (or use a finite differencing approximation).
147: Create matrix data structure; set Jacobian evaluation routine.
148: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
150: MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,appctx.m,appctx.m,&A);
151: MatSetFromOptions(A);
152: PetscOptionsHasName(PETSC_NULL,"-fdjac",&flg);
153: if (flg) {
154: TSSetRHSJacobian(ts,A,A,RHSJacobianFD,&appctx);
155: } else {
156: TSSetRHSJacobian(ts,A,A,RHSJacobian,&appctx);
157: }
159: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
160: Set solution vector and initial timestep
161: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
163: dt = appctx.h/2.0;
164: TSSetInitialTimeStep(ts,0.0,dt);
165: TSSetSolution(ts,u);
167: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
168: Customize timestepping solver:
169: - Set the solution method to be the Backward Euler method.
170: - Set timestepping duration info
171: Then set runtime options, which can override these defaults.
172: For example,
173: -ts_max_steps <maxsteps> -ts_max_time <maxtime>
174: to override the defaults set by TSSetDuration().
175: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
177: TSSetType(ts,TS_BEULER);
178: TSSetDuration(ts,time_steps_max,time_total_max);
179: TSSetFromOptions(ts);
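/*
   Example invocations (illustrative only; the executable name and the MPI
   launcher depend on the local build and environment):

      ./ex2 -M 100 -ts_max_steps 10 -ts_max_time 1.0
      mpirun -np 2 ./ex2 -M 60 -debug
      ./ex2 -fdjac                (use the finite difference Jacobian)
*/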
181: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
182: Solve the problem
183: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
185: /*
186: Evaluate initial conditions
187: */
188: InitialConditions(u,&appctx);
190: /*
191: Run the timestepping solver
192: */
193: TSStep(ts,&steps,&ftime);
195: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
196: Free work space. All PETSc objects should be destroyed when they
197: are no longer needed.
198: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
200: TSDestroy(ts);
201: VecDestroy(u);
202: MatDestroy(A);
203: DADestroy(appctx.da);
204: VecDestroy(appctx.localwork);
205: VecDestroy(appctx.solution);
206: VecDestroy(appctx.u_local);
208: /*
209: Always call PetscFinalize() before exiting a program. This routine
210: - finalizes the PETSc libraries as well as MPI
211: - provides summary and diagnostic information if certain runtime
212: options are chosen (e.g., -log_summary).
213: */
214: PetscFinalize();
215: return 0;
216: }
217: /* --------------------------------------------------------------------- */
218: /*
219: InitialConditions - Computes the solution at the initial time.
221: Input Parameters:
222: u - uninitialized solution vector (global)
223: appctx - user-defined application context
225: Output Parameter:
226: u - vector with solution at initial time (global)
227: */
228: int InitialConditions(Vec u,AppCtx *appctx)
229: {
230: PetscScalar *u_localptr,h = appctx->h,x;
231: int i,mybase,myend,ierr;
233: /*
234: Determine starting point of each processor's range of
235: grid values.
236: */
237: VecGetOwnershipRange(u,&mybase,&myend);
239: /*
240: Get a pointer to vector data.
241: - For default PETSc vectors, VecGetArray() returns a pointer to
242: the data array. Otherwise, the routine is implementation dependent.
243: - You MUST call VecRestoreArray() when you no longer need access to
244: the array.
245: - Note that the Fortran interface to VecGetArray() differs from the
246: C version. See the users manual for details.
247: */
248: VecGetArray(u,&u_localptr);
250: /*
251: We initialize the solution array by simply writing the solution
252: directly into the array locations. Alternatively, we could use
253: VecSetValues() or VecSetValuesLocal().
254: */
255: for (i=mybase; i<myend; i++) {
256: x = h*(PetscReal)i; /* current location in global grid */
257: u_localptr[i-mybase] = 1.0 + x*x;
258: }
260: /*
261: Restore vector
262: */
263: VecRestoreArray(u,&u_localptr);
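/*
   For reference, a sketch of the alternative mentioned above that uses
   VecSetValues() instead of direct array access (shown only to illustrate
   the API; not compiled here):

      PetscScalar val;
      for (i=mybase; i<myend; i++) {
        x   = h*(PetscReal)i;
        val = 1.0 + x*x;
        VecSetValues(u,1,&i,&val,INSERT_VALUES);
      }
      VecAssemblyBegin(u);
      VecAssemblyEnd(u);

   Note that VecSetValues() takes global indices, so no shift by mybase
   is needed.
*/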
265: /*
266: Print debugging information if desired
267: */
268: if (appctx->debug) {
269: PetscPrintf(appctx->comm,"initial guess vector\n");
270: VecView(u,PETSC_VIEWER_STDOUT_WORLD);
271: }
273: return 0;
274: }
275: /* --------------------------------------------------------------------- */
276: /*
277: ExactSolution - Computes the exact solution at a given time.
279: Input Parameters:
280: t - current time
281: solution - vector in which exact solution will be computed
282: appctx - user-defined application context
284: Output Parameter:
285: solution - vector with the newly computed exact solution
286: */
287: int ExactSolution(PetscReal t,Vec solution,AppCtx *appctx)
288: {
289: PetscScalar *s_localptr,h = appctx->h,x;
290: int i,mybase,myend,ierr;
292: /*
293: Determine starting and ending points of each processor's
294: range of grid values
295: */
296: VecGetOwnershipRange(solution,&mybase,&myend);
298: /*
299: Get a pointer to vector data.
300: */
301: VecGetArray(solution,&s_localptr);
303: /*
304: Simply write the solution directly into the array locations.
305: Alternatively, we could use VecSetValues() or VecSetValuesLocal().
306: */
307: for (i=mybase; i<myend; i++) {
308: x = h*(PetscReal)i;
309: s_localptr[i-mybase] = (t + 1.0)*(1.0 + x*x);
310: }
312: /*
313: Restore vector
314: */
315: VecRestoreArray(solution,&s_localptr);
316: return 0;
317: }
318: /* --------------------------------------------------------------------- */
319: /*
320: Monitor - User-provided routine to monitor the solution computed at
321: each timestep. This example plots the solution and computes the
322: error in two different norms.
324: Input Parameters:
325: ts - the timestep context
326: step - the count of the current step (with 0 meaning the
327: initial condition)
328: time - the current time
329: u - the solution at this timestep
330: ctx - the user-provided context for this monitoring routine.
331: In this case we use the application context which contains
332: information about the problem size, workspace and the exact
333: solution.
334: */
335: int Monitor(TS ts,int step,PetscReal time,Vec u,void *ctx)
336: {
337: AppCtx *appctx = (AppCtx*) ctx; /* user-defined application context */
338: int ierr;
339: PetscReal en2,en2s,enmax;
340: PetscScalar mone = -1.0;
341: PetscDraw draw;
343: /*
344: We use the default X windows viewer
345: PETSC_VIEWER_DRAW_(appctx->comm)
346: that is associated with the current communicator. This saves
347: the effort of calling PetscViewerDrawOpen() to create the window.
348: Note that if we wished to plot several items in separate windows we
349: would create each viewer with PetscViewerDrawOpen() and store them in
350: the application context, appctx.
352: Double buffering makes graphics look better.
353: */
354: PetscViewerDrawGetDraw(PETSC_VIEWER_DRAW_(appctx->comm),0,&draw);
355: PetscDrawSetDoubleBuffer(draw);
356: VecView(u,PETSC_VIEWER_DRAW_(appctx->comm));
358: /*
359: Compute the exact solution at this timestep
360: */
361: ExactSolution(time,appctx->solution,appctx);
363: /*
364: Print debugging information if desired
365: */
366: if (appctx->debug) {
367: PetscPrintf(appctx->comm,"Computed solution vector\n");
368: VecView(u,PETSC_VIEWER_STDOUT_WORLD);
369: PetscPrintf(appctx->comm,"Exact solution vector\n");
370: VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
371: }
373: /*
374: Compute the 2-norm and max-norm of the error
375: */
376: VecAXPY(&mone,u,appctx->solution);
377: VecNorm(appctx->solution,NORM_2,&en2);
378: en2s = sqrt(appctx->h)*en2; /* scale the 2-norm by the grid spacing */
379: VecNorm(appctx->solution,NORM_MAX,&enmax);
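/*
   Note (added for clarity): the VecAXPY() call above overwrites
   appctx->solution with the error (exact minus computed), so the norms
   taken here are norms of the error.  The factor sqrt(h) turns the discrete
   2-norm into an approximation of the continuous L2 norm, since

      ( h * sum_i e_i^2 )^(1/2)  ~=  ( integral over [0,1] of e(x)^2 dx )^(1/2)
*/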
381: /*
382: PetscPrintf() causes only the first processor in this
383: communicator to print the timestep information.
384: */
385: PetscPrintf(appctx->comm,"Timestep %d: time = %g, 2-norm error = %g, max norm error = %g\n",
386: step,time,en2s,enmax);
388: /*
389: Print debugging information if desired
390: */
391: if (appctx->debug) {
392: PetscPrintf(appctx->comm,"Error vector\n");
393: VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
394: }
395: return 0;
396: }
397: /* --------------------------------------------------------------------- */
398: /*
399: RHSFunction - User-provided routine that evaluates the right-hand-side
400: function of the ODE. This routine is set in the main program by
401: calling TSSetRHSFunction(). We compute:
402: global_out = F(global_in)
404: Input Parameters:
405: ts - timestepping context
406: t - current time
407: global_in - vector containing the current iterate
408: ctx - (optional) user-provided context for function evaluation.
409: In this case we use the appctx defined above.
411: Output Parameter:
412: global_out - vector containing the newly evaluated function
413: */
414: int RHSFunction(TS ts,PetscReal t,Vec global_in,Vec global_out,void *ctx)
415: {
416: AppCtx *appctx = (AppCtx*) ctx; /* user-defined application context */
417: DA da = appctx->da; /* distributed array */
418: Vec local_in = appctx->u_local; /* local ghosted input vector */
419: Vec localwork = appctx->localwork; /* local ghosted work vector */
420: int ierr,i,localsize,rank,size;
421: PetscScalar *copyptr,*localptr,sc;
423: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
424: Get ready for local function computations
425: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
426: /*
427: Scatter ghost points to local vector, using the 2-step process
428: DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
429: By placing code between these two statements, computations can be
430: done while messages are in transition.
431: */
432: DAGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);
433: DAGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);
435: /*
436: Access directly the values in our local INPUT work array
437: */
438: VecGetArray(local_in,&localptr);
440: /*
441: Access directly the values in our local OUTPUT work array
442: */
443: VecGetArray(localwork,&copyptr);
445: sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));
447: /*
448: Evaluate our function on the nodes owned by this processor
449: */
450: VecGetLocalSize(local_in,&localsize);
452: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
453: Compute entries for the locally owned part
454: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
456: /*
457: Handle boundary conditions: This is done by using the boundary condition
458: u(t,boundary) = g(t,boundary)
459: for some function g. Now take the derivative with respect to t to obtain
460: u_{t}(t,boundary) = g_{t}(t,boundary)
462: In our case, u(t,0) = t + 1, so that u_{t}(t,0) = 1
463: and u(t,1) = 2*t + 2, so that u_{t}(t,1) = 2
464: */
465: MPI_Comm_rank(appctx->comm,&rank);
466: MPI_Comm_size(appctx->comm,&size);
467: if (!rank) copyptr[0] = 1.0;
468: if (rank == size-1) copyptr[localsize-1] = 2.0;
470: /*
471: Handle the interior nodes where the PDE is replaced by finite
472: difference operators.
473: */
474: for (i=1; i<localsize-1; i++) {
475: copyptr[i] = localptr[i] * sc * (localptr[i+1] + localptr[i-1] - 2.0*localptr[i]);
476: }
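/*
   For clarity, the loop above implements the standard centered-difference
   discretization of the right-hand side,

                   u_i * (u_{i+1} - 2*u_i + u_{i-1})
      F_i(t,u) =  -----------------------------------,
                           2*h^2*(t+1)^2

   where sc = 1/(2*h^2*(t+1)^2) was computed above.
*/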
478: /*
479: Restore vectors
480: */
481: VecRestoreArray(local_in,&localptr);
482: VecRestoreArray(localwork,&copyptr);
484: /*
485: Insert values from the local OUTPUT vector into the global
486: output vector
487: */
488: DALocalToGlobal(da,localwork,INSERT_VALUES,global_out);
490: /* Print debugging information if desired */
491: if (appctx->debug) {
492: PetscPrintf(appctx->comm,"RHS function vector\n");
493: VecView(global_out,PETSC_VIEWER_STDOUT_WORLD);
494: }
496: return 0;
497: }
498: /* --------------------------------------------------------------------- */
499: /*
500: RHSJacobian - User-provided routine to compute the Jacobian of
501: the nonlinear right-hand-side function of the ODE.
503: Input Parameters:
504: ts - the TS context
505: t - current time
506: global_in - global input vector
507: ctx - optional user-defined context, as set by TSSetRHSJacobian()
509: Output Parameters:
510: AA - Jacobian matrix
511: BB - optionally different preconditioning matrix
512: str - flag indicating matrix structure
514: Notes:
515: RHSJacobian computes entries for the locally owned part of the Jacobian.
516: - Currently, all PETSc parallel matrix formats are partitioned by
517: contiguous chunks of rows across the processors.
518: - Each processor needs to insert only elements that it owns
519: locally (but any non-local elements will be sent to the
520: appropriate processor during matrix assembly).
521: - Always specify global row and columns of matrix entries when
522: using MatSetValues().
523: - Here, we set all entries for a particular row at once.
524: - Note that MatSetValues() uses 0-based row and column numbers
525: in Fortran as well as in C.
526: */
527: int RHSJacobian(TS ts,PetscReal t,Vec global_in,Mat *AA,Mat *BB,MatStructure *str,void *ctx)
528: {
529: Mat A = *AA; /* Jacobian matrix */
530: AppCtx *appctx = (AppCtx*)ctx; /* user-defined application context */
531: Vec local_in = appctx->u_local; /* local ghosted input vector */
532: DA da = appctx->da; /* distributed array */
533: PetscScalar v[3],*localptr,sc;
534: int ierr,i,mstart,mend,mstarts,mends,idx[3],is;
536: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
537: Get ready for local Jacobian computations
538: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
539: /*
540: Scatter ghost points to local vector, using the 2-step process
541: DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
542: By placing code between these two statements, computations can be
543: done while messages are in transition.
544: */
545: DAGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);
546: DAGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);
548: /*
549: Get pointer to vector data
550: */
551: VecGetArray(local_in,&localptr);
553: /*
554: Get starting and ending locally owned rows of the matrix
555: */
556: MatGetOwnershipRange(A,&mstarts,&mends);
557: mstart = mstarts; mend = mends;
559: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
560: Compute entries for the locally owned part of the Jacobian.
561: - Currently, all PETSc parallel matrix formats are partitioned by
562: contiguous chunks of rows across the processors.
563: - Each processor needs to insert only elements that it owns
564: locally (but any non-local elements will be sent to the
565: appropriate processor during matrix assembly).
566: - Here, we set all entries for a particular row at once.
567: - We can set matrix entries using either
568: MatSetValuesLocal() or MatSetValues().
569: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
571: /*
572: Set matrix rows corresponding to boundary data
573: */
574: if (mstart == 0) {
575: v[0] = 0.0;
576: MatSetValues(A,1,&mstart,1,&mstart,v,INSERT_VALUES);
577: mstart++;
578: }
579: if (mend == appctx->m) {
580: mend--;
581: v[0] = 0.0;
582: MatSetValues(A,1,&mend,1,&mend,v,INSERT_VALUES);
583: }
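/*
   Why these rows are zero (added for clarity): the boundary equations in
   RHSFunction() are u_t(t,0) = 1 and u_t(t,1) = 2, which do not depend on
   the unknowns, so their derivatives with respect to u vanish.  A 0.0 is
   still inserted on the diagonal so that these locations remain part of the
   nonzero structure, consistent with the SAME_NONZERO_PATTERN flag set below.
*/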
585: /*
586: Set matrix rows corresponding to interior data. We construct the
587: matrix one row at a time.
588: */
589: sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));
590: for (i=mstart; i<mend; i++) {
591: idx[0] = i-1; idx[1] = i; idx[2] = i+1;
592: is = i - mstart + 1;
593: v[0] = sc*localptr[is];
594: v[1] = sc*(localptr[is+1] + localptr[is-1] - 4.0*localptr[is]);
595: v[2] = sc*localptr[is];
596: MatSetValues(A,1,&i,3,idx,v,INSERT_VALUES);
597: }
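/*
   The three entries per row follow from differentiating the discrete
   right-hand side F_i = sc * u_i * (u_{i+1} + u_{i-1} - 2*u_i), with
   sc = 1/(2*h^2*(t+1)^2), with respect to the unknowns:

      dF_i/du_{i-1} = sc * u_i                              (= v[0])
      dF_i/du_i     = sc * (u_{i+1} + u_{i-1} - 4*u_i)      (= v[1])
      dF_i/du_{i+1} = sc * u_i                              (= v[2])
*/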
599: /*
600: Restore vector
601: */
602: VecRestoreArray(local_in,&localptr);
604: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
605: Complete the matrix assembly process and set some options
606: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
607: /*
608: Assemble matrix, using the 2-step process:
609: MatAssemblyBegin(), MatAssemblyEnd()
610: Computations can be done while messages are in transition
611: by placing code between these two statements.
612: */
613: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
614: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
616: /*
617: Set flag to indicate that the Jacobian matrix retains an identical
618: nonzero structure throughout all timestepping iterations (although the
619: values of the entries change). Thus, we can save some work in setting
620: up the preconditioner (e.g., no need to redo symbolic factorization for
621: ILU/ICC preconditioners).
622: - If the nonzero structure of the matrix is different during
623: successive linear solves, then the flag DIFFERENT_NONZERO_PATTERN
624: must be used instead. If you are unsure whether the matrix
625: structure has changed or not, use the flag DIFFERENT_NONZERO_PATTERN.
626: - Caution: If you specify SAME_NONZERO_PATTERN, PETSc
627: believes your assertion and does not check the structure
628: of the matrix. If you erroneously claim that the structure
629: is the same when it actually is not, the new preconditioner
630: will not function correctly. Thus, use this optimization
631: feature with caution!
632: */
633: *str = SAME_NONZERO_PATTERN;
635: /*
636: Set an option to indicate that we will never add a new nonzero location
637: to the matrix. If we do, it will generate an error.
638: */
639: MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR);
641: return 0;
642: }