Actual source code: ex26.c
/*$Id: ex2.c,v 1.94 2001/08/07 21:30:54 bsmith Exp $*/

/* Program usage:  mpirun -np <procs> ex26 [-help] [all PETSc options] */

static char help[] = "Solves a linear system in parallel with ESI.\n\
Input parameters include:\n\
  -n <mesh_n>       : number of mesh points in x-direction\n\n";
/*T
   Concepts: ESI^basic parallel example;
   Concepts: ESI^Laplacian, 1d
   Concepts: Laplacian, 1d
   Processors: n
T*/
/*
   Note that the usual PETSc objects all work; those related to vec, mat, pc, ksp, and sles
   are prefixed with esi_.
*/
#include "esi/ESI.h"
#include "petsc.h"
int main(int argc,char **args)
{
  ::esi::IndexSpace<int>                      *indexspace;
  ::esi::Vector<double,int>                   *x,*b;
  ::esi::Operator<double,int>                 *op;
  ::esi::SolverIterative<double,int>          *solver;
  ::esi::MatrixRowWriteAccess<double,int>     *A;
  int                                         ierr,i,n = 3,Istart,Iend,c[3],N;
  double                                      v[3],*barray;
  ::esi::IndexSpace<int>::Factory             *ifactory;
  ::esi::Vector<double,int>::Factory          *vfactory;
  ::esi::Operator<double,int>::Factory        *ofactory;
  ::esi::SolverIterative<double,int>::Factory *sfactory;      /* linear solver context */
  PetscInitialize(&argc,&args,(char *)0,help);

  /*
     Load up the factories we will need to create our objects
  */
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::IndexSpace",reinterpret_cast<void *&>(ifactory));
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::Matrix",reinterpret_cast<void *&>(ofactory));
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::Vector",reinterpret_cast<void *&>(vfactory));
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::SolverIterative",reinterpret_cast<void *&>(sfactory));
  PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);

  /*
     Define the layout of the vectors and matrices across the processors
  */
  ifactory->create("MPI",(void*)&PETSC_COMM_WORLD,n,PETSC_DECIDE,PETSC_DECIDE,indexspace);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.

     Performance tuning note: For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance. Preallocation is not possible via the generic
     matrix creation routine.
  */
  ofactory->create(*indexspace,*indexspace,op);

  /*
     ESI parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors. Determine which
     rows of the matrix are locally owned.
  */
  ierr = indexspace->getLocalPartitionOffset(Istart);
  ierr = indexspace->getLocalSize(Iend);
  ierr = indexspace->getGlobalSize(N);
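  /* Convert the local size into the end of the locally owned row range [Istart,Iend) */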
  Iend += Istart;
  /*
     Set matrix elements for the 1-D, three-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
  op->getInterface("esi::MatrixRowWriteAccess",reinterpret_cast<void *&>(A));
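  /* Boundary rows (first and last global row) get a single unit entry on the diagonal */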
  if (Istart == 0) {
    v[0] = 1.0;
    A->copyIntoRow(Istart,v,&Istart,1);
    Istart++;
  }
  if (Iend == N) {
    Iend--;
    v[0] = 1.0;
    A->copyIntoRow(Iend,v,&Iend,1);
  }
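  /* Interior rows get the -1, 2, -1 stencil values at columns i-1, i, i+1 */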
  v[0] = -1.0; v[1] = 2.0; v[2] = -1.0;
  for (i=Istart; i<Iend; i++) {
    c[0] = i-1; c[1] = i; c[2] = i+1;
    A->copyIntoRow(i,v,c,3);
  }
  /*
     Assemble the matrix: loadComplete() finalizes the values set with
     copyIntoRow(), communicating any off-processor entries.
  */
  A->loadComplete();
  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly. Here the matrix and the vectors are
        created from the same index space, so they share the same
        parallel layout.
      - The user can alternatively specify the local dimensions when
        more sophisticated partitioning is needed (replacing the
        PETSC_DECIDE arguments in the index space creation above).
  */
  vfactory->create(*indexspace,x);
  x->clone(b);
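  /* Fill the right-hand-side vector through direct access to its local array; each entry is set to its global row index */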
  b->getCoefPtrReadWriteLock(barray);
  for (i=Istart; i<Iend; i++) {
    barray[i-Istart] = i;
  }
  b->releaseCoefPtrLock(barray);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create linear solver context
  */
  sfactory->create("MPI",(void*)&PETSC_COMM_WORLD,solver);
  /*
     Set the operator. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  solver->setOperator(*op);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  solver->solve(*b,*x);
  /*
     Always call PetscFinalize() before exiting a program. This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
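  /* Release the ESI objects and delete the factories used to create them */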
  indexspace->deleteReference();
  op->deleteReference();
  x->deleteReference();
  b->deleteReference();
  solver->deleteReference();
  delete ifactory;
  delete vfactory;
  delete ofactory;
  delete sfactory;
  PetscFinalize();
  return 0;
}