Actual source code: grid2d.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: grid2d.c,v 1.31 2000/07/16 23:20:01 knepley Exp $";
  3: #endif

  5: /* Implements 2d triangular grids */
  6: #include "petscts.h"
  7: #include "gsolver.h"
  8: #include "src/grid/gridimpl.h"         /*I "grid.h" I*/
  9: #include "src/mesh/impls/triangular/triimpl.h"
 10: #include "src/gvec/impls/triangular/2d/gvec2d.h"
 11: #include "src/gvec/impls/triangular/2d/gvec2dView.h"
 12: #include "src/gvec/impls/triangular/2d/gmat2d.h"
 13: #include "grid2d.h"
 14: #include "elemvec2d.h"
 15: #include "varorder2d.h"

 17: extern int GridResetConstrainedMultiply_Private(Grid, GMat);

 19: int GridDestroy_Triangular_2D(Grid grid) {
 20:   int  field, bd;
 21:   int  ierr;

 24:   /* Field variables */
 25:   for(field = 0; field < grid->numFields; field++) {
 26:     if (grid->fields[field].name != PETSC_NULL) {
 27:       PetscFree(grid->fields[field].name);
 28:     }
 29:     PetscFree(grid->fields[field].discType);
 30:     DiscretizationDestroy(grid->fields[field].disc);
 31:   }
 32:   PetscFree(grid->fields);
 33:   /* Class variables */
 34:   if (grid->cm) {
 35:     FieldClassMapDestroy(grid->cm);
 36:   }
 37:   /* Default variable orderings */
 38:   if (grid->order) {
 39:     VarOrderingDestroy(grid->order);
 40:   }
 41:   if (grid->locOrder) {
 42:     LocalVarOrderingDestroy(grid->locOrder);
 43:   }
 44:   /* Ghost variable scatter */
 45:   if (grid->ghostVec) {
 46:     VecDestroy(grid->ghostVec);
 47:   }
 48:   if (grid->ghostScatter) {
 49:     VecScatterDestroy(grid->ghostScatter);
 50:   }
 51:   /* Constraint variables */
 52:   if (grid->constraintCM) {
 53:     FieldClassMapDestroy(grid->constraintCM);
 54:   }
 55:   if (grid->constraintOrder) {
 56:     VarOrderingDestroy(grid->constraintOrder);
 57:   }
 58:   if (grid->constraintOrdering) {
 59:     ISDestroy(grid->constraintOrdering);
 60:   }
 61:   if (grid->constraintMatrix) {
 62:     MatDestroy(grid->constraintMatrix);
 63:   }
 64:   if (grid->constraintInverse) {
 65:     MatDestroy(grid->constraintInverse);
 66:   }
 67:   /* Problem variables */
 68:   PetscFree(grid->rhsFuncs);
 69:   PetscFree(grid->rhsOps);
 70:   PetscFree(grid->matOps);
 71:   /* Assembly variables */
 72:   PetscFree(grid->defaultFields);
 73:   if (grid->vec) {
 74:     ElementVecDestroy(grid->vec);
 75:   }
 76:   if (grid->mat) {
 77:     ElementMatDestroy(grid->mat);
 78:   }
 79:   if (grid->ghostElementVec) {
 80:     ElementVecDestroy(grid->ghostElementVec);
 81:   }
 82:   /* Boundary condition variables */
 83:   if (grid->reductionCM) {
 84:     FieldClassMapDestroy(grid->reductionCM);
 85:   }
 86:   if (grid->reduceOrder) {
 87:     VarOrderingDestroy(grid->reduceOrder);
 88:   }
 89:   if (grid->locReduceOrder) {
 90:     LocalVarOrderingDestroy(grid->locReduceOrder);
 91:   }
 92:   PetscFree(grid->bc);
 93:   PetscFree(grid->pointBC);
 94:   /* Boundary iteration variables */
 95:   for(bd = 0; bd < grid->numBd; bd++) {
 96:     if (grid->bdSize[bd] != PETSC_NULL) {
 97:       PetscFree(grid->bdSize[bd]);
 98:     }
 99:   }
100:   PetscFree(grid->bdSize);
101:   if (grid->bdOrder) {
102:     VarOrderingDestroy(grid->bdOrder);
103:   }
104:   if (grid->bdLocOrder) {
105:     LocalVarOrderingDestroy(grid->bdLocOrder);
106:   }
107:   /* Subobjects */
108:   MeshDestroy(grid->mesh);
109:   return(0);
110: }

112: static int GridView_Triangular_2D_File(Grid grid, PetscViewer viewer) {
113:   VarOrdering order = grid->order;
114:   FILE       *fd;
115:   int         rank, field;
116:   int         ierr;

119:   MPI_Comm_rank(grid->comm, &rank);
120:   PetscViewerASCIIGetPointer(viewer, &fd);
121:   PetscFPrintf(grid->comm, fd, "Grid Object:\n");
122:   if (grid->numFields == 1) {
123:     PetscFPrintf(grid->comm, fd, "  %d field:\n", grid->numFields);
124:   } else {
125:     PetscFPrintf(grid->comm, fd, "  %d fields:\n", grid->numFields);
126:   }
127:   for(field = 0; field < grid->numFields; field++) {
128:     /* Grid structure */
129:     if (grid->fields[field].name != PETSC_NULL) {
130:       PetscFPrintf(grid->comm, fd, "  %s field", grid->fields[field].name);
131:     } else {
132:       PetscFPrintf(grid->comm, fd, "  field %d", field);
133:     }
134:     if (grid->fields[field].numComp == 1) {
135:       PetscFPrintf(grid->comm, fd, " with %d component is ", grid->fields[field].numComp);
136:     } else {
137:       PetscFPrintf(grid->comm, fd, " with %d components is ", grid->fields[field].numComp);
138:     }
139:     if (grid->fields[field].isActive) {
140:       PetscFPrintf(grid->comm, fd, "active\n    ");
141:     } else {
142:       PetscFPrintf(grid->comm, fd, "inactive\n    ");
143:     }
144:     DiscretizationView(grid->fields[field].disc, viewer);
145:   }

147:   /* Problem specific information */
148:   if (grid->numActiveFields > 0) {
149:     PetscFPrintf(grid->comm, fd, "  %d variables in the problem:\n", order->numVars);
150:     PetscSynchronizedFPrintf(grid->comm, fd, "    %d variables and %d ghost variables in domain %d:\n",
151:                              order->numLocVars, order->numOverlapVars - order->numLocVars, rank);
152:     PetscSynchronizedFlush(grid->comm);
153:   }

155:   /* Underlying mesh */
156:   MeshView(grid->mesh, viewer);
157:   return(0);
158: }

160: int GridView_Triangular_2D(Grid grid, PetscViewer viewer) {
161:   PetscTruth isascii;
162:   int        ierr;

165:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
166:   if (isascii == PETSC_TRUE) {
167:     GridView_Triangular_2D_File(grid, viewer);
168:   }
169:   return(0);
170: }
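
/* Usage sketch (illustrative only: GridView() is assumed to be the generic dispatch
   declared in grid.h, and the exact name of the standard stdout viewer handle may
   differ by PETSc version):

     GridView(grid, PETSC_VIEWER_STDOUT_WORLD);

   Viewers of any type other than ASCII are silently ignored by the routine above. */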

172: int GridSetupGhostScatter_Triangular_2D(Grid grid, VarOrdering order, Vec *ghostVec, VecScatter *ghostScatter) {
173:   FieldClassMap         map;
174:   PetscConstraintObject constCtx          = grid->constraintCtx;
175:   int                   numOverlapVars    = order->numOverlapVars;
176:   int                   numLocVars        = order->numLocVars;
177:   int                   numVars           = order->numVars;
178:   int                   numLocNewVars     = order->numLocNewVars;
179:   int                   numOverlapNewVars = order->numOverlapNewVars;
180:   int                   numGhostNewVars   = order->numOverlapNewVars - order->numLocNewVars;
181:   int                  *firstVar          = order->firstVar;
182:   int                  *offsets           = order->offsets;
183:   int                   numNodes, numGhostNodes;
184:   int                  *classes, *classSizes;
185:   IS                    localIS;       /* Local  indices for local ghost vector variables */
186:   int                  *indices;       /* Global indices for local ghost vector variables */
187:   IS                    globalIS;      /* Global indices for local ghost vector variables */
188:   Vec                   dummyVec;      /* Dummy global vector used to create the ghost variable scatter */
189:   int                   rank, newComp;
190:   int                   node, nclass, var, startVar, newField, i, c;
191:   int                   ierr;

196:   VarOrderingGetClassMap(order, &map);
197:   numNodes      = map->numNodes;
198:   numGhostNodes = map->numGhostNodes;
199:   classes       = map->classes;
200:   classSizes    = map->classSizes;

202:   /* Create the ghost variable scatter -- note that localOffsets is not used when there are no ghost variables */
203:   MPI_Comm_rank(grid->comm, &rank);
204:   ISCreateStride(grid->comm, numOverlapVars, 0, 1, &localIS);
205:   PetscMalloc(numOverlapVars * sizeof(int), &indices);
206:   for(var = 0; var < numLocVars; var++) {
207:     indices[var] = var + firstVar[rank];
208:   }
209:   for(node = 0, var = numLocVars; node < numGhostNodes; node++) {
210:     nclass = classes[numNodes+node];
211:     for(i = 0; i < classSizes[nclass]; i++) {
212:       indices[var++] = offsets[numNodes+node] + i;
213:     }
214:   }
215:   if (numGhostNewVars > 0) {
216:     /* Add in constraints that generate off-processor variables */
217:     (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, &newComp);
218: 
219:     for(newField = numLocNewVars/newComp; newField < numOverlapNewVars/newComp; newField++) {
220:       (*constCtx->ops->getindices)(constCtx, grid->mesh, order, newField, CONSTRAINT_NEW_INDEX, &startVar);
221: 
222:       for(c = 0; c < newComp; c++, var++) {
223:         indices[var] = startVar+c;
224:       }
225:     }
226:   }
227:   if (var != numOverlapVars) SETERRQ(PETSC_ERR_PLIB, "Invalid ghost vector numbering");
228:   ISCreateGeneral(grid->comm, numOverlapVars, indices, &globalIS);
229:   VecCreateMPI(grid->comm, numLocVars, numVars, &dummyVec);
230:   VecCreateSeq(PETSC_COMM_SELF, numOverlapVars, ghostVec);
231:   VecScatterCreate(dummyVec, globalIS, *ghostVec, localIS, ghostScatter);
232:   PetscLogObjectParent(grid, *ghostVec);
233:   PetscLogObjectParent(grid, *ghostScatter);

235:   /* Cleanup */
236:   VecDestroy(dummyVec);
237:   ISDestroy(localIS);
238:   ISDestroy(globalIS);
239:   PetscFree(indices);
240:   return(0);
241: }
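
/* Usage sketch (an assumption based on how ghostVec is used later in this file, e.g. in
   GridEvaluateRhs_Triangular_2D via GridGlobalToLocal): a global solution vector x is
   scattered through ghostScatter into the sequential ghostVec so that ghost values become
   available locally.  With the PETSc 2.x-era VecScatter argument order this would look like:

     VecScatterBegin(x, grid->ghostVec, INSERT_VALUES, SCATTER_FORWARD, grid->ghostScatter);
     VecScatterEnd(x, grid->ghostVec, INSERT_VALUES, SCATTER_FORWARD, grid->ghostScatter);
*/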

243: int GridSetupBoundarySizes_Triangular_2D(Grid grid) {
244:   Mesh_Triangular         *tri  = (Mesh_Triangular *) grid->mesh->data;
245:   Partition                part = grid->mesh->part;
246:   int                      numFields    = grid->cm->numFields;
247:   int                     *fields       = grid->cm->fields;
248:   int                      numClasses   = grid->cm->numClasses;
249:   int                     *classes      = grid->cm->classes;
250:   int                    **fieldClasses = grid->cm->fieldClasses;
251:   int                     *bdCount; /* Number of boundary nodes of a given class */
252:   int                      firstNode;
253:   int                      bd, bdNode, f, field, node, nclass;
254:   int                      ierr;

257:   PetscMalloc(numClasses * sizeof(int), &bdCount);
258:   PartitionGetStartNode(part, &firstNode);
259:   for(bd = 0; bd < grid->numBd; bd++) {
260:     /* Count the number of boundary nodes of each class */
261:     PetscMemzero(bdCount, numClasses * sizeof(int));
262:     for(bdNode = tri->bdBegin[bd]; bdNode < tri->bdBegin[bd+1]; bdNode++) {
263:       node = tri->bdNodes[bdNode] - firstNode;
264:       if ((node >= 0) && (node < grid->mesh->numNodes)) {
265:         bdCount[classes[node]]++;
266:       }
267:     }
268:     /* Calculate boundary sizes */
269:     PetscMemzero(grid->bdSize[bd], grid->numFields * sizeof(int));
270:     for(f = 0; f < numFields; f++) {
271:       field = fields[f];
272:       for(nclass = 0; nclass < numClasses; nclass++) {
273:         if (fieldClasses[f][nclass]) {
274:           grid->bdSize[bd][field] += bdCount[nclass];
275:         }
276:       }
277:     }
278:   }

280:   /* Cleanup */
281:   PetscFree(bdCount);
282:   return(0);
283: }
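
/* After this routine, grid->bdSize[bd][field] holds the number of local boundary nodes on
   boundary bd whose class carries the given field; nodes outside the local partition
   (the firstNode/numNodes test above) are skipped, so ghost nodes are not counted. */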

285: #if 0
286: /*@C
287:   GridExtantExchange
288:   This function transfers data between local storage in different domains without a predefined mapping.

290:   Input Parameters:
291: . numExtants   - The number of extants (interior variables) in this domain
292: . extantProcs  - The processor to which to send each extant
293: . firstExtant  - The first extant variable in each domain

295: . ghostIndices - The global index for each ghost
296: . dataType     - The type of the variables
297: . firstVar     - The first variable on each processor
298: . addv         - The insert mode, INSERT_VALUES or ADD_VALUES
299: . mode         - The direction of the transfer, SCATTER_FORWARD or SCATTER_REVERSE
300: . locVars      - The local variable array

302:   Output Parameters:
303: . firstExtant  - The first extant variable in each domain after repartitioning

305: . ghostVars    - The ghost variables

307:   Note:
308:   The data in ghostVars is assumed contiguous and implicitly indexed by the order of
309:   ghostProcs and ghostIndices. The SCATTER_FORWARD mode will take the requested data
310:   from locVars and copy it to ghostVars in the order specified by ghostIndices. The
311:   SCATTER_REVERSE mode will take data from ghostVars and copy it to locVars.

313:   Level: developer

315: .keywords ghost, exchange, grid
316: .seealso GridGlobalToLocal, GridLocalToGlobal
317: @*/
318: int GridExtantExchange(MPI_Comm comm, int numExtants, int *extantProcs, int *firstExtant, PetscDataType dataType, AO *ordering,
320:                        int *firstVar, InsertMode addv, ScatterMode mode, void *locVars, void *ghostVars)
321: {
322:   int         *numSendExtants; /* The number of extants from each domain */
323:   int         *numRecvExtants; /* The number of extants in each domain */
324:   int         *sumSendExtants; /* The prefix sums of numSendExtants */
325:   int         *sumRecvExtants; /* The prefix sums of numRecvExtants */
326:   int         *offsets;        /* The offset into the send array for each domain */
327:   int          totSendExtants; /* The number of ghosts to request variables for */
328:   int          totRecvExtants; /* The number of nodes to provide class info about */
329:   int         *sendIndices;    /* The canonical indices of extants in this domain */
330:   int         *recvIndices;    /* The canonical indices of extants to return variables for */
331:   int         *extantIndices;  /* The new canonical indices of extants after reordering */
332:   char        *tempVars;       /* The variables of the requested or submitted extants */
333:   int          numLocVars;
334:   char        *locBytes   = (char *) locVars;
335:   MPI_Datatype MPIType;
336:   int          typeSize;
337:   int          numProcs, rank;
338:   int          proc, extant, locIndex, byte;
339:   int          ierr;

342:   /* Initialize communication */
343:   MPI_Comm_size(comm, &numProcs);
344:   MPI_Comm_rank(comm, &rank);
345:   PetscMalloc(numProcs * sizeof(int), &numSendExtants);
346:   PetscMalloc(numProcs * sizeof(int), &numRecvExtants);
347:   PetscMalloc(numProcs * sizeof(int), &sumSendExtants);
348:   PetscMalloc(numProcs * sizeof(int), &sumRecvExtants);
349:   PetscMalloc(numProcs * sizeof(int), &offsets);
350:   PetscMemzero(numSendExtants,  numProcs * sizeof(int));
351:   PetscMemzero(numRecvExtants,  numProcs * sizeof(int));
352:   PetscMemzero(sumSendExtants,  numProcs * sizeof(int));
353:   PetscMemzero(sumRecvExtants,  numProcs * sizeof(int));
354:   PetscMemzero(offsets,         numProcs * sizeof(int));
355:   numLocVars = firstVar[rank+1] - firstVar[rank];

357:   /* Get number of extants to send to each processor */
358:   for(extant = 0; extant < numExtants; extant++) {
359:     numSendExtants[extantProcs[extant]]++;
360:   }

362:   /* Get number of extants to receive from each processor */
363:   MPI_Alltoall(numSendExtants, 1, MPI_INT, numRecvExtants, 1, MPI_INT, comm);
364:   for(proc = 1; proc < numProcs; proc++) {
365:     sumSendExtants[proc] = sumSendExtants[proc-1] + numSendExtants[proc-1];
366:     sumRecvExtants[proc] = sumRecvExtants[proc-1] + numRecvExtants[proc-1];
367:     offsets[proc]       = sumSendExtants[proc];
368:   }
369:   totSendExtants = sumSendExtants[numProcs-1] + numSendExtants[numProcs-1];
370:   totRecvExtants = sumRecvExtants[numProcs-1] + numRecvExtants[numProcs-1];
371:   if (numExtants != totSendExtants) SETERRQ(PETSC_ERR_PLIB, "Invalid number of extants in send");

373:   PetscDataTypeGetSize(dataType, &typeSize);
374:   if (totSendExtants) {
375:     PetscMalloc(totSendExtants * sizeof(int), &sendIndices);
376:   }
377:   if (totRecvExtants) {
378:     PetscMalloc(totRecvExtants * sizeof(int), &recvIndices);
379:     PetscMalloc(totRecvExtants * sizeof(int), &extantIndices);
380:     PetscMalloc(totRecvExtants * typeSize,    &tempVars);
381:   }

383:   /* Must order extants by processor */
384:   for(extant = 0; extant < numExtants; extant++)
385:     sendIndices[offsets[extantProcs[extant]]++] = extant + firstExtant[rank];

387:   /* Get canonical indices of extants to provide variables for */
388:   MPI_Alltoallv(sendIndices, numSendExtants, sumSendExtants, MPI_INT,
389:                        recvIndices, numRecvExtants, sumRecvExtants, MPI_INT, comm);
390: 

392:   /* Recompute size and offset of each domain */
393:   MPI_Allgather(&totRecvExtants, 1, MPI_INT, &firstExtant[1], 1, MPI_INT, comm);
394:   firstExtant[0] = 0;
395:   for(proc = 1; proc <= numProcs; proc++)
396:     firstExtant[proc] += firstExtant[proc-1];

398:   /* Create the global extant reordering */
399:   for(extant = 0; extant < totRecvExtants; extant++)
400:     /* This would be the time to do RCM on the local graph by reordering extantIndices[] */
401:     extantIndices[extant] =  extant + firstExtant[rank];
402:   AOCreateDebug(comm, totRecvExtants, recvIndices, extantIndices, ordering);

404:   switch(mode)
405:   {
406:   case SCATTER_FORWARD:
407:     /* Get extant variables */
408:     if (addv == INSERT_VALUES) {
409:       for(extant = 0; extant < totRecvExtants; extant++)
410:       {
411:         locIndex = recvIndices[extant] - firstVar[rank];
412: #ifdef PETSC_USE_BOPT_g
413:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
414: #endif
415:         for(byte = 0; byte < typeSize; byte++)
416:           tempVars[extant*typeSize+byte] = locBytes[locIndex*typeSize+byte];
417:       }
418:     } else {
419:       for(extant = 0; extant < totRecvExtants; extant++)
420:       {
421:         locIndex = recvIndices[extant] - firstVar[rank];
422: #ifdef PETSC_USE_BOPT_g
423:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
424: #endif
425:         for(byte = 0; byte < typeSize; byte++)
426:           tempVars[extant*typeSize+byte] += locBytes[locIndex*typeSize+byte];
427:       }
428:     }

430:     /* Communicate local variables to extant storage */
431:     PetscDataTypeToMPIDataType(dataType, &MPIType);
432:     MPI_Alltoallv(tempVars,  numRecvExtants, sumRecvExtants, MPIType,
433:                          ghostVars, numSendExtants, sumSendExtants, MPIType, comm);
434: 
435:     break;
436:   case SCATTER_REVERSE:
437:     /* Communicate extant variables to local storage */
438:     PetscDataTypeToMPIDataType(dataType, &MPIType);
439:     MPI_Alltoallv(ghostVars, numSendExtants, sumSendExtants, MPIType,
440:                          tempVars,  numRecvExtants, sumRecvExtants, MPIType, comm);
441: 

443:     /* Get extant variables */
444:     if (addv == INSERT_VALUES) {
445:       for(extant = 0; extant < totRecvExtants; extant++)
446:       {
447:         locIndex = recvIndices[extant] - firstVar[rank];
448: #ifdef PETSC_USE_BOPT_g
449:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
450: #endif
451:         for(byte = 0; byte < typeSize; byte++)
452:           locBytes[locIndex*typeSize+byte] = tempVars[extant*typeSize+byte];
453:       }
454:     } else {
455:       for(extant = 0; extant < totRecvExtants; extant++)
456:       {
457:         locIndex = recvIndices[extant] - firstVar[rank];
458: #ifdef PETSC_USE_BOPT_g
459:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
460: #endif
461:         for(byte = 0; byte < typeSize; byte++)
462:           locBytes[locIndex*typeSize+byte] += tempVars[extant*typeSize+byte];
463:       }
464:     }
465:     break;
466:   default:
467:     SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid scatter mode");
468:   }

470:   /* Cleanup */
471:   PetscFree(numSendExtants);
472:   PetscFree(numRecvExtants);
473:   PetscFree(sumSendExtants);
474:   PetscFree(sumRecvExtants);
475:   PetscFree(offsets);
476:   if (totSendExtants) {
477:     PetscFree(sendIndices);
478:   }
479:   if (totRecvExtants) {
480:     PetscFree(recvIndices);
481:     PetscFree(tempVars);
482:   }
483:   return(0);
484: }
485: #endif
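
/* Intended calling pattern for GridExtantExchange(), per its header comment above (the
   routine is compiled out, so this is only a hypothetical sketch; the variable names are
   illustrative): with SCATTER_FORWARD the requested values are pulled out of locVars and
   delivered to ghostVars, while SCATTER_REVERSE pushes ghostVars back into locVars,
   inserting or adding according to addv:

     GridExtantExchange(comm, numExtants, extantProcs, firstExtant, PETSC_SCALAR, &ordering,
                        firstVar, INSERT_VALUES, SCATTER_FORWARD, locVars, ghostVars);
*/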

487: int GridSetUp_Triangular_2D(Grid grid) {
488:   FieldClassMap newCM;
489: #ifdef NEW_REDUCTION
490:   int           numReduceFields;
491:   int          *reduceFields;
492:   int           bc;
493: #endif
494:   int           elemSize;
495:   int           f, field;
496:   int           ierr;

499:   if (grid->numActiveFields <= 0) PetscFunctionReturn(1);

501:   /* Create default class map */
502:   if (grid->cm != PETSC_NULL) {
503:     FieldClassMapDestroy(grid->cm);
504:   }
505:   FieldClassMapCreateTriangular2D(grid, grid->numActiveFields, grid->defaultFields, &grid->cm);
506:   /* Implement system constraints */
507:   if (grid->reduceSystem == PETSC_TRUE) {
508:     /* Constrain the default class structure */
509:     FieldClassMapConstrain(grid->cm, grid, PETSC_TRUE, PETSC_FALSE, &newCM);
510:     FieldClassMapDestroy(grid->cm);
511:     grid->cm = newCM;
512:     /* Create reduction class map */
513:     if (grid->reductionCM != PETSC_NULL) {
514:       FieldClassMapDestroy(grid->reductionCM);
515:     }
516: #ifdef NEW_REDUCTION
517:     PetscMalloc((grid->numBC+grid->numPointBC) * sizeof(int), &reduceFields);
518:     for(bc = 0, numReduceFields = 0; bc < grid->numBC; bc++) {
519:       if (grid->bcReduce[bc] != PETSC_TRUE) continue;
520:       for(f = 0; f < numReduceFields; f++) {
521:         if (reduceFields[f] == grid->bcField[bc]) break;
522:       }
523:       if (f == numReduceFields) reduceFields[numReduceFields++] = grid->bcField[bc];
524:     }
525:     for(bc = 0; bc < grid->numPointBC; bc++) {
526:       if (grid->pointBCReduce[bc] != PETSC_TRUE) continue;
527:       for(f = 0; f < numReduceFields; f++) {
528:         if (reduceFields[f] == grid->pointBCField[bc]) break;
529:       }
530:       if (f == numReduceFields) reduceFields[numReduceFields++] = grid->pointBCField[bc];
531:     }
532:     FieldClassMapCreateTriangular2D(grid, numReduceFields, reduceFields, &newCM);
533:     FieldClassMapReduce(newCM, grid, &grid->reductionCM);
534:     FieldClassMapDestroy(newCM);
535:     PetscFree(reduceFields);
536: #else
537:     FieldClassMapReduce(grid->cm, grid, &grid->reductionCM);
538: #endif
539:   }
540:   /* Calculate boundary sizes after reduction */
541:   GridSetupBoundarySizes_Triangular_2D(grid);

543:   /* Setup default global and local variable orderings */
544:   if (grid->order) {
545:     VarOrderingDestroy(grid->order);
546:   }
547:   if (grid->locOrder) {
548:     LocalVarOrderingDestroy(grid->locOrder);
549:   }
550:   VarOrderingCreate(grid, &grid->order);
551:   LocalVarOrderingCreate(grid, grid->cm->numFields, grid->cm->fields, &grid->locOrder);

553:   /* Setup global and local variable orderings for BC reduction */
554:   if (grid->reduceOrder) {
555:     VarOrderingDestroy(grid->reduceOrder);
556:   }
557:   if (grid->locReduceOrder) {
558:     LocalVarOrderingDestroy(grid->locReduceOrder);
559:   }
560:   if (grid->reduceSystem) {
561:     VarOrderingCreateReduce(grid, &grid->reduceOrder);
562:     LocalVarOrderingCreate(grid, grid->reductionCM->numFields, grid->reductionCM->fields, &grid->locReduceOrder);
563: 
564:   }

566:   /* Setup element vector and matrix */
567:   if (grid->vec != PETSC_NULL) {
568:     ElementVecDestroy(grid->vec);
569:   }
570:   if (grid->ghostElementVec != PETSC_NULL) {
571:     ElementVecDestroy(grid->ghostElementVec);
572:   }
573:   if (grid->mat != PETSC_NULL) {
574:     ElementMatDestroy(grid->mat);
575:   }
576:   elemSize = grid->locOrder->elemSize;
577:   if (grid->explicitConstraints == PETSC_TRUE) {
578:     for(f = 0; f < grid->cm->numFields; f++) {
579:       field = grid->cm->fields[f];
580:       if (grid->fields[field].isConstrained == PETSC_TRUE)
581:         elemSize += grid->fields[field].disc->funcs*grid->fields[field].constraintCompDiff;
582:     }
583:   }
584:   ElementVecCreate(grid->comm, elemSize, &grid->vec);
585:   ElementVecCreate(grid->comm, elemSize, &grid->ghostElementVec);
586:   ElementMatCreate(grid->comm, elemSize, elemSize, &grid->mat);
587:   grid->vec->reduceSize             = grid->locOrder->elemSize;
588:   grid->ghostElementVec->reduceSize = grid->locOrder->elemSize;
589:   grid->mat->reduceRowSize          = grid->locOrder->elemSize;
590:   grid->mat->reduceColSize          = grid->locOrder->elemSize;

592:   return(0);
593: }

595: int GridSetupConstraints_Triangular_2D(Grid grid, PetscConstraintObject ctx) {
596:   Mesh             mesh;
597:   Field           *fields             = grid->fields;
598:   FieldClassMap    cm                 = grid->cm;
599:   int              numFields          = grid->cm->numFields;
600:   int              numNodes           = grid->cm->numNodes;
601:   int            **fieldClasses       = grid->cm->fieldClasses;
602:   int             *classes            = grid->cm->classes;
603:   int             *classSizes         = grid->cm->classSizes;
604:   int              numVars            = grid->order->numVars;
605:   int              numLocVars         = grid->order->numLocVars;
606:   int             *firstVar           = grid->order->firstVar;
607:   int             *offsets            = grid->order->offsets;
608:   int              numTotalFields     = grid->order->numTotalFields;
609:   int            **localStart         = grid->order->localStart;
610:   int              constField         = -1; /* The field which is constrained */
611:   int             *ordering;                /* Gives a mapping between the two variable numberings */
612:   int             *diagRows;                /* Allocation for the projector P */
613:   int             *offdiagRows;             /* Allocation for the projector P */
614:   int              numConstrainedFields;
615:   int              rowStartVar, colStartVar, locColStart, locColEnd, numLocConstraintVars;
616:   int              rank;
617:   int              f, field, node, marker, nclass, comp, nodeVars, var, count;
618:   PetscTruth       opt;
619:   int              ierr;

622:   MPI_Comm_rank(grid->comm, &rank);
623:   GridGetMesh(grid, &mesh);
624:   /* Check constrained fields */
625:   for(field = 0, numConstrainedFields = 0; field < numTotalFields; field++)
626:     if (fields[field].isConstrained == PETSC_TRUE) {
627:       constField = field;
628:       numConstrainedFields++;
629:     }
630:   if (numConstrainedFields == 0) return(0);
631:   if (numConstrainedFields > 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Only one field may be constrained");

633:   /* Create constrained class map */
634:   if (grid->constraintCM != PETSC_NULL) {
635:     FieldClassMapDestroy(grid->constraintCM);
636:   }
637:   FieldClassMapConstrain(grid->cm, grid, PETSC_FALSE, PETSC_TRUE, &grid->constraintCM);

639:   /* Create variable ordering for constrained and new fields */
640:   if (grid->constraintOrder != PETSC_NULL) {
641:     VarOrderingDestroy(grid->constraintOrder);
642:   }
643:   VarOrderingConstrain(grid, grid->order, &grid->constraintOrder);

645:   /* Calculate mapping between variable numberings */
646:   if (grid->constraintOrdering != PETSC_NULL) {
647:     ISDestroy(grid->constraintOrdering);
648:   }
649:   PetscMalloc(numLocVars * sizeof(int), &ordering);
650:   numLocConstraintVars = grid->constraintOrder->numLocVars - grid->constraintOrder->numLocNewVars;
651:   for(node = 0, count = 0; node < numNodes; node++) {
652:     nclass      = classes[node];
653:     rowStartVar = offsets[node];
654:     nodeVars    = classSizes[nclass];
655:     colStartVar = grid->constraintOrder->offsets[node];

657:     MeshGetNodeBoundary(mesh, node, &marker);
658:     if ((marker < 0) && (localStart[constField][nclass] >= 0)) {
659:       /* The preceding fields on the node */
660:       for(var = 0; var < localStart[constField][nclass]; var++, count++)
661:         ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var;
662:       /* Nonzeroes in C */
663:       rowStartVar += localStart[constField][nclass];
664:       colStartVar += localStart[constField][nclass];
665:       for(var = 0; var < fields[constField].numComp; var++, count++)
666:         ordering[rowStartVar-firstVar[rank]+var] = numLocConstraintVars++;
667:       /* The remaining fields on the node */
668:       for(var = fields[constField].numComp; var < nodeVars - localStart[constField][nclass]; var++, count++)
669:         ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var-fields[constField].numComp;
670:     } else {
671:       /* Nonzeroes in I */
672:       for(var = 0; var < nodeVars; var++, count++)
673:         ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var;
674:     }
675:   }
676:   if (numLocConstraintVars != numLocVars) SETERRQ(PETSC_ERR_PLIB, "Invalid constraint variable offsets");
677:   if (count != numLocVars) SETERRQ(PETSC_ERR_PLIB, "Invalid constraint variable offsets");
678:   ISCreateGeneral(PETSC_COMM_SELF, numLocVars, ordering, &grid->constraintOrdering);
679:   PetscFree(ordering);


682:   /* Calculate allocation for constraint matrix which transforms unconstrained fields to constrained and new fields:

684:          / I 0 \ / v_Int \ = / v_Int \
685:          \ 0 C / \ v_Bd  /   \ v_New /
686:   */
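  /* diagRows[i]/offdiagRows[i] count, for each local row i, the nonzeros expected in the
     diagonal block (columns owned by this process) and in the off-diagonal block; they are
     passed to MatCreateMPIAIJ() below as the d_nnz/o_nnz preallocation arrays.  Rows of
     unconstrained variables get a single entry from the I block, while rows of the
     constrained field get up to comp entries from the C block. */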
687:   PetscMalloc(numLocVars * sizeof(int), &diagRows);
688:   PetscMalloc(numLocVars * sizeof(int), &offdiagRows);
689:   PetscMemzero(diagRows,    numLocVars * sizeof(int));
690:   PetscMemzero(offdiagRows, numLocVars * sizeof(int));
691:   locColStart = grid->constraintOrder->firstVar[rank];
692:   locColEnd   = grid->constraintOrder->firstVar[rank+1];
693:   for(node = 0; node < numNodes; node++) {
694:     nclass           = classes[node];
695:     rowStartVar      = offsets[node] - firstVar[rank];
696:     nodeVars         = classSizes[nclass];

698:     /* All constrained nodes have negative markers */
699:     MeshGetNodeBoundary(mesh, node, &marker);
700:     if (marker < 0) {
701:       for(f = 0; f < numFields; f++) {
702:         field = cm->fields[f];
703:         if (fields[field].isConstrained == PETSC_TRUE) {
704:           comp = fields[field].numComp + fields[field].constraintCompDiff;
705:           (*ctx->ops->getindices)(ctx, grid->mesh, grid->constraintOrder, node, CONSTRAINT_COL_INDEX, &colStartVar);
706: 
707:           /* Check to see whether the variables fall within the diagonal block --
708:                Notice we are overestimating as if every constrained variable
709:                depends on all the new variables
710:           */
711:           if ((colStartVar + comp <= locColStart) || (colStartVar >= locColEnd)) {
712:             for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
713:               offdiagRows[rowStartVar] += comp;
714:           } else if ((colStartVar >= locColStart) && (colStartVar + comp <= locColEnd)) {
715:             for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
716:               diagRows[rowStartVar]    += comp;
717: #if 0
718:           /* Allow cuts on a single node for rectangular matrices */
719:           } else if (rectangular) {
720:             if (colStartVar < locColStart) {
721:               /* Cut is from below */
722:               for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
723:               {
724:                 diagRows[rowStartVar]    += (colStartVar + comp) - locColStart;
725:                 offdiagRows[rowStartVar] += locColStart - colStartVar;
726:               }
727:             } else {
728:               /* Cut is from above */
729:               for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
730:               {
731:                 diagRows[rowStartVar]    += locColEnd - colStartVar;
732:                 offdiagRows[rowStartVar] += (colStartVar + comp) - locColEnd;
733:               }
734:             }
735: #endif
736:           } else {
737:             /* Row blocking cuts variables on a single node. This is bad partitioning. */
738:             SETERRQ(PETSC_ERR_ARG_WRONG, "Row blocking cut variables on a single node");
739:           }
740:         } else if (fieldClasses[f][nclass]) {
741:           /* Remember localStart[][] is -1 if the field is not on the node */
742:           for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
743:             diagRows[rowStartVar] = 1;
744:         }
745:       }
746:     } else {
747:       /* Unconstrained nodes */
748:       for(var = 0; var < nodeVars; var++)
749:         diagRows[rowStartVar+var] = 1;
750:     }
751:   }

753:   /* Create the constraint matrix */
754:   if (grid->constraintMatrix != PETSC_NULL) {
755:     MatDestroy(grid->constraintMatrix);
756:   }
757:   MatCreateMPIAIJ(grid->comm, numLocVars, grid->constraintOrder->numLocVars, numVars,
758:                          grid->constraintOrder->numVars, 0, diagRows, 0, offdiagRows, &grid->constraintMatrix);
759: 
760:   MatSetOption(grid->constraintMatrix, MAT_NEW_NONZERO_ALLOCATION_ERR);

762:   /* Create the pseudo-inverse of the constraint matrix */
763:   PetscOptionsHasName(PETSC_NULL, "-grid_const_inv", &opt);
764:   if (opt == PETSC_TRUE) {
765:     if (grid->constraintInverse != PETSC_NULL) {
766:       MatDestroy(grid->constraintInverse);
767:     }
768:     MatCreateMPIAIJ(grid->comm, grid->constraintOrder->numLocVars, grid->constraintOrder->numLocVars,
769:                            grid->constraintOrder->numVars, grid->constraintOrder->numVars, 3, PETSC_NULL, 0, PETSC_NULL,
770:                            &grid->constraintInverse);
771: 
772:     MatSetOption(grid->constraintInverse, MAT_NEW_NONZERO_ALLOCATION_ERR);
773:   }

775:   /* Cleanup */
776:   PetscFree(diagRows);
777:   PetscFree(offdiagRows);

779:   return(0);
780: }

782: int GridSetupBoundary_Triangular_2D(Grid grid) {
783:   Mesh                     mesh;
784:   Partition                part;
785:   FieldClassMap            map             = grid->cm;
786:   PetscConstraintObject    constCtx        = grid->constraintCtx;
787:   int                      numBC           = grid->numBC;
788:   GridBC                  *gridBC          = grid->bc;
789:   int                      numFields       = map->numFields;
790:   int                     *fields          = map->fields;
791:   int                      numNodes        = map->numNodes;
792:   int                      numOverlapNodes = map->numOverlapNodes;
793:   int                      numGhostNodes   = map->numGhostNodes;
794:   int                      numClasses      = map->numClasses;
795:   int                    **fieldClasses    = map->fieldClasses;
796:   int                     *classes         = map->classes;
797:   int                     *classSizes      = map->classSizes;
798:   int                     *localOffsets;
799:   int                      numNewVars;
800:   VarOrdering              o;
801:   LocalVarOrdering         l;
802:   /* Ghost variable communication */
803:   int                     *ghostSendVars;    /* Number of ghost variables on a given processor interior to this domain */
804:   int                     *sumSendVars;      /* Prefix sums of ghostSendVars */
805:   int                     *ghostRecvVars;    /* Number of ghost variables on a given processor */
806:   int                     *sumRecvVars;      /* Prefix sums of ghostRecvVars */
807:   int                     *displs;           /* Offsets into ghostRecvVars */
808:   int                      numSendGhostVars; /* The number of ghost variable offsets to send to other processors */
809:   int                     *sendGhostBuffer;  /* Recv: Global node numbers Send: Offsets of these nodes */
810:   int                      numProcs, rank;
811:   int                      elemOffset;
812:   int                      proc, f, field, bc, node, locNode, gNode, marker, nclass, var;
813:   int                      ierr;

816:   grid->bdSetupCalled = PETSC_TRUE;
817:   GridGetMesh(grid, &mesh);
818:   MeshGetPartition(mesh, &part);

820:   /* Destroy old orderings */
821:   if (grid->bdOrder) {
822:     VarOrderingDestroy(grid->bdOrder);
823:   }
824:   if (grid->bdLocOrder) {
825:     LocalVarOrderingDestroy(grid->bdLocOrder);
826:   }

828:   /* Setup the boundary ordering */
829:   PetscHeaderCreate(o, _VarOrdering, int, IS_COOKIE, 0, "VarOrdering", grid->comm, VarOrderingDestroy, 0);
830:   PetscLogObjectCreate(o);
831:   PetscObjectCompose((PetscObject) o, "ClassMap", (PetscObject) map);

833:   /* Allocate memory */
834:   MPI_Comm_size(grid->comm, &numProcs);
835:   MPI_Comm_rank(grid->comm, &rank);
836:   GridGetNumFields(grid, &o->numTotalFields);
837:   PetscMalloc((numProcs+1)      * sizeof(int),   &o->firstVar);
838:   PetscMalloc(numOverlapNodes   * sizeof(int),   &o->offsets);
839:   PetscMalloc(o->numTotalFields * sizeof(int *), &o->localStart);
840:   PetscLogObjectMemory(o, (numProcs+1 + numOverlapNodes + o->numTotalFields*numClasses) * sizeof(int) + o->numTotalFields*sizeof(int *));
841:   PetscMemzero(o->localStart, o->numTotalFields * sizeof(int *));
842:   o->numLocNewVars = 0;
843:   o->numNewVars    = 0;

845:   /* Setup domain variable numbering */
846:   o->offsets[0] = 0;
847:   for(node = 0; node < numNodes-1; node++) {
848:     MeshGetNodeBoundary(mesh, node, &marker);
849:     if (marker == 0) {
850:       o->offsets[node+1] = o->offsets[node];
851:     } else {
852:       for(bc = 0; bc < numBC; bc++) {
853:         if ((gridBC[bc].reduce == PETSC_TRUE) && (gridBC[bc].boundary == marker)) break;
854:       }
855:       if (bc == numBC) {
856:         o->offsets[node+1] = o->offsets[node] + classSizes[classes[node]];
857:       } else {
858:         o->offsets[node+1] = o->offsets[node];
859:       }
860:     }
861:   }
862:   MeshGetNodeBoundary(mesh, numNodes-1, &marker);
863:   for(bc = 0; bc < numBC; bc++) {
864:     if ((gridBC[bc].reduce == PETSC_TRUE) && (gridBC[bc].boundary == marker)) break;
865:   }
866:   if (bc == numBC) {
867:     o->numLocVars = o->offsets[numNodes-1] + classSizes[classes[numNodes-1]];
868:   } else {
869:     o->numLocVars = o->offsets[numNodes-1];
870:   }
871:   if (map->isConstrained == PETSC_TRUE) {
872:     (*constCtx->ops->getsize)(constCtx, &o->numLocNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
873: 
874:     o->numLocVars += o->numLocNewVars;
875:   }
876:   MPI_Allgather(&o->numLocVars, 1, MPI_INT, &o->firstVar[1], 1, MPI_INT, o->comm);
877:   o->firstVar[0] = 0;
878:   for(proc = 1; proc <= numProcs; proc++)
879:     o->firstVar[proc] += o->firstVar[proc-1];
880:   o->numVars = o->firstVar[numProcs];
881:   if (map->isConstrained == PETSC_TRUE) {
882:     (*constCtx->ops->getsize)(constCtx, PETSC_NULL, &o->numNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
883: 
884:     MPI_Allreduce(&o->numLocNewVars, &numNewVars, 1, MPI_INT, MPI_SUM, o->comm);
885:     if (o->numNewVars != numNewVars) SETERRQ(PETSC_ERR_PLIB, "Invalid partition of new variables");
886:   }

888:   /* Initialize the overlap */
889:   o->numOverlapVars    = o->numLocVars;
890:   o->numOverlapNewVars = o->numLocNewVars;

892:   if (numProcs > 1) {
893:     /* Map local to global variable numbers */
894:     for(node = 0; node < numNodes; node++)
895:       o->offsets[node] += o->firstVar[rank];

897:     /* Initialize communication */
898:     PetscMalloc(numProcs * sizeof(int), &ghostSendVars);
899:     PetscMalloc(numProcs * sizeof(int), &sumSendVars);
900:     PetscMalloc(numProcs * sizeof(int), &ghostRecvVars);
901:     PetscMalloc(numProcs * sizeof(int), &sumRecvVars);
902:     PetscMalloc(numProcs * sizeof(int), &displs);
903:     PetscMemzero(ghostSendVars, numProcs * sizeof(int));
904:     PetscMemzero(sumSendVars,   numProcs * sizeof(int));
905:     PetscMemzero(ghostRecvVars, numProcs * sizeof(int));
906:     PetscMemzero(sumRecvVars,   numProcs * sizeof(int));
907:     PetscMemzero(displs,        numProcs * sizeof(int));

909:     /* Get number of ghost variables to receive from each processor and size of blocks --
910:          we assume here that classes[] already contains the ghost node classes */
911:     for(node = 0; node < numGhostNodes; node++) {
912:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
913:       nclass = classes[numNodes+node];
914:       ghostRecvVars[proc]++;
915:       o->numOverlapVars += classSizes[nclass];
916:     }

918:     /* Get number of constrained ghost variables to receive from each processor and size of blocks */
919:     if (map->isConstrained == PETSC_TRUE) {
920:       (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, &o->numOverlapNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
921: 
922:     }
923:     o->numOverlapVars += o->numOverlapNewVars - o->numLocNewVars;

925:     /* Get sizes of ghost variable blocks to send to each processor */
926:     MPI_Alltoall(ghostRecvVars, 1, MPI_INT, ghostSendVars, 1, MPI_INT, o->comm);

928:     /* Calculate offsets into the ghost variable receive array */
929:     for(proc = 1; proc < numProcs; proc++) {
930:       sumRecvVars[proc] = sumRecvVars[proc-1] + ghostRecvVars[proc-1];
931:       displs[proc]      = sumRecvVars[proc];
932:     }

934:     /* Calculate offsets into the ghost variable send array */
935:     for(proc = 1; proc < numProcs; proc++)
936:       sumSendVars[proc] = sumSendVars[proc-1] + ghostSendVars[proc-1];

938:     /* Send requests for ghost variable offsets to each processor */
939:     numSendGhostVars = sumSendVars[numProcs-1] + ghostSendVars[numProcs-1];
940:     PetscMalloc(numSendGhostVars * sizeof(int), &sendGhostBuffer);
941:     for(node = 0; node < numGhostNodes; node++) {
942:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
943:       o->offsets[numNodes+(displs[proc]++)] = gNode;
944:     }
945:     MPI_Alltoallv(&o->offsets[numNodes],  ghostRecvVars, sumRecvVars, MPI_INT,
946:                          sendGhostBuffer,        ghostSendVars, sumSendVars, MPI_INT, o->comm);
947: 

949:     /* Send ghost variables offsets to each processor */
950:     for(node = 0; node < numSendGhostVars; node++) {
951:       PartitionGlobalToLocalNodeIndex(part, sendGhostBuffer[node], &locNode);
952:       sendGhostBuffer[node] = o->offsets[locNode];
953:     }
954:     MPI_Alltoallv(sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT,
955:                          &o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT, o->comm);
956: 

958:     /* Cleanup */
959:     PetscFree(ghostSendVars);
960:     PetscFree(sumSendVars);
961:     PetscFree(ghostRecvVars);
962:     PetscFree(sumRecvVars);
963:     PetscFree(displs);
964:     PetscFree(sendGhostBuffer);

966:     /* We maintain local offsets for ghost variables, meaning the offsets after the last
967:        interior variable, rather than the offset of the given ghost variable in the global
968:        matrix. */
969:     PetscMalloc(numGhostNodes * sizeof(int), &o->localOffsets);
970:     for(node = 0, var = o->numLocVars; node < numGhostNodes; node++) {
971:       o->localOffsets[node] = var;
972:       nclass = classes[numNodes+node];
973:       var   += classSizes[nclass];
974:     }
975:   }

977:   /* Allocate memory */
978:   PetscMalloc(numClasses * sizeof(int), &localOffsets);
979:   PetscMemzero(localOffsets, numClasses * sizeof(int));

981:   /* Setup local field offsets */
982:   for(f = 0; f < numFields; f++) {
983:     field = fields[f];
984:     ierr  = PetscMalloc(numClasses * sizeof(int), &o->localStart[field]);
985:     for(nclass = 0; nclass < numClasses; nclass++) {
986:       if (fieldClasses[f][nclass]) {
987:         o->localStart[field][nclass]  = localOffsets[nclass];
988:         localOffsets[nclass]         += grid->fields[field].disc->bdDisc->comp;
989:       } else {
990:         o->localStart[field][nclass]  = -1;
991:       }
992:     }
993:   }
994:   grid->bdOrder = o;

996:   /* Cleanup */
997:   PetscFree(localOffsets);

999:   /* Setup the local boundary ordering */
1000:   PetscHeaderCreate(l, _LocalVarOrdering, int, IS_COOKIE, 0, "LocalVarOrdering", grid->comm, LocalVarOrderingDestroy, 0);
1001:   PetscLogObjectCreate(l);

1003:   /* Allocate memory */
1004:   l->numFields = numFields;
1005:   PetscMalloc(numFields       * sizeof(int), &l->fields);
1006:   PetscMalloc(grid->numFields * sizeof(int), &l->elemStart);
1007:   PetscLogObjectMemory(l, (numFields + grid->numFields) * sizeof(int));
1008:   PetscMemcpy(l->fields, fields, numFields * sizeof(int));

1010:   /* Put in sentinel values */
1011:   for(f = 0; f < grid->numFields; f++) {
1012:     l->elemStart[f] = -1;
1013:   }

1015:   /* Setup local and global offsets with lower-dimensional discretizations */
1016:   for(f = 0, elemOffset = 0; f < numFields; f++) {
1017:     field               = fields[f];
1018:     l->elemStart[field] = elemOffset;
1019:     elemOffset         += grid->fields[field].disc->bdDisc->size;
1020:   }
1021:   l->elemSize = elemOffset;
1022:   grid->bdLocOrder = l;

1024:   return(0);
1025: }

1027: int GridReformMesh_Triangular_2D(Grid grid) {

1031:   GridSetupBoundarySizes_Triangular_2D(grid);
1032:   return(0);
1033: }

1035: int GridGetBoundaryNext_Triangular_2D(Grid grid, int boundary, int fieldIdx, PetscTruth ghost, FieldClassMap map, int *node, int *nclass) {

1039:   do {
1040:     MeshGetBoundaryNext(grid->mesh, boundary, ghost, node);
1041:   }
1042:   /* Note: the boolean short circuit avoids indexing classes[] when node == -1 */
1043:   while((*node != -1) && (map->fieldClasses[fieldIdx][map->classes[*node]] == 0));
1044:   if (*node != -1) {
1045:     *nclass = map->classes[*node];
1046:   } else {
1047:     *nclass = -1;
1048:   }
1049:   return(0);
1050: }

1052: int GridGetBoundaryStart_Triangular_2D(Grid grid, int boundary, int fieldIdx, PetscTruth ghost, FieldClassMap map, int *node, int *nclass) {
1053:   Mesh mesh;
1054:   Mesh_Triangular *tri = (Mesh_Triangular *) grid->mesh->data;
1055:   int  b; /* Canonical boundary number */
1056:   int  ierr;

1059:   /* Find canonical boundary number */
1060:   GridGetMesh(grid, &mesh);
1061:   MeshGetBoundaryIndex(mesh, boundary, &b);
1062:   if (mesh->activeBd != -1) SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Already iterating over a boundary");
1063:   /* Find first boundary node of a class in the active field */
1064:   mesh->activeBd     = b;
1065:   mesh->activeBdOld  = b;
1066:   mesh->activeBdNode = tri->bdBegin[b] - 1;
1067:   GridGetBoundaryNext_Triangular_2D(grid, boundary, fieldIdx, ghost, map, node, nclass);
1068:   return(0);
1069: }
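
/* Boundary traversal sketch (grounded in the two routines above; the local variables shown
   are hypothetical): callers start an iteration with GridGetBoundaryStart and then call
   GridGetBoundaryNext until node == -1 is returned:

     GridGetBoundaryStart_Triangular_2D(grid, boundary, fieldIdx, ghost, map, &node, &nclass);
     while (node != -1) {
       ... operate on node, whose class is nclass ...
       GridGetBoundaryNext_Triangular_2D(grid, boundary, fieldIdx, ghost, map, &node, &nclass);
     }
*/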

1071: int GridCreateRestriction_Triangular_2D(Grid dcf, Grid dcc, GMat *gmat) {
1072:   SETERRQ(PETSC_ERR_SUP, " ");
1073: }

1075: int GridEvaluateRhs_Triangular_2D(Grid grid, GVec x, GVec f, PetscObject ctx) {
1076:   Mesh                  mesh;
1077:   Partition             part;
1078:   Field                *fields               = grid->fields;
1079:   int                   numNewFields         = grid->numNewFields;         /* The number of new fields added by constraints */
1080:   GridFunc             *rhsFuncs             = grid->rhsFuncs;             /* The Rhs PointFunctions */
1081:   int                   numRhsOps            = grid->numRhsOps;            /* The number of Rhs operators */
1082:   GridOp               *rhsOps               = grid->rhsOps;               /* The operators on the Rhs */
1083:   PetscTruth            reduceSystem         = grid->reduceSystem;
1084:   PetscTruth            reduceElement        = grid->reduceElement;
1085:   PetscTruth            explicitConstraints  = grid->explicitConstraints;
1086:   PetscConstraintObject constCtx             = grid->constraintCtx;        /* The constraint object */
1087:   int                   numFields            = grid->cm->numFields;        /* The number of fields in the calculation */
1088:   LocalVarOrdering      locOrder             = grid->locOrder;             /* The default local variable ordering */
1089:   int                   elemSize             = locOrder->elemSize;         /* The number of shape funcs in the elem mat */
1090:   int                  *elemStart            = locOrder->elemStart;        /* The offset of each field in the elem mat */
1091:   ElementVec            vec                  = grid->vec;                  /* The element vector */
1092:   PetscScalar          *array                = vec->array;                 /* The values in the element vector */
1093:   Vec                   ghostVec             = grid->ghostVec;             /* The local solution vector */
1094:   ElementVec            elemGhostVec         = grid->ghostElementVec;      /* The element vector from ghostVec */
1095:   PetscScalar          *ghostArray           = elemGhostVec->array;        /* The values in elemGhostVec */
1096:   ElementMat            mat                  = grid->mat;                  /* A temporary element matrix */
1097:   PetscScalar          *matArray             = mat->array;                 /* The values in the element matrix */
1098:   MeshMover             mover;
1099:   Grid                  ALEGrid;                                           /* The grid describing the mesh velocity */
1100:   VarOrdering           order;                                             /* The default variable ordering */
1101:   ElementVec            MeshALEVec;                                        /* ALE velocity vector from mesh */
1102:   ElementVec            ALEVec;                                            /* ALE velocity vector */
1103:   PetscScalar          *ALEArray;                                          /* The values in the ALE element vector */
1104:   int                   computeFunc, computeLinear, computeNonlinear;      /* Flags for selective computation */
1105:   PetscScalar          *nonlinearArgs[2];
1106:   int                   newComp = 0;
1107:   int                   numElements;
1108:   int                   sField, tField, op, newField, elem, func, fieldIndex;
1109: #ifdef PETSC_USE_BOPT_g
1110:   int                   var;
1111:   PetscTruth            opt;
1112: #endif
1113:   int                   ierr;

1116:   GridGetMesh(grid, &mesh);
1117:   MeshGetPartition(mesh, &part);
1118:   if (explicitConstraints == PETSC_TRUE) {
1119:     order = grid->constraintOrder;
1120:   } else {
1121:     order = grid->order;
1122:   }
1123:   /* Handle selective computation */
1124:   computeFunc        = 1;
1125:   if (grid->activeOpTypes[0] == PETSC_FALSE) computeFunc      = 0;
1126:   computeLinear      = 1;
1127:   if (grid->activeOpTypes[1] == PETSC_FALSE) computeLinear    = 0;
1128:   computeNonlinear   = 1;
1129:   if (grid->activeOpTypes[2] == PETSC_FALSE) computeNonlinear = 0;

1131:   /* Fill the local solution vectors */
1132:   if (x != PETSC_NULL) {
1133:     GridGlobalToLocal(grid, INSERT_VALUES, x);
1134:   }

1136:   /* Setup ALE variables */
1137:   if (grid->ALEActive == PETSC_TRUE) {
1138:     MeshGetMover(mesh, &mover);
1139:     MeshMoverGetVelocityGrid(mover, &ALEGrid);
1140:     /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
1141:     ElementVecDuplicate(grid->vec, &ALEVec);
1142:     ALEArray   = ALEVec->array;
1143:     MeshALEVec = ALEGrid->vec;
1144:   } else {
1145:     ALEArray   = PETSC_NULL;
1146:     MeshALEVec = PETSC_NULL;
1147:   }

1149:   /* Loop over elements */
1150:   PartitionGetNumElements(part, &numElements);
1151:   for(elem = 0; elem < numElements; elem++) {
1152:     /* Initialize element vector */
1153:     ElementVecZero(vec);
1154:     vec->reduceSize          = locOrder->elemSize;
1155:     elemGhostVec->reduceSize = locOrder->elemSize;

1157:     /* Setup local row indices for the ghost vector */
1158:     GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
1159:     /* Setup local solution vector */
1160:     GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
1161:     /* Must transform to unconstrained variables for element integrals */
1162:     GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);

1164:     /* Setup ALE variables */
1165:     if (grid->ALEActive == PETSC_TRUE) {
1166:       GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
1167:       GridLocalToElement(ALEGrid, MeshALEVec);
1168:     }

1170:     if (computeFunc) {
1171:       for(func = 0; func < grid->numRhsFuncs; func++) {
1172:         if (fields[rhsFuncs[func].field].isActive == PETSC_FALSE) continue;
1173:         tField = rhsFuncs[func].field;
1174:         DiscretizationEvaluateFunctionGalerkin(fields[tField].disc, mesh, *rhsFuncs[tField].func, rhsFuncs[tField].alpha, elem,
1175:                                                       &array[elemStart[tField]], ctx);
1176: 
1177:       }
1178: #ifdef PETSC_USE_BOPT_g
1179: #endif
1180:     }

1182:     for(op = 0; op < numRhsOps; op++) {
1183:       if (fields[rhsOps[op].field].isActive == PETSC_FALSE) continue;
1184:       if ((rhsOps[op].nonlinearOp != PETSC_NULL) && (computeNonlinear)) {
1185:         tField = rhsOps[op].field;
1186:         nonlinearArgs[0] = &ghostArray[elemStart[tField]];
1187:         nonlinearArgs[1] = &ghostArray[elemStart[tField]];
1188:         if (rhsOps[op].isALE) {
1189:           GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, tField, ALEVec);
1190:           DiscretizationEvaluateNonlinearALEOperatorGalerkin(fields[tField].disc, mesh, rhsOps[op].nonlinearOp,
1191:                                                                     rhsOps[op].alpha, elem, 2, nonlinearArgs,
1192:                                                                     ALEArray, &array[elemStart[tField]], ctx);
1193: 
1194:         } else {
1195:           DiscretizationEvaluateNonlinearOperatorGalerkin(fields[tField].disc, mesh, rhsOps[op].nonlinearOp,
1196:                                                                  rhsOps[op].alpha, elem, 2, nonlinearArgs,
1197:                                                                  &array[elemStart[tField]], ctx);
1198: 
1199:         }
1200:       } else if (computeLinear) {
1201:         sField = rhsOps[op].field;
1202:         tField = fields[sField].disc->operators[rhsOps[op].op]->test->field;
1203:         ElementMatZero(mat);
1204:         if (rhsOps[op].isALE) {
1205:           GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
1206:           DiscretizationEvaluateALEOperatorGalerkinMF(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1207:                                                              rhsOps[op].op, rhsOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1208:                                                              &ghostArray[elemStart[sField]], ALEArray, array, matArray, ctx);
1209: 
1210:         } else {
1211:           DiscretizationEvaluateOperatorGalerkinMF(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1212:                                                           rhsOps[op].op, rhsOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1213:                                                           &ghostArray[elemStart[sField]], array, matArray, ctx);
1214: 
1215:         }
1216:       }
1217: #ifdef PETSC_USE_BOPT_g
1218: #endif
1219:     }

1221:     /* Setup global row indices, with reduction if necessary */
1222:     GridCalcGeneralElementVecIndices(grid, elem, order, PETSC_NULL, PETSC_FALSE, vec);
1223: #ifdef PETSC_USE_BOPT_g
1224:     PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
1225:     if (opt == PETSC_TRUE) {
1226:       for(var = 0; var < vec->reduceSize; var++)
1227:         PetscPrintf(grid->comm, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
1228:     }
1229: #endif
1230:     /* Put values in the global vector */
1231:     ElementVecSetValues(vec, f, ADD_VALUES);
1232:   }

1234:   /* Cleanup ALE variables */
1235:   if (grid->ALEActive == PETSC_TRUE) {
1236:     ElementVecDestroy(ALEVec);
1237:   }

1239:   /* Evaluate self-interaction of new fields created by constraints */
1240:   if (explicitConstraints == PETSC_TRUE) {
1241:     /* WARNING: This only accommodates 1 constrained field */
1242:     /* Get constraint information */
1243:     for(fieldIndex = 0; fieldIndex < numFields; fieldIndex++) {
1244:       sField = grid->cm->fields[fieldIndex];
1245:       if (fields[sField].isConstrained == PETSC_TRUE) {
1246:         newComp = fields[sField].numComp + fields[sField].constraintCompDiff;
1247:         break;
1248:       }
1249:     }
1250:     /* Calculate self-interaction */
1251:     for(newField = 0; newField < numNewFields; newField++) {
1252:       /* Initialize element vector */
1253:       ElementVecZero(vec);
1254:       vec->reduceSize = newComp;

1256:       /* Calculate the indices and contribution to the element vector from the new field */
1257:       (*constCtx->ops->newelemvec)(constCtx, order, newField, vec);
1258: #ifdef PETSC_USE_BOPT_g
1259:       PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly_constrained", &opt);
1260:       if (opt == PETSC_TRUE) {
1261:         for(var = 0; var < vec->reduceSize; var++)
1262:           PetscPrintf(grid->comm, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
1263:       }
1264: #endif
1265:       /* Put values in the global vector */
1266:       ElementVecSetValues(vec, f, ADD_VALUES);
1267: #ifdef PETSC_USE_BOPT_g
1268: #endif
1269:     }
1270:   }

1272:   /* Reset element vectors */
1273:   vec->reduceSize          = locOrder->elemSize;
1274:   elemGhostVec->reduceSize = locOrder->elemSize;

1276:   VecAssemblyBegin(f);
1277:   VecAssemblyEnd(f);
1278:   return(0);
1279: }
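
      /*
         GridEvaluateSystemMatrix_Triangular_2D assembles the global system matrix A element
         by element: each pass gathers the local (ghosted) solution, projects it to
         unconstrained variables for the element integrals, optionally interpolates the ALE
         mesh velocity, lets every registered matrix operator add its Galerkin block to the
         element matrix, maps the element indices into the global (possibly constrained)
         ordering, and adds the result into A.  When constraints are explicit, a second pass
         assembles the self-interaction of the new fields they introduce.
      */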

1281: int GridEvaluateSystemMatrix_Triangular_2D(Grid grid, GVec x, GMat *J, GMat *M, MatStructure *flag, PetscObject ctx) {
1282:   GMat                  A             = *J;                     /* The working system matrix */
1283:   Field                *fields        = grid->fields;
1284:   int                   numNewFields  = grid->numNewFields;     /* The number of new fields added by constraints */
1285:   int                   numMatOps     = grid->numMatOps;        /* The number of operators in the matrix */
1286:   GridOp               *matOps        = grid->matOps;           /* The operators in the system matrix */
1287:   VarOrdering           constOrder    = grid->constraintOrder;  /* The constrained variable ordering */
1288:   PetscTruth            reduceSystem  = grid->reduceSystem;
1289:   PetscTruth            reduceElement = grid->reduceElement;
1290:   PetscTruth            expConst      = grid->explicitConstraints;
1291:   PetscConstraintObject constCtx      = grid->constraintCtx;    /* The constraint object */
1292:   int                   numFields     = grid->cm->numFields;    /* The number of fields in the calculation */
1293:   LocalVarOrdering      locOrder      = grid->locOrder;         /* The default local variable ordering */
1294:   int                   elemSize      = locOrder->elemSize;     /* The number of shape functions in the element matrix */
1295:   int                  *elemStart     = locOrder->elemStart;    /* The offset of each field in the element matrix */
1296:   ElementMat            mat           = grid->mat;              /* The element matrix */
1297:   PetscScalar          *array         = mat->array;             /* The values in the element matrix */
1298:   Vec                   ghostVec      = grid->ghostVec;         /* The local solution vector */
1299:   ElementVec            elemGhostVec  = grid->ghostElementVec;  /* The element vector from ghostVec */
1300:   PetscScalar          *ghostArray    = elemGhostVec->array;    /* The values in elemGhostVec */
1301:   Mesh                  mesh;
1302:   Partition             part;
1303:   MeshMover             mover;
1304:   Grid                  ALEGrid;                                /* The grid describing the mesh velocity */
1305:   VarOrdering           order;                                  /* The default variable ordering */
1306:   ElementVec            MeshALEVec;                             /* ALE velocity vector with mesh discretization */
1307:   ElementVec            ALEVec;                                 /* ALE velocity vector */
1308:   PetscScalar          *ALEArray;                               /* The values in the ALE element vector */
1309:   int                   newComp = 0;
1310:   int                   numElements;
1311:   int                   elem, f, sField, tField, op, newField;
1312: #ifdef PETSC_USE_BOPT_g
1313:   PetscTruth            opt;
1314: #endif
1315:   int                   ierr;

1318:   GridGetMesh(grid, &mesh);
1319:   MeshGetPartition(mesh, &part);
1320:   if (expConst == PETSC_TRUE) {
1321:     order = grid->constraintOrder;
1322:   } else {
1323:     order = grid->order;
1324:   }
1325:   /* Fill the local solution vectors */
1326:   if (x != PETSC_NULL) {
1327:     GridGlobalToLocal(grid, INSERT_VALUES, x);
1328:   }

1330:   /* Setup ALE variables -- No new variables should be ALE so ALEVec is not recalculated */
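        /* ALEVec shares this grid's element layout (it is duplicated from grid->vec) and is
           filled inside the element loop by interpolating MeshALEVec, which carries the mesh
           velocity in the velocity grid's own discretization. */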
1331:   if (grid->ALEActive == PETSC_TRUE) {
1332:     MeshGetMover(mesh, &mover);
1333:     MeshMoverGetVelocityGrid(mover, &ALEGrid);
1334:     /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
1335:     ElementVecDuplicate(grid->vec, &ALEVec);
1336:     ALEArray   = ALEVec->array;
1337:     MeshALEVec = ALEGrid->vec;
1338:   } else {
1339:     ALEArray   = PETSC_NULL;
1340:     MeshALEVec = PETSC_NULL;
1341:   }

1343:   /* Loop over elements */
1344:   PartitionGetNumElements(part, &numElements);
1345:   for(elem = 0; elem < numElements; elem++) {
1346:     /* Initialize element matrix */
1347:     ElementMatZero(mat);
1348:     mat->reduceRowSize       = locOrder->elemSize;
1349:     mat->reduceColSize       = locOrder->elemSize;
1350:     elemGhostVec->reduceSize = locOrder->elemSize;

1352:     /* Setup local row indices for the ghost vector */
1353:     GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
1354:     /* Setup local solution vector */
1355:     GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
1356:     /* Must transform to unconstrained variables for element integrals */
1357:     GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);

1359:     /* Setup ALE variables */
1360:     if (grid->ALEActive == PETSC_TRUE) {
1361:       GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
1362:       GridLocalToElement(ALEGrid, MeshALEVec);
1363:     }

1365:     /* Calculate the contribution to the element matrix from each field */
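          /* Each active operator couples a trial field (sField) with the test field of the
             chosen discretization operator; its block is written into the element matrix at
             the offsets elemStart[tField] (test) and elemStart[sField] (trial), using the
             element solution values in ghostArray. */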
1366:     for(op = 0; op < numMatOps; op++) {
1367:       sField = matOps[op].field;
1368:       tField = fields[sField].disc->operators[matOps[op].op]->test->field;
1369:       if (fields[sField].isActive) {
1370:         if (matOps[op].isALE) {
1371:           GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
1372:           DiscretizationEvaluateALEOperatorGalerkin(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1373:                                                            matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1374:                                                            ALEArray, array, ctx);
1375: 
1376:         } else {
1377:           DiscretizationEvaluateOperatorGalerkin(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1378:                                                         matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1379:                                                         array, ctx);
1380: 
1381:         }
1382: #ifdef PETSC_USE_BOPT_g
1383: #endif
1384:       }
1385:     }

1387:     /* Setup global numbering, with reduction if necessary */
1388:     GridCalcGeneralElementMatIndices(grid, elem, order, order, PETSC_FALSE, mat);
1389: #ifdef PETSC_USE_BOPT_g
1390:     PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
1391:     if (opt == PETSC_TRUE) {
1392:       ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1393:     }
1394: #endif
1395:     /* Put values in the global matrix */
1396:     ElementMatSetValues(mat, A, ADD_VALUES);
1397:   }

1399:   /* Evaluate self-interaction of new fields created by constraints */
1400:   if (expConst == PETSC_TRUE) {
1401:     /* WARNING: This only accommodates 1 constrained field */
1402:     /* Get constraint information */
1403:     for(f = 0; f < numFields; f++) {
1404:       sField = grid->cm->fields[f];
1405:       if (fields[sField].isConstrained == PETSC_TRUE) {
1406:         newComp = fields[sField].numComp + fields[sField].constraintCompDiff;
1407:         break;
1408:       }
1409:     }
1410:     /* Calculate self-interaction */
1411:     for(newField = 0; newField < numNewFields; newField++) {
1412:       /* Initialize element matrix */
1413:       ElementMatZero(mat);
1414:       mat->reduceRowSize = newComp;
1415:       mat->reduceColSize = newComp;

1417:       /* Calculate the indices and contribution to the element matrix from the new field */
1418:       (*constCtx->ops->newelemmat)(constCtx, constOrder, newField, mat);
1419: #ifdef PETSC_USE_BOPT_g
1420:       PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly_constrained", &opt);
1421:       if (opt == PETSC_TRUE) {
1422:         ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1423:       }
1424: #endif
1425:       /* Put values in the global matrix */
1426:       ElementMatSetValues(mat, A, ADD_VALUES);
1427: #ifdef PETSC_USE_BOPT_g
1428: #endif
1429:     }
1430:   }

1432:   /* Assemble matrix */
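        /* ElementMatSetValues added values with ADD_VALUES, so contributions destined for
           rows owned by other processes are still cached; MatAssemblyBegin/End with
           MAT_FINAL_ASSEMBLY communicates them and completes the matrix. */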
1433:   MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
1434:   MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

1436:   /* Reset element matrix and vector */
1437:   mat->reduceRowSize       = locOrder->elemSize;
1438:   mat->reduceColSize       = locOrder->elemSize;
1439:   elemGhostVec->reduceSize = locOrder->elemSize;

1441:   /* Cleanup */
1442:   if (grid->ALEActive == PETSC_TRUE) {
1443:     ElementVecDestroy(ALEVec);
1444:   }

1446:   GridResetConstrainedMultiply_Private(grid, A);
1447:   return(0);
1448: }
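
      /*
         Debug tracing sketch (illustration only): with PETSC_USE_BOPT_g defined (a BOPT=g
         build), the assembly loops above honor the options

             -trace_vec_assembly              print each element vector entry
             -trace_mat_assembly              view each element matrix
             -trace_vec_assembly_constrained  the same for constraint-generated fields
             -trace_mat_assembly_constrained

         For example (the executable name is hypothetical):

             mpirun -np 2 ./ex1 -trace_mat_assembly -trace_vec_assembly
      */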

1450: static struct _GridOps GOps = {GridSetUp_Triangular_2D,
1451:                                GridSetupBoundary_Triangular_2D,
1452:                                GridSetupConstraints_Triangular_2D,
1453:                                GridSetupGhostScatter_Triangular_2D,
1454:                                PETSC_NULL/* GridSetFromOptions */,
1455:                                PETSC_NULL/* GridDuplicate */,
1456:                                PETSC_NULL/* GridReform */,
1457:                                PETSC_NULL/* GridCopy */,
1458:                                GridDestroy_Triangular_2D,
1459:                                GridView_Triangular_2D,
1460:                                GridGetBoundaryStart_Triangular_2D,
1461:                                GridGetBoundaryNext_Triangular_2D,
1462:                                GridReformMesh_Triangular_2D,
1463:                                GridCreateGMat_Triangular_2D,
1464:                                GridCreateVarOrdering_Triangular_2D,
1465:                                GridCreateLocalVarOrdering_Triangular_2D,
1466:                                GridCreateVarScatter_Triangular_2D,
1467:                                GridVarOrderingConstrain_Triangular_2D,
1468:                                GridCalcElementVecIndices_Triangular_2D,
1469:                                GridCalcElementMatIndices_Triangular_2D,
1470:                                GridCalcBoundaryElementVecIndices_Triangular_2D,
1471:                                GridCalcBoundaryElementMatIndices_Triangular_2D,
1472:                                GridProjectElementVec_Triangular_2D,
1473:                                GVecGetLocalGVec_Triangular_2D,
1474:                                GVecRestoreLocalGVec_Triangular_2D,
1475:                                PETSC_NULL/* GVecGetWorkGVec */,
1476:                                PETSC_NULL/* GVecRestoreWorkGVec */,
1477:                                GVecGlobalToLocal_Triangular_2D,
1478:                                GVecLocalToGlobal_Triangular_2D,
1479:                                GVecView_Triangular_2D,
1480:                                GridCreateRestriction_Triangular_2D,
1481:                                GVecEvaluateFunction_Triangular_2D,
1482:                                GVecEvaluateFunctionBoundary_Triangular_2D,
1483:                                GVecEvaluateFunctionCollective_Triangular_2D,
1484:                                GVecEvaluateFunctionGalerkin_Triangular_2D,
1485:                                GVecEvaluateFunctionGalerkinCollective_Triangular_2D,
1486:                                GVecEvaluateBoundaryFunctionGalerkin_Triangular_2D,
1487:                                GVecEvaluateBoundaryFunctionGalerkinCollective_Triangular_2D,
1488:                                GVecEvaluateOperatorGalerkin_Triangular_2D,
1489:                                GVecEvaluateNonlinearOperatorGalerkin_Triangular_2D,
1490:                                GVecEvaluateSystemMatrix_Triangular_2D,
1491:                                GVecEvaluateSystemMatrixDiagonal_Triangular_2D,
1492:                                GMatView_Triangular_2D,
1493:                                GMatEvaluateOperatorGalerkin_Triangular_2D,
1494:                                GMatEvaluateALEOperatorGalerkin_Triangular_2D,
1495:                                GMatEvaluateALEConstrainedOperatorGalerkin_Triangular_2D,
1496:                                GMatEvaluateBoundaryOperatorGalerkin_Triangular_2D,
1497:                                GridEvaluateRhs_Triangular_2D,
1498:                                GridEvaluateSystemMatrix_Triangular_2D};
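
      /*
         GOps is the function table for the triangular 2D implementation:
         GridCreate_Triangular_2D below copies it into grid->ops, after which generic Grid
         calls dispatch to the _Triangular_2D routines defined in this file.  Entries left
         as PETSC_NULL are operations this implementation does not provide.  The EXTERN_C
         markers give the constructor C linkage so it can be looked up by name, as is usual
         for PETSc component constructors.
      */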

1500: EXTERN_C_BEGIN
1501: int GridCreate_Triangular_2D(Grid grid) {

1505:   PetscMemcpy(grid->ops, &GOps, sizeof(struct _GridOps));
1506:   /* General grid description */
1507:   grid->dim  = 2;
1508:   grid->data = PETSC_NULL;
1509:   return(0);
1510: }
1511: EXTERN_C_END