Actual source code: varorder2d.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: varorder2d.c,v 1.8 2000/01/30 18:31:33 huangp Exp $";
  3: #endif

  5: #include "src/grid/gridimpl.h"         /*I "grid.h" I*/
  6: #include "varorder2d.h"

  8: int GridCreateVarOrdering_Triangular_2D(Grid grid, FieldClassMap map, VarOrdering *order) {
  9:   Mesh                  mesh;
 10:   Partition             part;
 11:   PetscConstraintObject constCtx        = grid->constraintCtx;
 12:   int                   numFields       = map->numFields;
 13:   int                  *fields          = map->fields;
 14:   int                   numNodes        = map->numNodes;
 15:   int                   numOverlapNodes = map->numOverlapNodes;
 16:   int                   numGhostNodes   = map->numGhostNodes;
 17:   int                   numClasses      = map->numClasses;
 18:   int                 **fieldClasses    = map->fieldClasses;
 19:   int                  *classes         = map->classes;
 20:   int                  *classSizes      = map->classSizes;
 21:   int                  *localOffsets;
 22:   int                   numNewVars;
 23:   VarOrdering           o;
 24:   /* Ghost variable communication */
 25:   int                  *ghostSendVars;    /* Number of ghost variables on a given processor interior to this domain */
 26:   int                  *sumSendVars;      /* Prefix sums of ghostSendVars */
 27:   int                  *ghostRecvVars;    /* Number of ghost variables on a given processor */
 28:   int                  *sumRecvVars;      /* Prefix sums of ghostRecvVars */
 29:   int                  *displs;           /* Offsets into ghostRecvVars */
 30:   int                   numSendGhostVars; /* The number of ghost variable offsets to send to other processors */
 31:   int                  *sendGhostBuffer;  /* Recv: global node numbers; Send: offsets of these nodes */
 32:   int                   numProcs, rank;
 33:   int                   proc, f, field, comp, node, locNode, gNode, nclass, var;
 34:   int                   ierr;

 37:   /* Create the ordering */
 38:   PetscHeaderCreate(o, _VarOrdering, int, VAR_ORDER_COOKIE, 0, "VarOrdering", grid->comm, VarOrderingDestroy, 0);
 39:   PetscLogObjectCreate(o);
 40:   PetscObjectCompose((PetscObject) o, "ClassMap", (PetscObject) map);

 42:   /* Allocate memory */
 43:   MPI_Comm_size(grid->comm, &numProcs);
 44:   MPI_Comm_rank(grid->comm, &rank);
 45:   GridGetNumFields(grid, &o->numTotalFields);
 46:   PetscMalloc((numProcs+1)      * sizeof(int),   &o->firstVar);
 47:   PetscMalloc(numOverlapNodes   * sizeof(int),   &o->offsets);
 48:   PetscMalloc(o->numTotalFields * sizeof(int *), &o->localStart);
 49:   PetscLogObjectMemory(o, (numProcs+1 + numOverlapNodes + o->numTotalFields*numClasses)*sizeof(int) + o->numTotalFields*sizeof(int *));
 50:   PetscMemzero(o->localStart, o->numTotalFields * sizeof(int *));
 51:   o->numLocNewVars = 0;
 52:   o->numNewVars    = 0;

 54:   /* Setup domain variable numbering */
 55:   o->offsets[0] = 0;
 56:   for(node = 1; node < numNodes; node++)
 57:     o->offsets[node] = o->offsets[node-1] + classSizes[classes[node-1]];
 58:   o->numLocVars = o->offsets[numNodes-1] + classSizes[classes[numNodes-1]];
 59:   if (map->isConstrained == PETSC_TRUE) {
 60:     (*constCtx->ops->getsize)(constCtx, &o->numLocNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
 62:     o->numLocVars += o->numLocNewVars;
 63:   }
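       /* Gather each process's local variable count and convert it to a prefix sum, so that
          firstVar[p] is the global index of the first variable owned by process p */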
 64:   MPI_Allgather(&o->numLocVars, 1, MPI_INT, &o->firstVar[1], 1, MPI_INT, o->comm);
 65:   o->firstVar[0] = 0;
 66:   for(proc = 1; proc <= numProcs; proc++)
 67:     o->firstVar[proc] += o->firstVar[proc-1];
 68:   o->numVars = o->firstVar[numProcs];
 69:   if (map->isConstrained == PETSC_TRUE) {
 70:     (*constCtx->ops->getsize)(constCtx, PETSC_NULL, &o->numNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
 72:     MPI_Allreduce(&o->numLocNewVars, &numNewVars, 1, MPI_INT, MPI_SUM, o->comm);
 73:     if (o->numNewVars != numNewVars)
 74:       SETERRQ(PETSC_ERR_PLIB, "Invalid partition of new variables");
 75:   }

 77:   /* Initialize overlap size */
 78:   o->numOverlapVars    = o->numLocVars;
 79:   o->numOverlapNewVars = o->numLocNewVars;

 81:   GridGetMesh(grid, &mesh);
 82:   MeshGetPartition(mesh, &part);
 83:   if (numProcs > 1) {
 84:     /* Map local to global variable numbers */
 85:     for(node = 0; node < numNodes; node++)
 86:       o->offsets[node] += o->firstVar[rank];

 88: #if 0
 89:     GridGhostExchange(o->comm, numGhostNodes, q->ghostNodeProcs, q->ghostNodes, PETSC_INT,
 90:                              q->firstNode, INSERT_VALUES, SCATTER_FORWARD, o->offsets, &o->offsets[numNodes]);
 91: #else
 92:     /* Initialize communication */
 93:     PetscMalloc(numProcs * sizeof(int), &ghostSendVars);
 94:     PetscMalloc(numProcs * sizeof(int), &sumSendVars);
 95:     PetscMalloc(numProcs * sizeof(int), &ghostRecvVars);
 96:     PetscMalloc(numProcs * sizeof(int), &sumRecvVars);
 97:     PetscMalloc(numProcs * sizeof(int), &displs);
 98:     PetscMemzero(ghostSendVars, numProcs * sizeof(int));
 99:     PetscMemzero(sumSendVars,   numProcs * sizeof(int));
100:     PetscMemzero(ghostRecvVars, numProcs * sizeof(int));
101:     PetscMemzero(sumRecvVars,   numProcs * sizeof(int));
102:     PetscMemzero(displs,        numProcs * sizeof(int));

104:     /* Get number of ghost variables to receive from each processor and size of blocks --
105:        we assume here that classes[] already contains the ghost node classes */
106:     for(node = 0; node < numGhostNodes; node++) {
107:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
108:       nclass = classes[numNodes+node];
109:       ghostRecvVars[proc]++;
110:       o->numOverlapVars += classSizes[nclass];
111:     }

113:     /* Get number of constrained ghost variables to receive from each processor and size of blocks */
114:     if (map->isConstrained == PETSC_TRUE) {
115:       (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, &o->numOverlapNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
117:     }
118:     o->numOverlapVars += o->numOverlapNewVars - o->numLocNewVars;
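         /* o->numOverlapVars now counts the local variables, every ghost node's variables, and
            any constrained (new) variables seen from other processes but not owned locally */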

120:     /* Get sizes of ghost variable blocks to send to each processor */
121:     MPI_Alltoall(ghostRecvVars, 1, MPI_INT, ghostSendVars, 1, MPI_INT, o->comm);
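         /* ghostSendVars[proc] now holds the number of our interior nodes for which process
            'proc' needs variable offsets */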

123:     /* Calculate offsets into the ghost variable receive array */
124:     for(proc = 1; proc < numProcs; proc++) {
125:       sumRecvVars[proc] = sumRecvVars[proc-1] + ghostRecvVars[proc-1];
126:       displs[proc]      = sumRecvVars[proc];
127:     }

129:     /* Calculate offsets into the ghost variable send array */
130:     for(proc = 1; proc < numProcs; proc++)
131:       sumSendVars[proc] = sumSendVars[proc-1] + ghostSendVars[proc-1];

133:     /* Send requests for ghost variable offsets to each processor */
134:     numSendGhostVars = sumSendVars[numProcs-1] + ghostSendVars[numProcs-1];
135:     PetscMalloc(numSendGhostVars * sizeof(int), &sendGhostBuffer);
136:     for(node = 0; node < numGhostNodes; node++) {
137:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
138:       o->offsets[numNodes+(displs[proc]++)] = gNode;
139:     }
140:     MPI_Alltoallv(&o->offsets[numNodes],  ghostRecvVars, sumRecvVars, MPI_INT,
141:                   sendGhostBuffer,        ghostSendVars, sumSendVars, MPI_INT, o->comm);

144:     /* Send ghost variable offsets to each processor */
145:     for(node = 0; node < numSendGhostVars; node++) {
146:       PartitionGlobalToLocalNodeIndex(part, sendGhostBuffer[node], &locNode);
147:       sendGhostBuffer[node] = o->offsets[locNode];
148:     }
149:     MPI_Alltoallv(sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT,
150:                   &o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT, o->comm);
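         /* The ghost section of o->offsets now holds, for each ghost node, the owning process's
            offset of that node's first variable */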

153:     /* Cleanup */
154:     PetscFree(ghostSendVars);
155:     PetscFree(sumSendVars);
156:     PetscFree(ghostRecvVars);
157:     PetscFree(sumRecvVars);
158:     PetscFree(displs);
159:     PetscFree(sendGhostBuffer);
160: #endif

162:     /* We maintain local offsets for ghost variables, meaning the offsets after the last
163:        interior variable, rather than the offset of the given ghost variable in the global
164:        matrix. */
165:     PetscMalloc(numGhostNodes * sizeof(int), &o->localOffsets);
166:     for(node = 0, var = o->numLocVars; node < numGhostNodes; node++) {
167:       o->localOffsets[node] = var;
168:       nclass = classes[numNodes+node];
169:       var   += classSizes[nclass];
170:     }
171:   }

173:   /* Allocate memory */
174:   PetscMalloc(numClasses * sizeof(int), &localOffsets);
175:   PetscMemzero(localOffsets, numClasses * sizeof(int));

177:   /* Setup local field offsets */
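       /* localStart[field][class] is the offset of 'field' within the variable block of a node
          of the given class, or -1 if the field is not active on that class */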
178:   for(f = 0; f < numFields; f++) {
179:     field = fields[f];
180:     ierr  = PetscMalloc(numClasses * sizeof(int), &o->localStart[field]);
181:     for(nclass = 0; nclass < numClasses; nclass++) {
182:       if (fieldClasses[f][nclass]) {
183:         GridGetFieldComponents(grid, field, &comp);
184:         o->localStart[field][nclass]  = localOffsets[nclass];
185:         localOffsets[nclass]         += comp;
186:       } else {
187:         o->localStart[field][nclass]  = -1;
188:       }
189:     }
190:   }
191:   *order = o;

193:   /* Cleanup */
194:   PetscFree(localOffsets);

196:   return(0);
197: }

199: int GridVarOrderingConstrain_Triangular_2D(Grid grid, int constField, PetscConstraintObject constCtx,
200:                                            FieldClassMap constMap, VarOrdering oldOrder, VarOrdering *order)
201: {
202:   Mesh          mesh;
203:   Partition     part;
204:   int           numFields          = constMap->numFields;
205:   int          *fields             = constMap->fields;
206:   int           numNodes           = constMap->numNodes;
207:   int           numOverlapNodes    = constMap->numOverlapNodes;
208:   int           numGhostNodes      = constMap->numGhostNodes;
209:   int           numClasses         = constMap->numClasses;
210:   int          *classes            = constMap->classes;
211:   int          *classMap           = constMap->classMap[constMap->mapSize-1];
212:   int         **localStart         = oldOrder->localStart;
213:   int           numClassesOld;
214:   int           comp;
215:   FieldClassMap map;
216:   VarOrdering   o;
217:   /* Ghost variable communication */
218:   int          *ghostSendVars;    /* Number of ghost variables on a given processor interior to this domain */
219:   int          *sumSendVars;      /* Prefix sums of ghostSendVars */
220:   int          *ghostRecvVars;    /* Number of ghost variables on a given processor */
221:   int          *sumRecvVars;      /* Prefix sums of ghostRecvVars */
222:   int          *displs;           /* Offsets into ghostRecvVars */
223:   int           numSendGhostVars; /* The number of ghost variable offsets to send to other processors */
224:   int          *sendGhostBuffer;  /* Recv: global node numbers; Send: offsets of these nodes */
225:   int           numProcs, rank;
226:   int           proc, f, field, node, locNode, gNode, nclass, i, var;
227:   int           ierr;

230:   /* Create the ordering */
231:   PetscHeaderCreate(o, _VarOrdering, int, VAR_ORDER_COOKIE, 0, "VarOrdering", grid->comm, VarOrderingDestroy, 0);
232:   PetscLogObjectCreate(o);
233:   PetscObjectCompose((PetscObject) o, "ClassMap", (PetscObject) constMap);

235:   /* Allocate memory */
236:   MPI_Comm_size(grid->comm, &numProcs);
237:   MPI_Comm_rank(grid->comm, &rank);
238:   GridGetFieldComponents(grid, constField, &comp);
239:   o->numTotalFields = oldOrder->numTotalFields;
240:   PetscMalloc((numProcs+1)      * sizeof(int),   &o->firstVar);
241:   PetscMalloc(numOverlapNodes   * sizeof(int),   &o->offsets);
242:   PetscMalloc(o->numTotalFields * sizeof(int *), &o->localStart);
243:   PetscLogObjectMemory(o, (numProcs+1 + numOverlapNodes + o->numTotalFields*numClasses) * sizeof(int) +
244:                        o->numTotalFields * sizeof(int *));
245:   PetscMemzero(o->localStart, o->numTotalFields * sizeof(int *));

247:   /* Setup domain variable numbering */
248:   if (numOverlapNodes < numNodes) SETERRQ(PETSC_ERR_PLIB, "Invalid node partition");
249:   o->offsets[0] = 0;
250:   for(node = 1; node < numNodes; node++)
251:     o->offsets[node]    = o->offsets[node-1] + constMap->classSizes[classes[node-1]];
252:   o->numLocVars = o->offsets[numNodes-1] + constMap->classSizes[classes[numNodes-1]];
253:   (*constCtx->ops->getsize)(constCtx, &o->numLocNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
255:   o->numLocVars        += o->numLocNewVars;
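       /* As above, gather the per-process counts and prefix-sum them into firstVar[] */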
256:   MPI_Allgather(&o->numLocVars, 1, MPI_INT, &o->firstVar[1], 1, MPI_INT, o->comm);
257:   o->firstVar[0] = 0;
258:   for(proc = 1; proc <= numProcs; proc++)
259:     o->firstVar[proc] += o->firstVar[proc-1];
260:   o->numVars = o->firstVar[numProcs];
261: #ifdef PETSC_USE_BOPT_g
262:   (*constCtx->ops->getsize)(constCtx, PETSC_NULL, &o->numNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
264:   MPI_Allreduce(&o->numLocNewVars, &i, 1, MPI_INT, MPI_SUM, o->comm);
265:   if (o->numNewVars != i) SETERRQ(PETSC_ERR_PLIB, "Invalid partition of new variables");
266: #endif

268:   /* Initialize variable overlap */
269:   o->numOverlapVars    = o->numLocVars;
270:   o->numOverlapNewVars = o->numLocNewVars;

272:   GridGetMesh(grid, &mesh);
273:   MeshGetPartition(mesh, &part);
274:   if (numProcs > 1) {
275:     /* Map local to global variable numbers */
276:     for(node = 0; node < numNodes; node++)
277:       o->offsets[node] += o->firstVar[rank];
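         /* The ghost-offset exchange that follows mirrors GridCreateVarOrdering_Triangular_2D()
            above, but uses the constrained class sizes from constMap */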

279:     /* Initialize communication */
280:     PetscMalloc(numProcs * sizeof(int), &ghostSendVars);
281:     PetscMalloc(numProcs * sizeof(int), &sumSendVars);
282:     PetscMalloc(numProcs * sizeof(int), &ghostRecvVars);
283:     PetscMalloc(numProcs * sizeof(int), &sumRecvVars);
284:     PetscMalloc(numProcs * sizeof(int), &displs);
285:     PetscMemzero(ghostSendVars, numProcs * sizeof(int));
286:     PetscMemzero(sumSendVars,   numProcs * sizeof(int));
287:     PetscMemzero(ghostRecvVars, numProcs * sizeof(int));
288:     PetscMemzero(sumRecvVars,   numProcs * sizeof(int));
289:     PetscMemzero(displs,        numProcs * sizeof(int));

291:     /* Get number of ghost variables to receive from each processor and size of blocks --
292:        we assume here that classes[] already contains the ghost node classes */
293:     for(node = 0; node < numGhostNodes; node++) {
294:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
295:       nclass = classes[numNodes+node];
296:       ghostRecvVars[proc]++;
297:       o->numOverlapVars += constMap->classSizes[nclass];
298:     }

300:     /* Get number of constrained ghost variables to receive from each processor and size of blocks */
301:     (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, &o->numOverlapNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
303:     o->numOverlapVars += o->numOverlapNewVars - o->numLocNewVars;

305:     /* Get sizes of ghost variable blocks to send to each processor */
306:     MPI_Alltoall(ghostRecvVars, 1, MPI_INT, ghostSendVars, 1, MPI_INT, o->comm);

308:     /* Calculate offsets into the ghost variable receive array */
309:     for(proc = 1; proc < numProcs; proc++) {
310:       sumRecvVars[proc] = sumRecvVars[proc-1] + ghostRecvVars[proc-1];
311:       displs[proc]      = sumRecvVars[proc];
312:     }

314:     /* Calculate offsets into the ghost variable send array */
315:     for(proc = 1; proc < numProcs; proc++)
316:       sumSendVars[proc] = sumSendVars[proc-1] + ghostSendVars[proc-1];

318:     /* Send requests for ghost variable offsets to each processor */
319:     numSendGhostVars = sumSendVars[numProcs-1] + ghostSendVars[numProcs-1];
320:     PetscMalloc(numSendGhostVars * sizeof(int), &sendGhostBuffer);
321:     for(node = 0; node < numGhostNodes; node++) {
322:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
323:       o->offsets[numNodes+(displs[proc]++)] = gNode;
324:     }
325:     MPI_Alltoallv(&o->offsets[numNodes],  ghostRecvVars, sumRecvVars, MPI_INT,
326:                   sendGhostBuffer,        ghostSendVars, sumSendVars, MPI_INT, o->comm);

329:     /* Send ghost variable offsets to each processor */
330:     for(node = 0; node < numSendGhostVars; node++) {
331:       PartitionGlobalToLocalNodeIndex(part, sendGhostBuffer[node], &locNode);
332:       sendGhostBuffer[node] = o->offsets[locNode];
333:     }
334:     MPI_Alltoallv(sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT,
335:                   &o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT, o->comm);

338:     /* Cleanup */
339:     PetscFree(ghostSendVars);
340:     PetscFree(sumSendVars);
341:     PetscFree(ghostRecvVars);
342:     PetscFree(sumRecvVars);
343:     PetscFree(displs);
344:     PetscFree(sendGhostBuffer);

346:     /* We maintain local offsets for ghost variables, meaning the offsets after the last
347:        interior variable, rather than the offset of the given ghost variable in the global
348:        matrix. */
349:     PetscMalloc(numGhostNodes * sizeof(int), &o->localOffsets);
350:     for(node = 0, var = o->numLocVars; node < numGhostNodes; node++) {
351:       o->localOffsets[node] = var;
352:       nclass = classes[numNodes+node];
353:       var   += constMap->classSizes[nclass];
354:     }
355:   }

357:   /* Setup local field offsets */
358:   VarOrderingGetClassMap(oldOrder, &map);
359:   numClassesOld = map->numClasses;
360:   for(f = 0; f < numFields; f++) {
361:     field = fields[f];
362:     ierr  = PetscMalloc(numClasses * sizeof(int), &o->localStart[field]);
363:     for(nclass = 0; nclass < numClassesOld; nclass++) {
364:       o->localStart[field][nclass] = localStart[field][nclass];
365:     }
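         /* Classes added by the constraint map take their field offsets from the unconstrained
            class they were derived from; fields laid out after the constrained field shift down
            by its component count */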
366:     for(i = numClassesOld; i < numClasses; i++) {
367:       /* Invert the class map */
368:       for(nclass = 0; nclass < numClassesOld; nclass++) {
369:         if (classMap[nclass] == i) break;
370:       }
371:       if (nclass >= numClassesOld) SETERRQ(PETSC_ERR_PLIB, "Invalid class map");

373:       /* Subtract out the constrained fields */
374:       o->localStart[field][i]    = localStart[field][nclass];
375:       if (localStart[field][nclass] > localStart[constField][nclass])
376:         o->localStart[field][i] -= comp;
377:     }
378:   }

380:   *order = o;
381:   return(0);
382: }

384: int GridCreateVarScatter_Triangular_2D(Grid grid, VarOrdering order, GVec vec, VarOrdering embedOrder, GVec embedVec,
385:                                        VecScatter *scatter)
386: {
387:   PetscConstraintObject constCtx = grid->constraintCtx;
388:   FieldClassMap         map, embedMap;
389:   int                   numEmbedFields, numNodes;
390:   int                  *embedFields, *classes, *embedClassSizes;
391:   int                   numLocVars, numEmbedLocVars, numNewLocVars;
392:   int                  *offsets, **localStart;
393:   IS                    is, embedIS;
394:   int                  *indices;
395:   PetscTruth            isConstrained;
396:   int                   node, nclass, f, field, comp, var, count;
397:   int                   ierr;

400:   /* Retrieve orderings */
401:   VarOrderingGetClassMap(order,      &map);
402:   VarOrderingGetClassMap(embedOrder, &embedMap);
403:   numNodes        = map->numNodes;
404:   classes         = map->classes;
405:   numLocVars      = order->numLocVars;
406:   offsets         = order->offsets;
407:   localStart      = order->localStart;
408:   numEmbedFields  = embedMap->numFields;
409:   embedFields     = embedMap->fields;
410:   embedClassSizes = embedMap->classSizes;
411:   numEmbedLocVars = embedOrder->numLocVars;

413:   PetscMalloc(numEmbedLocVars * sizeof(int), &indices);
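       /* Gather, node by node, the indices in 'vec' of the variables that belong to the embedded
          fields; these become the scattered entries */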
414:   for(node = 0, count = 0; node < numNodes; node++) {
415:     nclass = classes[node];
416:     if (embedClassSizes[nclass] > 0) {
417:       for(f = 0; f < numEmbedFields; f++) {
418:         field = embedFields[f];
419:         if (localStart[field][nclass] >= 0) {
420:           GridGetFieldComponents(grid, field, &comp);
421:           for(var = 0; var < comp; var++)
422:             indices[count++] = offsets[node] + localStart[field][nclass] + var;
423:         }
424:       }
425:     }
426:   }
427:   /* Handle new variables */
428:   GridIsConstrained(grid, &isConstrained);
429:   if ((isConstrained == PETSC_TRUE) && (count < numEmbedLocVars)) {
430:     (*constCtx->ops->getsize)(constCtx, &numNewLocVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
432:     if (count != numEmbedLocVars - numNewLocVars) SETERRQ(PETSC_ERR_PLIB, "Inconsistent variable orderings");
433:     /* Fill in the embed variables */
434:     for(var = numNewLocVars; var > 0; var--)
435:       indices[count++] = numLocVars - var;
436:   }
437:   if (count != numEmbedLocVars) SETERRQ(PETSC_ERR_PLIB, "Inconsistent variable orderings");

439:   /* Create mappings */
440:   ISCreateGeneral(order->comm, numEmbedLocVars, indices, &is);
441:   ISCreateStride(order->comm, numEmbedLocVars, 0, 1, &embedIS);

443:   /* Create the scatter */
444:   VecScatterCreate(vec, is, embedVec, embedIS, scatter);

446:   /* Cleanup */
447:   ISDestroy(is);
448:   ISDestroy(embedIS);
449:   PetscFree(indices);

451:   return(0);
452: }

454: int GridCreateLocalVarOrdering_Triangular_2D(Grid grid, int numFields, int *fields, LocalVarOrdering *order)
455: {
456:   LocalVarOrdering o;
457:   int              elemOffset;
458:   int              f, field;
459:   int              ierr;

462:   /* Create the ordering */
463:   PetscHeaderCreate(o, _LocalVarOrdering, int, VAR_ORDER_COOKIE, 0, "LocalVarOrdering", grid->comm, LocalVarOrderingDestroy, 0);
464:   PetscLogObjectCreate(o);

466:   /* Allocate memory */
467:   o->numFields = numFields;
468:   PetscMalloc(numFields    * sizeof(int), &o->fields);
469:   PetscMalloc(grid->numFields * sizeof(int), &o->elemStart);
470:   PetscLogObjectMemory(o, (numFields + grid->numFields) * sizeof(int));
471:   PetscMemcpy(o->fields, fields, numFields * sizeof(int));

473:   /* Put in sentinel values */
474:   for(f = 0; f < grid->numFields; f++) {
475:     o->elemStart[f] = -1;
476:   }

478:   /* Setup the element offsets for each field */
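       /* elemStart[field] is the offset of 'field' within the element vector; elemSize is the
          total number of element variables over the selected fields */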
479:   for(f = 0, elemOffset = 0; f < numFields; f++) {
480:     field               = fields[f];
481:     o->elemStart[field] = elemOffset;
482:     elemOffset         += grid->fields[field].disc->size;
483:   }
484:   o->elemSize = elemOffset;
485:   *order = o;

487:   return(0);
488: }
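
The ghost-offset handshake in GridCreateVarOrdering_Triangular_2D() (repeated in GridVarOrderingConstrain_Triangular_2D()) is a two-phase MPI_Alltoall / MPI_Alltoallv exchange: the per-process request counts are swapped first, then the requested node numbers travel to their owners and the owners' variable offsets travel back. The standalone sketch below is not part of varorder2d.c; it reproduces the same handshake with made-up counts and payloads, using only plain MPI, so the pattern can be read in isolation.

/* ghost_offset_sketch.c: standalone illustration, not part of varorder2d.c */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
  int  numProcs, rank, proc, i;
  int *recvCounts, *sendCounts, *recvDispls, *sendDispls;
  int *request, *reply;
  int  numRequests, numReplies;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  recvCounts = (int *) malloc(numProcs * sizeof(int));
  sendCounts = (int *) malloc(numProcs * sizeof(int));
  recvDispls = (int *) malloc(numProcs * sizeof(int));
  sendDispls = (int *) malloc(numProcs * sizeof(int));

  /* Made-up request pattern: ask every other process for one "ghost" entry
     (the real code counts ghost nodes per owning process instead) */
  for (proc = 0; proc < numProcs; proc++) recvCounts[proc] = (proc == rank) ? 0 : 1;

  /* Phase 1: tell each owner how many entries we will request from it
     (mirrors the MPI_Alltoall on ghostRecvVars/ghostSendVars) */
  MPI_Alltoall(recvCounts, 1, MPI_INT, sendCounts, 1, MPI_INT, MPI_COMM_WORLD);

  /* Prefix sums give the displacements for the variable-length exchanges */
  recvDispls[0] = sendDispls[0] = 0;
  for (proc = 1; proc < numProcs; proc++) {
    recvDispls[proc] = recvDispls[proc-1] + recvCounts[proc-1];
    sendDispls[proc] = sendDispls[proc-1] + sendCounts[proc-1];
  }
  numRequests = recvDispls[numProcs-1] + recvCounts[numProcs-1]; /* entries we ask for */
  numReplies  = sendDispls[numProcs-1] + sendCounts[numProcs-1]; /* entries we answer  */

  request = (int *) malloc((numRequests > 0 ? numRequests : 1) * sizeof(int));
  reply   = (int *) malloc((numReplies  > 0 ? numReplies  : 1) * sizeof(int));
  for (i = 0; i < numRequests; i++) request[i] = rank;  /* made-up request payload */

  /* Phase 2: ship the requested ids to their owners and receive the ids others want from us
     (mirrors the first MPI_Alltoallv on o->offsets / sendGhostBuffer) */
  MPI_Alltoallv(request, recvCounts, recvDispls, MPI_INT,
                reply,   sendCounts, sendDispls, MPI_INT, MPI_COMM_WORLD);

  /* Phase 3: answer each request in place and send the answers back
     (mirrors the second MPI_Alltoallv that returns the variable offsets) */
  for (i = 0; i < numReplies; i++) reply[i] = 100*rank + reply[i];  /* made-up "offset" */
  MPI_Alltoallv(reply,   sendCounts, sendDispls, MPI_INT,
                request, recvCounts, recvDispls, MPI_INT, MPI_COMM_WORLD);

  printf("[%d] received %d ghost offsets\n", rank, numRequests);

  free(recvCounts); free(sendCounts); free(recvDispls); free(sendDispls);
  free(request);    free(reply);
  MPI_Finalize();
  return 0;
}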