Actual source code: part2d.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: part2d.c,v 1.14 2000/01/31 17:40:21 knepley Exp $";
  3: #endif

  5: #include "src/mesh/impls/triangular/2d/2dimpl.h"         /*I "mesh.h" I*/
  6: #ifdef PETSC_HAVE_PARMETIS
  7: EXTERN_C_BEGIN
  8: #include "parmetis.h"
  9: EXTERN_C_END
 10: #endif
 11: #include "part2d.h"

 13: static int PartitionView_Triangular_2D_File(Partition p, PetscViewer viewer) {
 14:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
 15:   FILE                    *fd;
 16:   int                      numLocElements = p->numLocElements;
 17:   int                      numLocNodes    = q->numLocNodes;
 18:   int                      numLocEdges    = q->numLocEdges;
 19:   int                      i;
 20:   int                      ierr;

 23:   PetscViewerASCIIPrintf(viewer, "Partition Object:n");
 24:   PetscViewerASCIIPrintf(viewer, "  Partition of triangular 2D grid with %d elements and %d nodesn", p->numElements, q->numNodes);
 25:   PetscViewerASCIIGetPointer(viewer, &fd);
 26:   PetscSynchronizedFPrintf(p->comm, fd, "    Proc %d: %d elements %d nodes %d edgesn",
 27:                            p->rank, numLocElements, numLocNodes, numLocEdges);
 28:   PetscSynchronizedFlush(p->comm);
 29:   if (p->ordering != PETSC_NULL) {
 30:     PetscViewerASCIIPrintf(viewer, "  Global element renumbering:\n");
 31:     AOView(p->ordering, viewer);
 32:   }
 33:   if (q->nodeOrdering != PETSC_NULL) {
 34:     PetscViewerASCIIPrintf(viewer, "  Global node renumbering:\n");
 35:     AOView(q->nodeOrdering, viewer);
 36:   }
 37:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost elements on proc %d\n", p->numOverlapElements - numLocElements, p->rank);
 38:   for(i = 0; i < p->numOverlapElements - numLocElements; i++)
 39:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d %d\n", i, p->ghostElements[i], p->ghostElementProcs[i]);
 40:   PetscSynchronizedFlush(p->comm);
 41:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost nodes on proc %d\n", q->numOverlapNodes - numLocNodes, p->rank);
 42:   for(i = 0; i < q->numOverlapNodes - numLocNodes; i++)
 43:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d\n", i, q->ghostNodes[i]);
 44:   PetscSynchronizedFlush(p->comm);

 46:   return(0);
 47: }

 49: static int PartitionView_Triangular_2D_Draw(Partition p, PetscViewer v) {
 51:   return(0);
 52: }

 54: static int PartitionView_Triangular_2D(Partition p, PetscViewer viewer) {
 55:   PetscTruth isascii, isdraw;
 56:   int        ierr;

 59:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
 60:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_DRAW,  &isdraw);
 61:   if (isascii == PETSC_TRUE) {
 62:     PartitionView_Triangular_2D_File(p, viewer);
 63:   } else if (isdraw == PETSC_TRUE) {
 64:     PartitionView_Triangular_2D_Draw(p, viewer);
 65:   }
 66:   return(0);
 67: }

 69: static int PartitionViewFromOptions_Private(Partition part, char *title) {
 70:   PetscViewer viewer;
 71:   PetscDraw   draw;
 72:   PetscTruth  opt;
 73:   int         ierr;

 76:   PetscOptionsHasName(part->prefix, "-part_view", &opt);
 77:   if (opt == PETSC_TRUE) {
 78:     PartitionView(part, PETSC_NULL);
 79:   }
 80:   PetscOptionsHasName(part->prefix, "-part_view_draw", &opt);
 81:   if (opt == PETSC_TRUE) {
 82:     PetscViewerDrawOpen(part->comm, 0, 0, 0, 0, 300, 300, &viewer);
 83:     PetscViewerDrawGetDraw(viewer, 0, &draw);
 84:     if (title != PETSC_NULL) {
 85:       PetscDrawSetTitle(draw, title);
 86:     }
 87:     PartitionView(part, viewer);
 88:     PetscViewerFlush(viewer);
 89:     PetscDrawPause(draw);
 90:     PetscViewerDestroy(viewer);
 91:   }
 92:   return(0);
 93: }

 95: static int PartitionDestroy_Triangular_2D(Partition p) {
 96:   Partition_Triangular_2D *s = (Partition_Triangular_2D *) p->data;
 97:   int                      ierr;

100:   PetscFree(p->firstElement);
101:   if (p->ordering != PETSC_NULL)
102:     {AODestroy(p->ordering);}
103:   if (p->ghostElements != PETSC_NULL)
104:     {PetscFree(p->ghostElements);}
105:   if (p->ghostElementProcs != PETSC_NULL)
106:     {PetscFree(p->ghostElementProcs);}
107:   PetscFree(s->firstNode);
108:   PetscFree(s->firstBdNode);
109:   if (s->nodeOrdering != PETSC_NULL)
110:     {AODestroy(s->nodeOrdering);}
111:   if (s->ghostNodes != PETSC_NULL)
112:     {PetscFree(s->ghostNodes);}
113:   if (s->ghostNodeProcs != PETSC_NULL)
114:     {PetscFree(s->ghostNodeProcs);}
115:   if (s->ghostBdNodes != PETSC_NULL)
116:     {PetscFree(s->ghostBdNodes);}
117:   PetscFree(s->firstEdge);
118:   if (s->edgeOrdering != PETSC_NULL)
119:     {AODestroy(s->edgeOrdering);}
120:   PetscFree(s);

122:   return(0);
123: }

125: static int PartitionGhostNodeExchange_Triangular_2D(Partition part, InsertMode addv, ScatterMode mode, int *locVars, int *ghostVars) {
126:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) part->data;
127:   Mesh                     mesh = part->mesh;
128:   int                      ierr;

131:   PetscGhostExchange(part->comm, q->numOverlapNodes - mesh->numNodes, q->ghostNodeProcs, q->ghostNodes, PETSC_INT,
132:                      q->firstNode, addv, mode, locVars, ghostVars);
133: 
134:   return(0);
135: }

137: static int PartitionGetTotalNodes_Triangular_2D(Partition p, int *size) {
138:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

141:   *size = q->numNodes;
142:   return(0);
143: }

145: static int PartitionGetStartNode_Triangular_2D(Partition p, int *node) {
146:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

149:   *node = q->firstNode[p->rank];
150:   return(0);
151: }

153: static int PartitionGetEndNode_Triangular_2D(Partition p, int *node) {
154:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

157:   *node = q->firstNode[p->rank+1];
158:   return(0);
159: }

161: static int PartitionGetNumNodes_Triangular_2D(Partition p, int *size) {
162:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

165:   *size = q->numLocNodes;
166:   return(0);
167: }

169: static int PartitionGetNumOverlapNodes_Triangular_2D(Partition p, int *size) {
170:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

173:   *size = q->numOverlapNodes;
174:   return(0);
175: }

177: int PartitionGhostNodeIndex_Private(Partition p, int node, int *gNode) {
178:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
179:   int                      low, high, mid;

182:   /* Use bisection since the array is assumed to be sorted */
183:   low  = 0;
184:   high = q->numOverlapNodes - (q->firstNode[p->rank+1] - q->firstNode[p->rank]) - 1;
185:   while (low <= high) {
186:     mid = (low + high)/2;
187:     if (node == q->ghostNodes[mid]) {
188:       *gNode = mid;
189:       return(0);
190:     } else if (node < q->ghostNodes[mid]) {
191:       high = mid - 1;
192:     } else {
193:       low  = mid + 1;
194:     }
195:   }
196:   *gNode = -1;
197:   /* Flag for ghost node not present */
198:   PetscFunctionReturn(1);
199: }
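
/* --- Editor's sketch (annotation, not part of part2d.c) -------------------
   PartitionGhostNodeIndex_Private() returns 0 and sets *gNode on a hit, and
   returns 1 with *gNode = -1 when the node is not a ghost, so callers can
   branch directly on the return value (as later code in this file does).
   A minimal caller, assuming a valid Partition p: */
static int ExampleGhostLookup(Partition p, int globalNode)
{
  int gNode;

  if (PartitionGhostNodeIndex_Private(p, globalNode, &gNode)) {
    return -1;  /* not a ghost on this process */
  }
  return gNode; /* index into q->ghostNodes[] */
}
/* ------------------------------------------------------------------------- */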

201: static int PartitionGlobalToLocalNodeIndex_Triangular_2D(Partition p, int node, int *locNode) {
202:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
203:   int                      numLocNodes = q->numLocNodes;
204:   int                      gNode; /* Local ghost node number */
205:   int                      ierr;

208:   if (node < 0) {
209:     *locNode = node;
210:     return(0);
211:   }
212:   /* Check for ghost node */
213:   if ((node < q->firstNode[p->rank]) || (node >= q->firstNode[p->rank+1])) {
214:     /* Search for canonical number */
215:     PartitionGhostNodeIndex_Private(p, node, &gNode);
216:     *locNode = numLocNodes + gNode;
217:   } else {
218:     *locNode = node - q->firstNode[p->rank];
219:   }
220:   return(0);
221: }

223: static int PartitionLocalToGlobalNodeIndex_Triangular_2D(Partition p, int locNode, int *node) {
224:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
225:   int                      numLocNodes = q->numLocNodes;

228:   if (locNode < 0) {
229:     *node = locNode;
230:     return(0);
231:   }
232:   /* Check for ghost node */
233:   if (locNode >= numLocNodes) {
234:     *node = q->ghostNodes[locNode - numLocNodes];
235:   } else {
236:     *node = locNode + q->firstNode[p->rank];
237:   }
238:   return(0);
239: }
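
/* --- Editor's sketch (annotation, not part of part2d.c) -------------------
   The two index maps above are inverses on valid indices: owned nodes map
   into [0, numLocNodes) and ghosts into [numLocNodes, numOverlapNodes),
   with ghosts resolved through the sorted ghostNodes[] array.  A round-trip
   consistency check, assuming a valid Partition p: */
static int ExampleNodeRoundTrip(Partition p, int globalNode)
{
  int locNode, back;

  PartitionGlobalToLocalNodeIndex_Triangular_2D(p, globalNode, &locNode);
  PartitionLocalToGlobalNodeIndex_Triangular_2D(p, locNode, &back);
  return (back == globalNode) ? 0 : 1; /* 0 iff the maps agree */
}
/* ------------------------------------------------------------------------- */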

241: static int PartitionGlobalToGhostNodeIndex_Triangular_2D(Partition p, int node, int *ghostNode, int *ghostProc) {
242:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
243:   int                      ierr;

246:   if (node < 0) {
247:     *ghostNode = node;
248:     *ghostProc = -1;
249:     return(0);
250:   }
251:   /* Check for ghost node */
252:   if ((node < q->firstNode[p->rank]) || (node >= q->firstNode[p->rank+1])) {
253:     PartitionGhostNodeIndex_Private(p, node, ghostNode);
254:     *ghostProc = q->ghostNodeProcs[*ghostNode];
255:   } else {
256:     SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Global node %d is not a ghost node", node);
257:   }
258:   return(0);
259: }

261: static int PartitionGhostToGlobalNodeIndex_Triangular_2D(Partition p, int ghostNode, int *node, int *ghostProc) {
262:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
263:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;

266:   if (ghostNode < 0) {
267:     *node      = ghostNode;
268:     *ghostProc = -1;
269:     return(0);
270:   }
271:   /* Check for ghost node */
272:   if (ghostNode < numGhostNodes) {
273:     *node      = q->ghostNodes[ghostNode];
274:     *ghostProc = q->ghostNodeProcs[ghostNode];
275:   } else {
276:     SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Ghost node %d does not exist", ghostNode);
277:   }
278:   return(0);
279: }

281: static int PartitionGetNodeOrdering_Triangular_2D(Partition p, AO *order) {
282:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

285:   *order = q->nodeOrdering;
286:   return(0);
287: }

289: static int PartitionGetTotalEdges_Triangular_2D(Partition p, int *size) {
290:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

293:   *size = q->numEdges;
294:   return(0);
295: }

297: static int PartitionGetStartEdge_Triangular_2D(Partition p, int *edge) {
298:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

301:   *edge = q->firstEdge[p->rank];
302:   return(0);
303: }

305: static int PartitionGetEndEdge_Triangular_2D(Partition p, int *edge) {
306:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

309:   *edge = q->firstEdge[p->rank+1];
310:   return(0);
311: }

313: static int PartitionGetNumEdges_Triangular_2D(Partition p, int *size) {
314:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

317:   *size = q->numLocEdges;
318:   return(0);
319: }

321: static int PartitionGetNumOverlapEdges_Triangular_2D(Partition p, int *size) {
322:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

325:   /* We do not maintain ghost edges */
326:   *size = q->numLocEdges;
327:   return(0);
328: }

330: static int PartitionGetEdgeOrdering_Triangular_2D(Partition p, AO *order) {
331:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

334:   *order = q->edgeOrdering;
335:   return(0);
336: }

338: static struct _PartitionOps POps = {/* Generic Operations */
339:                                     PETSC_NULL/* PartitionSetup */,
340:                                     PETSC_NULL/* PartitionSetFromOptions */,
341:                                     PartitionView_Triangular_2D,
342:                                     PETSC_NULL/* PartitionCopy */,
343:                                     PETSC_NULL/* PartitionDuplicate */,
344:                                     PartitionDestroy_Triangular_2D,
345:                                     /* Partition-Specific Operations */
346:                                     PartitionGhostNodeExchange_Triangular_2D,
347:                                     /* Node Query Functions */
348:                                     PartitionGetTotalNodes_Triangular_2D,
349:                                     PartitionGetStartNode_Triangular_2D,
350:                                     PartitionGetEndNode_Triangular_2D,
351:                                     PartitionGetNumNodes_Triangular_2D,
352:                                     PartitionGetNumOverlapNodes_Triangular_2D,
353:                                     PartitionGlobalToLocalNodeIndex_Triangular_2D,
354:                                     PartitionLocalToGlobalNodeIndex_Triangular_2D,
355:                                     PartitionGlobalToGhostNodeIndex_Triangular_2D,
356:                                     PartitionGhostToGlobalNodeIndex_Triangular_2D,
357:                                     PartitionGetNodeOrdering_Triangular_2D,
358:                                     /* Face Query Functions */
359:                                     PartitionGetTotalElements,
360:                                     PartitionGetStartElement,
361:                                     PartitionGetEndElement,
362:                                     PartitionGetNumElements,
363:                                     PartitionGetNumOverlapElements,
364:                                     PartitionGlobalToLocalElementIndex,
365:                                     PartitionLocalToGlobalElementIndex,
366:                                     PartitionGetElementOrdering,
367:                                     /* Edge Query Functions */
368:                                     PartitionGetTotalEdges_Triangular_2D,
369:                                     PartitionGetStartEdge_Triangular_2D,
370:                                     PartitionGetEndEdge_Triangular_2D,
371:                                     PartitionGetNumEdges_Triangular_2D,
372:                                     PartitionGetNumOverlapEdges_Triangular_2D,
373:                                     PETSC_NULL/* PartitionGlobalToLocalEdgeIndex */,
374:                                     PETSC_NULL/* PartitionLocalToGlobalEdgeIndex */,
375:                                     PartitionGetEdgeOrdering_Triangular_2D};
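
/* --- Editor's note (sketch, not part of part2d.c) -------------------------
   POps is the per-implementation virtual function table: entries left as
   PETSC_NULL are operations this implementation does not support, and the
   element queries reuse the generic PartitionGet*Element* routines.  A
   generic front end presumably dispatches through the table along these
   lines (the field name "getnumnodes" is an assumption, not taken from the
   Partition headers):

     int PartitionGetNumNodes(Partition p, int *size)
     {
       return (*p->ops->getnumnodes)(p, size);
     }
   ------------------------------------------------------------------------- */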

377: int PartitionCalcCut_Private(Partition p, int *cut)
378: {
379:   Mesh_Triangular *tri            = (Mesh_Triangular *) p->mesh->data;
380:   int              numLocElements = p->numLocElements;
381:   int             *neighbors      = tri->neighbors;
382:   int              locCut;        /* The number of edges of the dual crossing the partition from this domain */
383:   int              elem, neighbor;
384:   int              ierr;

387:   for(elem = 0, locCut = 0; elem < numLocElements; elem++) {
388:     for(neighbor = 0; neighbor < 3; neighbor++) {
389:       if (neighbors[elem*3+neighbor] >= numLocElements)
390:         locCut++;
391:     }
392:   }
393:   ierr  = MPI_Allreduce(&locCut, cut, 1, MPI_INT, MPI_SUM, p->comm);
394:   *cut /= 2;
395:   return(0);
396: }
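
/* --- Editor's note (sketch, not part of part2d.c) -------------------------
   Each dual edge crossing the partition is counted once by each of the two
   domains it joins, so the MPI_Allreduce() sum counts every cut edge twice;
   hence the final division.  E.g. with two domains sharing 3 dual edges,
   both report locCut = 3, the reduced sum is 6, and *cut = 3.
   ------------------------------------------------------------------------- */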

398: int PartitionDebugAO_Private(Partition p, int *nodeProcs)
399: {
400:   Mesh                     mesh        = p->mesh;
401:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
402:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
403:   int                      numCorners  = mesh->numCorners;
404:   int                      numElements = mesh->numFaces;
405:   int                     *elements    = tri->faces;
406:   int                      numNodes    = q->numNodes;
407:   int                      numProcs    = p->numProcs;
408:   int                      rank        = p->rank;
409:   int                     *support;
410:   int                     *temp;
411:   int                      proc, nProc, elem, nElem, sElem, corner, nCorner, node, degree, index;
412:   int                      ierr;

415:   PetscMalloc(numProcs * sizeof(int), &temp);
416:   for(node = 0; node < numNodes; node++) {
417:     PetscSynchronizedPrintf(p->comm, " %d", nodeProcs[node]);
418:     PetscSynchronizedFlush(p->comm);
419:     PetscPrintf(p->comm, "\n");
420:     MPI_Allgather(&nodeProcs[node], 1, MPI_INT, temp, 1, MPI_INT, p->comm);
421:     for(proc = 0, index = 0; proc < numProcs; proc++) {
422:       if (temp[proc] == proc) index++;
423:     }

425:     /* If a node is not scheduled for a unique domain */
426:     if (index != 1) {
427:       for(elem = 0; elem < numElements; elem++) {
428:         for(corner = 0; corner < numCorners; corner++) {
429:           /* Locate an element containing the node */
430:           if (node != elements[elem*numCorners+corner])
431:             continue;
432: 
433:           /* Check the support of the node */
434:           PetscPrintf(PETSC_COMM_SELF, "[%d]elem: %d corner: %d node: %d\n", rank, elem, corner, node);
435:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
436:           for(sElem = 0; sElem < degree; sElem++) {
437:             nElem = support[sElem];
438:             PetscPrintf(PETSC_COMM_SELF, "[%d]support[%d] = %d\n", rank, sElem, nElem);
439:             /* See if neighbor is in another domain */
440:             if (nElem >= numElements) {
441:               /* Check to see if node is contained in the neighboring element */
442:               for(nCorner = 0; nCorner < numCorners; nCorner++)
443:                 if (elements[nElem*numCorners+nCorner] == node) {
444:                   nProc = p->ghostElementProcs[nElem-numElements];
445:                   PetscPrintf(PETSC_COMM_SELF, "[%d]Found in corner %d proc: %d\n", rank, nCorner, nProc);
446:                   break;
447:                 }
448:             }
449:           }
450:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
451:           if (nodeProcs[node] < 0)
452:             nodeProcs[node] = rank;
453:           PetscPrintf(PETSC_COMM_SELF, "[%d]nodeProcs[%d]: %d\n", rank, node, nodeProcs[node]);
454:         }
455:       }
456:     }
457:   }
458:   PetscFree(temp);
459:   PetscBarrier((PetscObject) p);
460:   PetscFunctionReturn(1);
461: }

463: /*
464:   PartitionSortGhosts_Private - This function sorts the ghost array and
465:   removes any duplicates.

467:   Input Parameters:
468: . p          - The Partition
469: . numGhosts  - The number of ghosts
470: . ghostArray - The ghost indices

472:   Output Parameters:
473: . numGhosts  - The new size of the ghost array
474: . ghostArray - The sorted ghost indices

476: .seealso:
477: */
478: int PartitionSortGhosts_Private(Partition p, int *numGhosts, int *ghostArray, int **ghostPerm)
479: {
480:   int *perm, *temp;
481:   int  size;
482:   int  ghost, newGhost;
483:   int  ierr;

486:   size = *numGhosts;
487:   PetscMalloc(size * sizeof(int), &perm);
488:   PetscMalloc(size * sizeof(int), &temp);

490:   /* Sort ghosts */
491:   for(ghost = 0; ghost < size; ghost++) perm[ghost] = ghost;
492:   PetscSortIntWithPermutation(size, ghostArray, perm);

494:   /* Permute ghosts and eliminate duplicates */
495:   for(ghost = 0, newGhost = 0; ghost < size; ghost++) {
496:     if ((newGhost == 0) || (temp[newGhost-1] != ghostArray[perm[ghost]])) {
497:       /* Keep ghost */
498:       temp[newGhost++] = ghostArray[perm[ghost]];
499:     } else {
500:       /* Eliminate redundant ghost */
501:       PetscMemmove(&perm[ghost], &perm[ghost+1], (size - (ghost+1)) * sizeof(int));
502:       ghost--;
503:       size--;
504:     }
505:   }
506:   for(ghost = 0; ghost < size; ghost++) {
507:     ghostArray[ghost] = temp[ghost];
508:   }
509:   PetscFree(temp);

511:   *numGhosts = size;
512:   *ghostPerm = perm;
513:   return(0);
514: }
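
/* --- Editor's sketch (annotation, not part of part2d.c) -------------------
   Worked example with assumed values: given ghostArray = {7, 3, 7, 5} the
   routine returns *numGhosts = 3, ghostArray = {3, 5, 7}, and *ghostPerm
   mapping each surviving slot back to its original position.  Companion
   arrays (source procs, markers, coordinates) are then permuted the same
   way, as the callers below do: */
static void ExamplePermuteCompanion(int numGhosts, const int *ghostPerm,
                                    const int *oldData, int *newData)
{
  int ghost;

  for(ghost = 0; ghost < numGhosts; ghost++) {
    newData[ghost] = oldData[ghostPerm[ghost]];
  }
}
/* ------------------------------------------------------------------------- */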

516: int PartitionGetNewGhostNodes_Serial(Partition p, int *newProcNodes, int *newNodes)
517: {
518:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
519:   int                      numLocNodes   = q->numLocNodes;
520:   int                      numGhostNodes = q->numOverlapNodes - numLocNodes;
521:   int                      numProcs      = p->numProcs;
522:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
523:   int                      numNewNodes;    /* Total number of new ghost nodes to add */
524:   int                     *temp;
525:   int                      proc, node, i;
526:   int                      ierr;

529:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++)
530:     numNewNodes += newProcNodes[proc];

532:   /* Add in new ghost nodes */
533:   if (numNewNodes > 0) {
534:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
535:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
536:     for(node = 0; node < numNewNodes; node++) {
537:       temp[numGhostNodes+node] = newNodes[node];
538:     }
539:     if (q->ghostNodes != PETSC_NULL) {
540:       PetscFree(q->ghostNodes);
541:     }
542:     q->ghostNodes = temp;

544:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
545:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
546:     for(proc = 0, node = 0; proc < numProcs; proc++) {
547:       for(i = 0; i < newProcNodes[proc]; i++)
548:         temp[numGhostNodes+(node++)] = proc;
549:     }
550:     if (q->ghostNodeProcs != PETSC_NULL) {
551:       PetscFree(q->ghostNodeProcs);
552:     }
553:     q->ghostNodeProcs = temp;

555:     /* Resort ghost nodes and remove duplicates */
556:     numGhostNodes += numNewNodes;
557:     PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
558:     q->numOverlapNodes = numLocNodes + numGhostNodes;
559:     PetscMalloc(numGhostNodes * sizeof(int), &temp);
560:     for(node = 0; node < numGhostNodes; node++) {
561:       temp[node] = q->ghostNodeProcs[nodePerm[node]];
562:     }
563:     PetscFree(q->ghostNodeProcs);
564:     q->ghostNodeProcs = temp;
565:     PetscFree(nodePerm);
566:   }
567: #ifdef PETSC_USE_BOPT_g
568:   /* Consistency check for ghost nodes */
569:   for(node = 0; node < numGhostNodes; node++) {
570:     if ((q->ghostNodes[node] <  q->firstNode[q->ghostNodeProcs[node]]) ||
571:         (q->ghostNodes[node] >= q->firstNode[q->ghostNodeProcs[node]+1])) {
572:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
573:     }
574:   }
575: #endif
576:   return(0);
577: }

579: int PartitionGetNewGhostNodes_Parallel(Partition p, int *sendGhostNodes, int *sendNodes, int *recvGhostNodes, int *recvNodes)
580: {
581:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
582:   Mesh                     mesh            = p->mesh;
583:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
584:   int                      numLocNodes     = q->numLocNodes;
585:   int                     *firstNode       = q->firstNode;
586:   double                  *nodes           = tri->nodes;
587:   int                     *markers         = tri->markers;
588:   int                     *degrees         = tri->degrees;
589:   int                      numProcs        = p->numProcs;
590:   int                      rank            = p->rank;
591:   int                      numGhostNodes;
592:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
593:   int                      numSendNodes;    /* Total number of new ghost nodes to receive */
594:   int                      numRecvNodes;    /* Total number of new ghost nodes to send */
595:   int                     *sumSendNodes;    /* Prefix sums of sendGhostNodes */
596:   int                     *sumRecvNodes;    /* Prefix sums of recvGhostNodes */
597:   int                     *sendGhostCoords; /* Number of ghost coordinate values (2 per node) needed from a given processor */
598:   int                     *recvGhostCoords; /* Number of ghost coordinate values (2 per node) needed by a given processor */
599:   int                     *sumSendCoords;   /* Prefix sums of sendGhostCoords */
600:   int                     *sumRecvCoords;   /* Prefix sums of recvGhostCoords */
601:   double                  *sendCoords;      /* Coordinates of ghost nodes for other domains */
602:   double                  *recvCoords;      /* Coordinates of ghost nodes for this domain */
603:   int                     *sendMarkers;     /* Markers of ghost nodes for other domains */
604:   int                     *recvMarkers;     /* Markers of ghost nodes for this domain */
605:   int                     *sendDegrees;     /* Degrees of ghost nodes for other domains */
606:   int                     *recvDegrees;     /* Degrees of ghost nodes for this domain */
607:   int                     *offsets;         /* Offsets into the send array for each destination proc */
608:   int                     *temp;
609:   double                  *temp2;
610:   int                      proc, node, locNode, i;
611:   int                      ierr;

614:   PetscMalloc(numProcs * sizeof(int), &sumSendNodes);
615:   PetscMalloc(numProcs * sizeof(int), &sumRecvNodes);
616:   PetscMalloc(numProcs * sizeof(int), &sendGhostCoords);
617:   PetscMalloc(numProcs * sizeof(int), &recvGhostCoords);
618:   PetscMalloc(numProcs * sizeof(int), &sumSendCoords);
619:   PetscMalloc(numProcs * sizeof(int), &sumRecvCoords);
620:   PetscMalloc(numProcs * sizeof(int), &offsets);
621:   PetscMemzero(sumSendNodes, numProcs * sizeof(int));
622:   PetscMemzero(sumRecvNodes, numProcs * sizeof(int));
623:   PetscMemzero(offsets,      numProcs * sizeof(int));

625:   /* Compute new ghost node offsets */
626:   for(proc = 1; proc < numProcs; proc++) {
627:     sumSendNodes[proc] = sumSendNodes[proc-1] + sendGhostNodes[proc-1];
628:     sumRecvNodes[proc] = sumRecvNodes[proc-1] + recvGhostNodes[proc-1];
629:   }
630:   numSendNodes = sumSendNodes[numProcs-1] + sendGhostNodes[numProcs-1];
631:   numRecvNodes = sumRecvNodes[numProcs-1] + recvGhostNodes[numProcs-1];

633:   /* Get numbers of ghost nodes to provide */
634:   MPI_Alltoallv(sendNodes, sendGhostNodes, sumSendNodes, MPI_INT,
635:                 recvNodes, recvGhostNodes, sumRecvNodes, MPI_INT, p->comm);
636: 

638:   /* Get node coordinates, markers, and degrees */
639:   for(proc = 0; proc < numProcs; proc++) {
640:     sendGhostCoords[proc] = sendGhostNodes[proc]*2;
641:     recvGhostCoords[proc] = recvGhostNodes[proc]*2;
642:     sumSendCoords[proc]   = sumSendNodes[proc]*2;
643:     sumRecvCoords[proc]   = sumRecvNodes[proc]*2;
644:   }
645:   if (numSendNodes) {
646:     PetscMalloc(numSendNodes*2 * sizeof(double), &recvCoords);
647:     PetscMalloc(numSendNodes   * sizeof(int),    &recvMarkers);
648:     PetscMalloc(numSendNodes   * sizeof(int),    &recvDegrees);
649:   }
650:   if (numRecvNodes) {
651:     PetscMalloc(numRecvNodes*2 * sizeof(double), &sendCoords);
652:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendMarkers);
653:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendDegrees);
654:     for(node = 0; node < numRecvNodes; node++) {
655:       locNode = recvNodes[node] - firstNode[rank];
656: #ifdef PETSC_USE_BOPT_g
657:       if ((locNode < 0) || (locNode >= numLocNodes)) {
658:         SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d should be in [0,%d)", locNode, numLocNodes);
659:       }
660: #endif
661:       for(i = 0; i < 2; i++)
662:         sendCoords[node*2+i] = nodes[locNode*2+i];
663:       sendMarkers[node] = markers[locNode];
664:       sendDegrees[node] = degrees[locNode];
665:     }
666:   }

668:   /* Communicate node coordinates and markers and degrees */
669:   MPI_Alltoallv(sendCoords,  recvGhostCoords, sumRecvCoords, MPI_DOUBLE,
670:                 recvCoords,  sendGhostCoords, sumSendCoords, MPI_DOUBLE, p->comm);
671: 
672:   MPI_Alltoallv(sendMarkers, recvGhostNodes,  sumRecvNodes,  MPI_INT,
673:                 recvMarkers, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
674: 
675:   MPI_Alltoallv(sendDegrees, recvGhostNodes,  sumRecvNodes,  MPI_INT,
676:                 recvDegrees, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
677: 

679:   /* Add in new ghost nodes */
680:   numGhostNodes = q->numOverlapNodes - numLocNodes;
681:   if (numSendNodes > 0) {
682:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
683:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
684:     for(node = 0; node < numSendNodes; node++)
685:       temp[numGhostNodes+node] = sendNodes[node];
686:     if (q->ghostNodes != PETSC_NULL) {
687:       PetscFree(q->ghostNodes);
688:     }
689:     q->ghostNodes = temp;

691:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
692:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
693:     for(proc = 0, node = 0; proc < numProcs; proc++) {
694:       for(i = 0; i < sendGhostNodes[proc]; i++)
695:         temp[numGhostNodes+(node++)] = proc;
696:     }
697:     if (q->ghostNodeProcs != PETSC_NULL) {
698:       PetscFree(q->ghostNodeProcs);
699:     }
700:     q->ghostNodeProcs = temp;

702:     PetscMalloc((q->numOverlapNodes + numSendNodes)*2 * sizeof(double), &temp2);
703:     PetscMemcpy(temp2, nodes, q->numOverlapNodes*2 * sizeof(double));
704:     for(node = 0; node < numSendNodes*2; node++)
705:       temp2[q->numOverlapNodes*2+node] = recvCoords[node];
706:     PetscFree(nodes);
707:     tri->nodes = temp2;

709:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
710:     PetscMemcpy(temp, markers, q->numOverlapNodes * sizeof(int));
711:     for(node = 0; node < numSendNodes; node++)
712:       temp[q->numOverlapNodes+node] = recvMarkers[node];
713:     PetscFree(markers);
714:     tri->markers = temp;

716:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
717:     PetscMemcpy(temp, degrees, q->numOverlapNodes * sizeof(int));
718:     for(node = 0; node < numSendNodes; node++)
719:       temp[q->numOverlapNodes+node] = recvDegrees[node];
720:     PetscFree(degrees);
721:     tri->degrees = temp;
722:   }

724:   /* Resort ghost nodes and remove duplicates */
725:   numGhostNodes     += numSendNodes;
726:   PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
727:   q->numOverlapNodes = numLocNodes + numGhostNodes;

729:   PetscMalloc(numGhostNodes * sizeof(int), &temp);
730:   for(node = 0; node < numGhostNodes; node++)
731:     temp[node] = q->ghostNodeProcs[nodePerm[node]];
732:   for(node = 0; node < numGhostNodes; node++)
733:     q->ghostNodeProcs[node] = temp[node];

735:   for(node = 0; node < numGhostNodes; node++)
736:     temp[node] = tri->markers[mesh->numNodes+nodePerm[node]];
737:   for(node = 0; node < numGhostNodes; node++)
738:     tri->markers[mesh->numNodes+node] = temp[node];

740:   for(node = 0; node < numGhostNodes; node++)
741:     temp[node] = tri->degrees[mesh->numNodes+nodePerm[node]];
742:   for(node = 0; node < numGhostNodes; node++)
743:     tri->degrees[mesh->numNodes+node] = temp[node];
744:   PetscFree(temp);

746:   PetscMalloc(numGhostNodes*2 * sizeof(double), &temp2);
747:   for(node = 0; node < numGhostNodes; node++) {
748:     temp2[node*2]   = tri->nodes[(mesh->numNodes+nodePerm[node])*2];
749:     temp2[node*2+1] = tri->nodes[(mesh->numNodes+nodePerm[node])*2+1];
750:   }
751:   for(node = 0; node < numGhostNodes; node++) {
752:     tri->nodes[(mesh->numNodes+node)*2]   = temp2[node*2];
753:     tri->nodes[(mesh->numNodes+node)*2+1] = temp2[node*2+1];
754:   }
755:   PetscFree(temp2);
756:   PetscFree(nodePerm);

758: #ifdef PETSC_USE_BOPT_g
759:   /* Consistency check for ghost nodes */
760:   for(node = 0; node < numGhostNodes; node++) {
761:     if ((q->ghostNodes[node] <  firstNode[q->ghostNodeProcs[node]]) ||
762:         (q->ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1])) {
763:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
764:     }
765:   }
766: #endif

768:   /* Cleanup */
769:   PetscFree(sumSendNodes);
770:   PetscFree(sendGhostCoords);
771:   PetscFree(sumSendCoords);
772:   PetscFree(offsets);
773:   if (numSendNodes) {
774:     PetscFree(recvCoords);
775:     PetscFree(recvMarkers);
776:     PetscFree(recvDegrees);
777:   }
778:   PetscFree(sumRecvNodes);
779:   PetscFree(recvGhostCoords);
780:   PetscFree(sumRecvCoords);
781:   if (numRecvNodes) {
782:     PetscFree(sendCoords);
783:     PetscFree(sendMarkers);
784:     PetscFree(sendDegrees);
785:   }
786:   return(0);
787: }
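
/* --- Editor's sketch (annotation, not part of part2d.c) -------------------
   The exchange above is a two-phase MPI_Alltoallv() handshake: first each
   process sends the global numbers of the ghost nodes it needs and receives
   the requests it must satisfy; then the roles invert and coordinates,
   markers, and degrees flow back with the send/recv count arrays swapped.
   The displacement arrays are plain prefix sums of the per-process counts: */
static void ExamplePrefixSums(int numProcs, const int *counts, int *displs)
{
  int proc;

  displs[0] = 0;
  for(proc = 1; proc < numProcs; proc++) {
    displs[proc] = displs[proc-1] + counts[proc-1];
  }
}
/* ------------------------------------------------------------------------- */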

789: int PartitionGetNewGhostElements_Serial(Partition p, int *newProcElements, int *newElements)
790: {
791:   int  numLocElements   = p->numLocElements;
792:   int  numGhostElements = p->numOverlapElements - numLocElements;
793:   int  numProcs         = p->numProcs;
794:   int *elemPerm;        /* The new permutation for the sorted ghost elements */
795:   int  numNewElements;  /* Total number of new ghost elements to add */
796:   int *temp;
797:   int  proc, elem, i;
798:   int  ierr;

801:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++)
802:     numNewElements += newProcElements[proc];

804:   /* Add in new ghost elements */
805:   if (numNewElements > 0) {
806:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
807:     PetscMemcpy(temp, p->ghostElements, numGhostElements * sizeof(int));
808:     for(elem = 0; elem < numNewElements; elem++)
809:       temp[numGhostElements+elem] = newElements[elem];
810:     if (p->ghostElements != PETSC_NULL) {
811:       PetscFree(p->ghostElements);
812:     }
813:     p->ghostElements = temp;

815:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
816:     PetscMemcpy(temp, p->ghostElementProcs, numGhostElements * sizeof(int));
817:     for(proc = 0, elem = 0; proc < numProcs; proc++) {
818:       for(i = 0; i < newProcElements[proc]; i++)
819:         temp[numGhostElements+(elem++)] = proc;
820:     }
821:     if (p->ghostElementProcs != PETSC_NULL) {
822:       PetscFree(p->ghostElementProcs);
823:     }
824:     p->ghostElementProcs = temp;

826:     /* Resort ghost elements and remove duplicates */
827:     numGhostElements += numNewElements;
828:     PartitionSortGhosts_Private(p, &numGhostElements, p->ghostElements, &elemPerm);
829:     p->numOverlapElements = numLocElements + numGhostElements;
830:     PetscMalloc(numGhostElements * sizeof(int), &temp);
831:     for(elem = 0; elem < numGhostElements; elem++)
832:       temp[elem] = p->ghostElementProcs[elemPerm[elem]];
833:     PetscFree(p->ghostElementProcs);
834:     p->ghostElementProcs = temp;
835:     PetscFree(elemPerm);
836:   }
837: #ifdef PETSC_USE_BOPT_g
838:   /* Consistency check for ghost elements */
839:   for(elem = 0; elem < numGhostElements; elem++) {
840:     if ((p->ghostElements[elem] <  p->firstElement[p->ghostElementProcs[elem]]) ||
841:         (p->ghostElements[elem] >= p->firstElement[p->ghostElementProcs[elem]+1])) {
842:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost element source processor");
843:     }
844:   }
845: #endif
846:   return(0);
847: }

849: /*
850:   PartitionCreateElementMap_METIS - This function creates a map from elements to domains,
851:   using METIS to minimize the cut and approximately balance the sizes.

853:   Input Parameters:
854: + p           - The Partition
855: - numElements - The local number of elements

857:   Output Parameter:
858: . elementMap  - The map from elements to domains

860: .seealso: PartitionNodes_Private()
861: */
862: int PartitionCreateElementMap_METIS(Partition p, int numElements, int **elementMap)
863: {
864: #ifdef PETSC_HAVE_PARMETIS
865:   Mesh mesh = p->mesh;
866:   int       *elemProcs;     /* The processor assigned to each element */
867:   int       *elemOffsets;   /* The offsets into elemNeighbors of each element row for dual in CSR format */
868:   int       *elemNeighbors; /* The list of element neighbors for dual in CSR format */
869:   int       *edgeWeights;   /* The list of edge weights for dual in CSR format */
870:   int        weight;        /* A weight for constrained nodes */
871:   int        options[5];    /* The option flags for METIS */
872:   PetscTruth opt;
873:   int        ierr;

876:   /* Create the dual graph in distributed CSR format */
877:   weight = 0;
878:   ierr   = PetscOptionsGetInt(mesh->prefix, "-mesh_partition_weight", &weight, &opt);
879:   ierr   = MeshCreateDualCSR(mesh, &elemOffsets, &elemNeighbors, &edgeWeights, weight);

881:   /* Partition graph */
882:   if (numElements != p->numLocElements) {
883:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d for ParMETIS, should be %d", numElements, p->numLocElements);
884:   }
885:   PetscMalloc(numElements * sizeof(int), &elemProcs);
886:   options[0] = 0;   /* Returns the edge cut */
887:   options[1] = 150; /* The folding factor, 0 = no folding */
888:   options[2] = 1;   /* Serial initial partition */
889:   options[3] = 0;   /* C style numbering */
890:   options[4] = 0;   /* No timing information */
891:   PARKMETIS(p->firstElement, elemOffsets, PETSC_NULL, elemNeighbors, PETSC_NULL, elemProcs, options, p->comm);

893:   /* Destroy dual graph */
894:   MeshDestroyDualCSR(mesh, elemOffsets, elemNeighbors, edgeWeights);

896:   *elementMap = elemProcs;
897:   return(0);
898: #else
899:   SETERRQ(PETSC_ERR_SUP, "You must obtain George Karypis' ParMETIS software");
900: #endif
901: }
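
/* --- Editor's note (sketch, not part of part2d.c) -------------------------
   The dual graph handed to PARKMETIS() is in distributed CSR form.  For an
   assumed domain owning elements {0, 1}, where element 0 neighbors {1, 4}
   and element 1 neighbors {0, 2, 5}:

     elemOffsets   = {0, 2, 5}
     elemNeighbors = {1, 4,  0, 2, 5}

   PARKMETIS() then fills elemProcs[e] with the target domain of local
   element e, which becomes the element map returned above.
   ------------------------------------------------------------------------- */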

903: /*
904:   PartitionCreateElementMap_NodeBased - This function creates a map from elements to domains,
905:   using a previous partition of the nodes.

907:   Input Parameters:
908: + p           - The Partition
909: - numElements - The local number of elements

911:   Output Parameter:
912: . elementMap  - The map from elements to domains

914: .seealso: PartitionNodes_Private()
915: */
916: int PartitionCreateElementMap_NodeBased(Partition p, int numElements, int **elementMap)
917: {
918:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
919:   Mesh                     mesh         = p->mesh;
920:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
921:   int                      numCorners   = mesh->numCorners;
922:   int                     *elements     = tri->faces;
923:   int                     *firstElement = p->firstElement;
924:   int                     *firstNode    = q->firstNode;
925:   int                      numProcs     = p->numProcs;
926:   int                      rank         = p->rank;
927:   int                      startElement = firstElement[rank];
928:   int                      endElement   = firstElement[rank+1];
929:   int                     *elemProcs;     /* The processor assigned to each element */
930:   int                      proc, elem, corner, node;
931:   int                      ierr;

934:   if (numElements != p->numLocElements) {
935:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d should be %d", numElements, p->numLocElements);
936:   }
937:   /* Count elements on this partition -- keep element if you are the lower numbered domain */
938:   PetscMalloc(numElements * sizeof(int), &elemProcs);
939:   for(elem = 0; elem < numElements; elem++) {
940:     elemProcs[elem] = -1;
941:   }

943:   for(elem = startElement; elem < endElement; elem++) {
944:     for(corner = 0; corner < numCorners; corner++) {
945:       node = elements[elem*numCorners+corner];
946:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
947:         /* Get the domain of the node */
948:         for(proc = 0; proc < numProcs; proc++) {
949:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
950:         }
951:         if ((elemProcs[elem-startElement] < 0) || (proc < elemProcs[elem-startElement]))
952:           elemProcs[elem-startElement] = proc;
953:       }
954:     }
955:     /* If no one else claims it, take the element */
956:     if (elemProcs[elem-startElement] < 0) {
957:       elemProcs[elem-startElement] = rank;
958:     }
959:   }

961:   *elementMap = elemProcs;
962:   return(0);
963: }
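
/* --- Editor's note (sketch, not part of part2d.c) -------------------------
   Each process scans only its own element slice; an element is mapped to
   the lowest-ranked foreign owner of any of its corner nodes (e.g. corners
   owned by procs {rank, 2, 0} map the element to proc 0), and an element
   whose corners are all local falls through to rank.  Since every domain
   applies the same rule to disjoint slices, the map is built without
   communication.
   ------------------------------------------------------------------------- */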

965: /*
966:   PartitionCreateElementPartition_Private - This function uses the element map to create
967:   partition structures for element-based data.

969:   Input Parameters:
970: + p              - The Partition
971: - elementMap     - The map from elements to domains

973:   Output Parameters:
974: . ordering       - The new element ordering

976:   Output Parameters in Partition_Triangular_2D:
977: + numLocElements - The number of local elements
978: - firstElement   - The first element in each domain

980: .seealso: PartitionElements_Private()
981: */
982: int PartitionCreateElementPartition_Private(Partition p, int *elementMap, AO *ordering)
983: {
984:   int              numLocElements = p->numLocElements; /* Number of local elements before partitioning */
985:   int             *firstElement   = p->firstElement;
986:   int              numProcs       = p->numProcs;
987:   int              rank           = p->rank;
988:   int             *partSendElements;     /* The number of elements sent to each processor for partitioning */
989:   int             *sumSendElements;      /* The prefix sums of partSendElements */
990:   int             *partRecvElements;     /* The number of elements received from each processor for partitioning */
991:   int             *sumRecvElements;      /* The prefix sums of partRecvElements */
992:   int             *offsets;              /* The offsets into the send and receive arrays */
993:   int             *sendBuffer;
994:   int             *AppOrdering, *PetscOrdering;
995:   int              proc, elem;
996:   int              ierr;

999:   /* Initialize communication */
1000:   PetscMalloc(numProcs * sizeof(int), &partSendElements);
1001:   PetscMalloc(numProcs * sizeof(int), &sumSendElements);
1002:   PetscMalloc(numProcs * sizeof(int), &partRecvElements);
1003:   PetscMalloc(numProcs * sizeof(int), &sumRecvElements);
1004:   PetscMalloc(numProcs * sizeof(int), &offsets);
1005:   PetscMemzero(partSendElements,  numProcs * sizeof(int));
1006:   PetscMemzero(sumSendElements,   numProcs * sizeof(int));
1007:   PetscMemzero(partRecvElements,  numProcs * sizeof(int));
1008:   PetscMemzero(sumRecvElements,   numProcs * sizeof(int));
1009:   PetscMemzero(offsets,           numProcs * sizeof(int));

1011:   /* Get sizes of interior element number blocks to send to each processor */
1012:   for(elem = 0; elem < numLocElements; elem++) {
1013:     partSendElements[elementMap[elem]]++;
1014:   }

1016:   /* Get sizes of interior element number blocks to receive from each processor */
1017:   MPI_Alltoall(partSendElements, 1, MPI_INT, partRecvElements, 1, MPI_INT, p->comm);

1019:   /* Calculate offsets into the interior element number send array */
1020:   for(proc = 1; proc < numProcs; proc++) {
1021:     sumSendElements[proc] = sumSendElements[proc-1] + partSendElements[proc-1];
1022:     offsets[proc]         = sumSendElements[proc];
1023:   }

1025:   /* Calculate offsets into the interior element number receive array */
1026:   for(proc = 1; proc < numProcs; proc++) {
1027:     sumRecvElements[proc] = sumRecvElements[proc-1] + partRecvElements[proc-1];
1028:   }

1030:   /* Send interior element numbers to each processor -- could prevent copying elements already there I think */
1031:   p->numLocElements = sumRecvElements[numProcs-1] + partRecvElements[numProcs-1];
1032:   PetscMalloc(numLocElements    * sizeof(int), &sendBuffer);
1033:   PetscMalloc(p->numLocElements * sizeof(int), &AppOrdering);
1034:   PetscMalloc(p->numLocElements * sizeof(int), &PetscOrdering);
1035:   for(elem = 0; elem < numLocElements; elem++) {
1036:     sendBuffer[offsets[elementMap[elem]]++] = elem + firstElement[rank];
1037:   }
1038:   MPI_Alltoallv(sendBuffer,  partSendElements, sumSendElements, MPI_INT,
1039:                 AppOrdering, partRecvElements, sumRecvElements, MPI_INT, p->comm);
1040: 

1042:   /* If the mesh was initially distributed, we would need to send the elements themselves here */

1044:   /* Recompute size and offset of each domain */
1045:   MPI_Allgather(&p->numLocElements, 1, MPI_INT, &firstElement[1], 1, MPI_INT, p->comm);
1046:   for(proc = 1, firstElement[0] = 0; proc <= numProcs; proc++) {
1047:     firstElement[proc] = firstElement[proc] + firstElement[proc-1];
1048:   }

1050:   /* Create the global element reordering */
1051:   for(elem = 0; elem < p->numLocElements; elem++) {
1052:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
1053:     PetscOrdering[elem] = elem + firstElement[rank];
1054:   }

1056:   /* Cleanup */
1057:   PetscFree(partSendElements);
1058:   PetscFree(sumSendElements);
1059:   PetscFree(partRecvElements);
1060:   PetscFree(sumRecvElements);
1061:   PetscFree(offsets);
1062:   PetscFree(sendBuffer);

1064:   /* Create the global element reordering */
1065:   AOCreateBasic(p->comm, p->numLocElements, AppOrdering, PetscOrdering, ordering);
1066:   PetscLogObjectParent(p, p->ordering);

1068:   PetscFree(AppOrdering);
1069:   PetscFree(PetscOrdering);
1070:   return(0);
1071: }
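
/* --- Editor's note (sketch, not part of part2d.c) -------------------------
   AppOrdering collects the old (application) global element numbers that
   land on this process, and PetscOrdering holds the new contiguous numbers
   firstElement[rank] .. firstElement[rank+1]-1, so the AO built above
   translates between the two numberings, e.g.
   AOApplicationToPetsc(p->ordering, n, indices) as used further below.
   ------------------------------------------------------------------------- */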

1073: /*
1074:   PartitionPermuteElements_Private - This function permutes the data which is implicitly
1075:   indexed by element number.

1077:   Input Parameter:
1078: . p         - The Partition

1080:   Output Parameter in Mesh_Triangular:
1081: + faces     - The nodes on each element
1082: - neighbors - The neighbors of each element

1084: .seealso: PartitionElements_Private()
1085: */
1086: int PartitionPermuteElements_Private(Partition p)
1087: {
1088:   Mesh             mesh = p->mesh;
1089:   Mesh_Triangular *tri  = (Mesh_Triangular *) mesh->data;
1090:   int              ierr;

1093:   AOApplicationToPetscPermuteInt(p->ordering, mesh->numCorners, tri->faces);
1094:   AOApplicationToPetscPermuteInt(p->ordering, 3,                tri->neighbors);
1095:   return(0);
1096: }

1098: /*
1099:   PartitionRenumberElements_Private - This function renumbers the element-based data globally in
1100:   order to make the canonical numbers sequential in each domain.

1102:   Input Parameter:
1103: . p         - The Partition

1105:   Output Parameter in Mesh_Triangular:
1106: . neighbors - The neighbors of each element

1108: .seealso: PartitionElements_Private()
1109: */
1110: int PartitionRenumberElements_Private(Partition p)
1111: {
1112:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
1113:   int                      numElements = p->numElements;
1114:   int                     *neighbors   = tri->neighbors;
1115:   int                      ierr;

1118:   AOApplicationToPetsc(p->ordering, numElements*3, neighbors);
1119:   return(0);
1120: }

1122: /*
1123:   PartitionCalcGhostElements_Private - This function calculates the ghost elements.

1125:   Input Parameters:
1126: . p         - The Partition

1128:   Output Parameters in Partition_Triangular_2D:

1130: .seealso: PartitionElements_Private()
1131: */
1132: int PartitionCalcGhostElements_Private(Partition p)
1133: {
1134:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
1135:   Mesh                     mesh         = p->mesh;
1136:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
1137:   int                      numCorners   = mesh->numCorners;
1138:   int                      numElements  = p->numElements;
1139:   int                     *elements     = tri->faces;
1140:   int                     *neighbors    = tri->neighbors;
1141:   int                     *firstElement = p->firstElement;
1142:   int                      numProcs     = p->numProcs;
1143:   int                      rank         = p->rank;
1144:   int                      numLocNodes  = q->numLocNodes;
1145:   int                      startNode    = q->firstNode[rank];
1146:   PetscTruth               isNodePart   = q->isNodePartitioned;
1147:   int                     *newProcElements; /* The number of new ghost elements from each processor */
1148:   int                      numNewElements;  /* The number of new ghost elements */
1149:   int                     *newElements;     /* The new ghost elements */
1150:   int                     *offsets;         /* The offsets into the send and receive arrays */
1151:   int                      degree;          /* The degree of a vertex */
1152:   int                     *support;         /* The list of elements in the support of a basis function */
1153:   int                     *elemMap;
1154:   int                      proc, elem, bElem, sElem, nElem, corner, neighbor, node;
1155:   int                      ierr;

1158:   PetscMalloc(numElements  * sizeof(int), &elemMap);
1159:   PetscMalloc(numProcs     * sizeof(int), &newProcElements);
1160:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1161:   PetscMemzero(newProcElements,  numProcs     * sizeof(int));
1162:   PetscMemzero(offsets,          (numProcs+1) * sizeof(int));
1163:   for(elem = 0; elem < numElements; elem++) {
1164:     elemMap[elem] = -1;
1165:   }
1166:   for(elem = 0; elem < numElements; elem++) {
1167:     if ((elem >= firstElement[rank]) && (elem < firstElement[rank+1])) {
1168:       /* Find a boundary element */
1169:       for(neighbor = 0; neighbor < 3; neighbor++) {
1170:         bElem = neighbors[elem*3+neighbor];
1171:         if ((bElem >= 0) && ((bElem < firstElement[rank]) || (bElem >= firstElement[rank+1])))
1172:           break;
1173:       }

1175:       if (neighbor < 3) {
1176:         /* Check the support of each vertex for off-processor elements */
1177:         for(corner = 0; corner < numCorners; corner++) {
1178:           node = elements[elem*numCorners+corner];
1179:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1180:           for(sElem = 0; sElem < degree; sElem++) {
1181:             nElem = support[sElem];
1182:             if (elemMap[nElem] >= 0) continue;
1183:             for(proc = 0; proc < numProcs; proc++) {
1184:               if ((proc != rank) && (nElem >= firstElement[proc]) && (nElem < firstElement[proc+1])) {
1185:                 elemMap[nElem] = proc;
1186:                 break;
1187:               }
1188:             }
1189:           }
1190:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
1191:         }
1192:       }
1193:     } else if (isNodePart == PETSC_TRUE) {
1194:       if (elemMap[elem] >= 0) continue;
1195:       /* We may also need elements on which we have nodes, but are not attached to */
1196:       for(corner = 0; corner < numCorners; corner++) {
1197:         node = elements[elem*numCorners+corner] - startNode;
1198:         if ((node >= 0) && (node < numLocNodes)) {
1199:           for(proc = 0; proc < numProcs; proc++) {
1200:             if ((elem >= firstElement[proc]) && (elem < firstElement[proc+1])) {
1201:               elemMap[elem] = proc;
1202:               break;
1203:             }
1204:           }
1205:         }
1206:       }
1207:     }
1208:   }

1210:   /* Compute new ghost element offsets */
1211:   for(elem = 0; elem < numElements; elem++) {
1212:     if (elemMap[elem] >= 0) {
1213:       newProcElements[elemMap[elem]]++;
1214:     }
1215:   }
1216:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++) {
1217:     numNewElements  += newProcElements[proc];
1218:     offsets[proc+1]  = offsets[proc] + newProcElements[proc];
1219:   }

1221:   /* Get ghost nodes */
1222:   if (numNewElements > 0) {
1223:     PetscMalloc(numNewElements * sizeof(int), &newElements);
1224:     for(elem = 0; elem < numElements; elem++) {
1225:       if (elemMap[elem] >= 0) {
1226:         newElements[offsets[elemMap[elem]]++] = elem;
1227:       }
1228:     }
1229:   }
1230:   for(proc = 1; proc < numProcs-1; proc++) {
1231:     if (offsets[proc] - offsets[proc-1]  != newProcElements[proc]) {
1232:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc %d should be %d",
1233:                offsets[proc] - offsets[proc-1], proc, newProcElements[proc]);
1234:     }
1235:   }
1236:   if (offsets[0] != newProcElements[0]) {
1237:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc 0 should be %d",
1238:              offsets[0], newProcElements[0]);
1239:   }

1241:   /* Add new ghosts */
1242:   p->numOverlapElements = p->numLocElements;
1243:   PartitionGetNewGhostElements_Serial(p, newProcElements, newElements);

1245:   /* Cleanup */
1246:   PetscFree(elemMap);
1247:   PetscFree(newProcElements);
1248:   PetscFree(offsets);
1249:   if (numNewElements > 0) {
1250:     PetscFree(newElements);
1251:   }
1252:   return(0);
1253: }

1255: /*
1256:   PartitionDistributeElements_Private - This function distributes the element-based data, and
1257:   permutes arrays which are implicitly indexed by element number.

1259:   Input Parameters:
1260: . p         - The Partition

1262:   Output Parameters in Mesh_Triangular:
1263: + faces     - The nodes on each element
1264: - neighbors - The element neighbors

1266: .seealso: PartitionElements_Private()
1267: */
1268: int PartitionDistributeElements_Private(Partition p)
1269: {
1270:   Mesh             mesh               = p->mesh;
1271:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1272:   int              numLocElements     = p->numLocElements;
1273:   int              numOverlapElements = p->numOverlapElements;
1274:   int              numGhostElements   = numOverlapElements - numLocElements;
1275:   int              numCorners         = mesh->numCorners;
1276:   int             *firstElement       = p->firstElement;
1277:   int             *ghostElements      = p->ghostElements;
1278:   int              rank               = p->rank;
1279:   int             *temp;
1280:   int              elem, corner, neighbor;
1281:   int              ierr;

1283:   /* Note here that we can use PetscMemcpy() for the interior variables because we already permuted the
1284:      arrays so that ghost elements could be computed.
1285:   */
1287:   mesh->numFaces = numLocElements;
1288:   PetscMalloc(numOverlapElements*numCorners * sizeof(int), &temp);
1289:   /* Interior faces */
1290:   PetscMemcpy(temp, &tri->faces[firstElement[rank]*numCorners], numLocElements*numCorners * sizeof(int));
1291:   /* Ghost faces */
1292:   for(elem = 0; elem < numGhostElements; elem++) {
1293:     for(corner = 0; corner < numCorners; corner++) {
1294:       temp[(numLocElements+elem)*numCorners+corner] = tri->faces[ghostElements[elem]*numCorners+corner];
1295:     }
1296:   }
1297:   PetscFree(tri->faces);
1298:   tri->faces = temp;
1299:   PetscLogObjectMemory(p, numGhostElements*numCorners * sizeof(int));

1301:   PetscMalloc(numOverlapElements*3 * sizeof(int), &temp);
1302:   /* Interior neighbors */
1303:   PetscMemcpy(temp, &tri->neighbors[firstElement[rank]*3], numLocElements*3 * sizeof(int));
1304:   /* Ghost neighbors */
1305:   for(elem = 0; elem < numGhostElements; elem++) {
1306:     for(neighbor = 0; neighbor < 3; neighbor++) {
1307:       temp[(numLocElements+elem)*3+neighbor] = tri->neighbors[ghostElements[elem]*3+neighbor];
1308:     }
1309:   }
1310:   PetscFree(tri->neighbors);
1311:   tri->neighbors = temp;
1312:   PetscLogObjectMemory(p, numGhostElements*3 * sizeof(int));

1314:   return(0);
1315: }

1317: int PartitionElementGlobalToLocal_Private(Partition p)
1318: {
1319:   Mesh_Triangular *tri                = (Mesh_Triangular *) p->mesh->data;
1320:   int              numOverlapElements = p->numOverlapElements;
1321:   int             *neighbors          = tri->neighbors;
1322:   int              neighbor;
1323:   int              ierr;

1326:   /* We indicate neighbors which are not interior or ghost by -2 since boundaries are -1 */
1327:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
1328:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
1329:   }
1330:   return(0);
1331: }
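
/* --- Editor's note (sketch, not part of part2d.c) -------------------------
   After this pass a neighbors[] entry is a local index in
   [0, numOverlapElements) for interior and ghost elements, -1 for a
   physical boundary, or -2 (per the comment above) for an off-process
   element that is neither local nor ghost.
   ------------------------------------------------------------------------- */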

1333: int PartitionGetNewGhostNodes_Element(Partition p)
1334: {
1335:   Partition_Triangular_2D *q                = (Partition_Triangular_2D *) p->data;
1336:   Mesh                     mesh             = p->mesh;
1337:   Mesh_Triangular         *tri              = (Mesh_Triangular *) mesh->data;
1338:   int                      numCorners       = mesh->numCorners;
1339:   int                     *elements         = tri->faces;
1340:   int                     *firstElement     = p->firstElement;
1341:   int                      numGhostElements = p->numOverlapElements - p->numLocElements;
1342:   int                     *ghostElements    = p->ghostElements;
1343:   int                      numNodes         = q->numNodes;
1344:   int                     *firstNode        = q->firstNode;
1345:   int                      numProcs         = p->numProcs;
1346:   int                      rank             = p->rank;
1347:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1348:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1349:   int                     *newNodes = PETSC_NULL; /* New ghost nodes for this domain */
1350:   int                     *offsets;      /* The offsets into newNodes[] */
1351:   int                     *nodeMap;      /* The map of nodes to processors */
1352:   int                      proc, elem, corner, node, gNode;
1353:   int                      ierr;

1356:   if (q->isNodePartitioned == PETSC_FALSE)
1357:     return(0);
1358:   /* Initialize communication */
1359:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1360:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1361:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1362:   PetscMemzero(newProcNodes, numProcs      * sizeof(int));
1363:   PetscMemzero(offsets,     (numProcs+1)   * sizeof(int));
1364:   for(node = 0; node < numNodes; node++) {
1365:     nodeMap[node] = -1;
1366:   }

1368:   /* Check for new ghost nodes */
1369:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1370:     for(corner = 0; corner < numCorners; corner++) {
1371:       node = elements[elem*numCorners+corner];
1372:       if (nodeMap[node] >= 0) continue;

1374:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1375:         /* Get the domain of the node */
1376:         for(proc = 0; proc < numProcs; proc++) {
1377:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1378:         }
1379:         /* Add the node as a new ghost if not already present (a nonzero return means not found) */
1380:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1381:           nodeMap[node] = proc;
1382:         }
1383:       }
1384:     }
1385:   }
1386:   for(elem = 0; elem < numGhostElements; elem++) {
1387:     for(corner = 0; corner < numCorners; corner++) {
1388:       node = elements[ghostElements[elem]*numCorners+corner];
1389:       if (nodeMap[node] >= 0) continue;

1391:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1392:         /* Get the domain of the node */
1393:         for(proc = 0; proc < numProcs; proc++) {
1394:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1395:         }
1396:         /* Add the node as a new ghost if not already present (a nonzero return means not found) */
1397:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1398:           nodeMap[node] = proc;
1399:         }
1400:       }
1401:     }
1402:   }

1404:   /* Compute new ghost node offsets */
1405:   for(node = 0; node < numNodes; node++) {
1406:     if (nodeMap[node] >= 0) {
1407:       newProcNodes[nodeMap[node]]++;
1408:     }
1409:   }
1410:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1411:     numNewNodes   += newProcNodes[proc];
1412:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1413:   }

1415:   /* Get ghost nodes */
1416:   if (numNewNodes > 0) {
1417:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1418:     for(node = 0; node < numNodes; node++) {
1419:       if (nodeMap[node] >= 0) {
1420:         newNodes[offsets[nodeMap[node]]++] = node;
1421:       }
1422:     }
1423:   }
1424:   for(proc = 1; proc < numProcs; proc++) {
1425:     if (offsets[proc] - offsets[proc-1] != newProcNodes[proc]) {
1426:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1427:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1428:     }
1429:   }
1430:   if (offsets[0] != newProcNodes[0]) {
1431:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1432:   }

1434:   /* Add new ghosts */
1435:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1437:   /* Cleanup */
1438:   PetscFree(nodeMap);
1439:   PetscFree(newProcNodes);
1440:   PetscFree(offsets);
1441:   if (numNewNodes) {
1442:     PetscFree(newNodes);
1443:   }
1444:   return(0);
1445: }
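
/*
  Editor's sketch (not part of part2d.c): the newProcNodes[]/offsets[]/newNodes[]
  idiom above is a counting sort keyed on the owning processor. With the
  hypothetical input nodeMap = {1, -1, 0, 1} on two processors this produces
  newProcNodes = {1, 2}, offsets = {0, 1, 3}, and newNodes = {2, 0, 3}, i.e.
  the requested nodes grouped by owner. The fill loop advances offsets[proc]
  by exactly newProcNodes[proc], which is what the consistency checks above
  verify.
*/
static void SketchBucketFill(int numNodes, const int nodeMap[], int numProcs,
                             int newProcNodes[], int offsets[], int newNodes[])
{
  int node, proc;

  for(proc = 0; proc < numProcs; proc++) newProcNodes[proc] = 0;
  for(node = 0; node < numNodes; node++) {       /* count requested nodes per owner */
    if (nodeMap[node] >= 0) newProcNodes[nodeMap[node]]++;
  }
  offsets[0] = 0;                                /* prefix sum gives bucket starts */
  for(proc = 0; proc < numProcs; proc++) offsets[proc+1] = offsets[proc] + newProcNodes[proc];
  for(node = 0; node < numNodes; node++) {       /* fill, advancing each bucket cursor */
    if (nodeMap[node] >= 0) newNodes[offsets[nodeMap[node]]++] = node;
  }
}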

1447: int PartitionGetNewGhostNodes_Edge(Partition p)
1448: {
1449:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1450:   Mesh_Triangular         *tri       = (Mesh_Triangular *) p->mesh->data;
1451:   int                     *edges     = tri->edges;
1452:   int                     *firstEdge = q->firstEdge;
1453:   int                      numNodes  = q->numNodes;
1454:   int                     *firstNode = q->firstNode;
1455:   int                      numProcs  = p->numProcs;
1456:   int                      rank      = p->rank;
1457:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1458:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1459:   int                     *newNodes = PETSC_NULL; /* New ghost nodes for this domain */
1460:   int                     *offsets;      /* The offsets into newNodes[] */
1461:   int                     *nodeMap;      /* The map of nodes to processors */
1462:   int                      proc, edge, node, startNode, endNode, ghostNode, gNode;
1463:   int                      ierr;

1466:   /* Initialize communication */
1467:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1468:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1469:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1470:   PetscMemzero(newProcNodes, numProcs    * sizeof(int));
1471:   PetscMemzero(offsets,     (numProcs+1) * sizeof(int));
1472:   for(node = 0; node < numNodes; node++) {
1473:     nodeMap[node] = -1;
1474:   }

1476:   /* Check for new ghost nodes */
1477:   for(edge = firstEdge[rank]; edge < firstEdge[rank+1]; edge++) {
1478:     /* Check for new ghost node */
1479:     startNode = edges[edge*2];
1480:     endNode   = edges[edge*2+1];
1481:     ghostNode = -1;
1482:     if ((startNode < firstNode[rank]) || (startNode >= firstNode[rank+1])) {
1483:       ghostNode = startNode;
1484:     } else if ((endNode < firstNode[rank]) || (endNode >= firstNode[rank+1])) {
1485:       ghostNode = endNode;
1486:     }
1487:     if (ghostNode >= 0) {
1488:       /* Get the domain of the node */
1489:       for(proc = 0; proc < numProcs; proc++) {
1490:         if ((ghostNode >= firstNode[proc]) && (ghostNode < firstNode[proc+1])) break;
1491:       }
1492:       /* Add the node as a new ghost if not already present (a nonzero return means not found) */
1493:       if (PartitionGhostNodeIndex_Private(p, ghostNode, &gNode) && ((nodeMap[ghostNode] < 0) || (proc < nodeMap[ghostNode]))) {
1494:         /* We must add this node as a ghost node */
1495:         nodeMap[ghostNode] = proc;
1496:       }
1497:     }
1498:   }

1500:   /* Compute new ghost node offsets */
1501:   for(node = 0; node < numNodes; node++) {
1502:     if (nodeMap[node] >= 0) {
1503:       newProcNodes[nodeMap[node]]++;
1504:     }
1505:   }
1506:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1507:     numNewNodes   += newProcNodes[proc];
1508:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1509:   }

1511:   /* Get ghost nodes */
1512:   if (numNewNodes > 0) {
1513:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1514:     for(node = 0; node < numNodes; node++) {
1515:       if (nodeMap[node] >= 0) {
1516:         newNodes[offsets[nodeMap[node]]++] = node;
1517:       }
1518:     }
1519:   }
1520:   for(proc = 1; proc < numProcs; proc++) {
1521:     if (offsets[proc] - offsets[proc-1] != newProcNodes[proc]) {
1522:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1523:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1524:     }
1525:   }
1526:   if (offsets[0] != newProcNodes[0]) {
1527:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1528:   }

1530:   /* Add new ghosts */
1531:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1533:   /* Cleanup */
1534:   PetscFree(nodeMap);
1535:   PetscFree(newProcNodes);
1536:   PetscFree(offsets);
1537:   if (numNewNodes) {
1538:     PetscFree(newNodes);
1539:   }
1540:   return(0);
1541: }

1543: int PartitionElements_Private(Partition p)
1544: {
1545:   int (*f)(Partition, int, int **);
1546:   int  *elementMap; /* The map from elements to domains */
1547:   int   ierr;

1550:   /* Create a new map of elements to domains */
1551:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap", (void (**)(void)) &f);
1552:   (*f)(p, p->numLocElements, &elementMap);

1554: #if 0
1555:   /* Communicate interior elements */
1556:   GridInteriorExchange(numLocElements, elementMap, p->firstElement);
1557: #endif
1558:   /* Create the element partition */
1559:   PartitionCreateElementPartition_Private(p, elementMap, &p->ordering);
1560:   PetscFree(elementMap);

1562:   /* Permute arrays implicitly indexed by element number */
1563:   PartitionPermuteElements_Private(p);

1565:   /* Globally renumber the elements to make canonical numbers sequential in each domain */
1566:   PartitionRenumberElements_Private(p);

1568:   /* Calculate ghosts */
1569:   PartitionCalcGhostElements_Private(p);
1570: 
1571:   /* Check for new ghost nodes created by the element partition */
1572:   PartitionGetNewGhostNodes_Element(p);

1574:   p->isElementPartitioned = PETSC_TRUE;
1575:   return(0);
1576: }

1578: /*
1579:   PartitionCreateNodeMap_Simple_Seq - This function creates a map from nodes to domains,
1580:   keeping the contiguous node ranges of the preliminary sequential partition.

1582:   Input Parameters:
1583: + p        - The Partition
1584: - numNodes - The global number of nodes

1586:   Output Parameter:
1587: . nodeMap  - The map from nodes to domains

1589: .seealso: PartitionNodes_Private()
1590: */
1591: int PartitionCreateNodeMap_Simple_Seq(Partition p, int numNodes, int **nodeMap)
1592: {
1593:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1594:   int                     *firstNode = q->firstNode;
1595:   int                      rank      = p->rank;
1596:   int                     *nodeProcs; /* The processor which each node will lie on */
1597:   int                      node;
1598:   int                      ierr;

1601:   /* Use the existing interior nodes */
1602:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1603:   for(node = 0; node < numNodes; node++) {
1604:     nodeProcs[node] = -1;
1605:   }
1606:   for(node = firstNode[rank]; node < firstNode[rank+1]; node++) {
1607:     nodeProcs[node] = rank;
1608:   }

1610:   *nodeMap = nodeProcs;
1611:   return(0);
1612: }

1614: /*
1615:   PartitionCreateNodeMap_ElementBased - This function creates a map from nodes to domains,
1616:   based upon a prior partition of the elements.

1618:   Input Parameters:
1619: + p        - The Partition
1620: - numNodes - The global number of nodes

1622:   Output Parameter:
1623: . nodeMap  - The map from nodes to domains

1625: .seealso: PartitionNodes_Private()
1626: */
1627: int PartitionCreateNodeMap_ElementBased(Partition p, int numNodes, int **nodeMap)
1628: {
1629:   Mesh             mesh               = p->mesh;
1630:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1631:   int              numLocElements     = p->numLocElements;
1632:   int              numGhostElements   = p->numOverlapElements - p->numLocElements;
1633:   int              numCorners         = mesh->numCorners;
1634:   int             *elements           = tri->faces;
1635:   int             *firstElement       = p->firstElement;
1636:   int             *ghostElements      = p->ghostElements;
1637:   int             *ghostElementProcs  = p->ghostElementProcs;
1638:   int              rank               = p->rank;
1639:   int             *nodeProcs;     /* The processor which each node will lie on */
1640:   int             *support;
1641:   int              nProc, elem, sElem, nElem, nLocElem, gElem, corner, nCorner, node, degree;
1642:   int              ierr;

1645:   /* Count nodes on this partition -- keep node if you are the lower numbered domain */
1646:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1647:   for(node = 0; node < numNodes; node++) {
1648:     nodeProcs[node] = -1;
1649:   }

1651:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1652:     for(corner = 0; corner < numCorners; corner++) {
1653:       node = elements[elem*numCorners+corner];

1655:       /* Check the support of the node */
1656:       MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1657:       for(sElem = 0; sElem < degree; sElem++) {
1658:         nElem = support[sElem];
1659:         /* See if neighbor is in another domain */
1660:         if ((nElem < firstElement[rank]) || (nElem >= firstElement[rank+1])) {
1661:           /* Check to see if node is contained in the neighboring element */
1662:           for(nCorner = 0; nCorner < numCorners; nCorner++) {
1663:             if (elements[nElem*numCorners+nCorner] == node) {
1664:               ierr  = PartitionGlobalToLocalElementIndex(p, nElem, &nLocElem);
1665:               nProc = ghostElementProcs[nLocElem-numLocElements];
1666:               /* Give the node to the lowest numbered domain */
1667:               if ((nProc < rank) && ((nodeProcs[node] < 0) || (nProc < nodeProcs[node]))) {
1668:                 nodeProcs[node] = nProc;
1669:               }
1670:               break;
1671:             }
1672:           }
1673:         }
1674:       }
1675:       MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);

1677:       /* If no one else claims it, take the node */
1678:       if (nodeProcs[node] < 0) {
1679:         nodeProcs[node] = rank;
1680:       }
1681:     }
1682:   }

1684:   /* Now assign the ghost nodes from ghost elements (which we can never own) */
1685:   for(gElem = 0; gElem < numGhostElements; gElem++) {
1686:     for(corner = 0; corner < numCorners; corner++) {
1687:       node = elements[ghostElements[gElem]*numCorners+corner];
1688:       if (nodeProcs[node] < 0)
1689:         nodeProcs[node] = ghostElementProcs[gElem];
1690:     }
1691:   }

1693:   *nodeMap = nodeProcs;
1694:   return(0);
1695: }
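
/*
  Editor's sketch (not part of part2d.c): the ownership rule implemented above,
  in isolation. A node shared by several domains is kept by the lowest-ranked
  one; each rank only records a claim when a strictly lower rank touches the
  node, and otherwise takes the node itself. The helper and its arguments are
  hypothetical.
*/
static int SketchNodeOwner(int rank, int numTouching, const int touchingRanks[])
{
  int owner = rank, i;

  for(i = 0; i < numTouching; i++) {
    if (touchingRanks[i] < owner) owner = touchingRanks[i];  /* lowest rank wins */
  }
  return owner;
}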

1697: /*
1698:   PartitionCreateNodePartition_Private - This function uses the node map to create
1699:   partition structures for node-based data.

1701:   Input Parameters:
1702: + p               - The Partition
1703: - nodeMap         - The map from nodes to domains

1705:   Output Parameter:
1706: . ordering        - The new node ordering

1708:   Output Parameters in Partition_Triangular_2D:
1709: + numLocNodes     - The number of local nodes
1710: . numOverlapNodes - The number of local + ghost nodes
1711: . firstNode       - The first node in each domain
1712: - ghostNodes      - The global number for each ghost node

1714: .seealso: PartitionNodes_Private()
1715: */
1716: int PartitionCreateNodePartition_Private(Partition p, int *nodeMap, AO *ordering)
1717: {
1718:   Partition_Triangular_2D *q        = (Partition_Triangular_2D *) p->data;
1719:   int                      numNodes = q->numNodes;
1720:   int                      numProcs = p->numProcs;
1721:   int                      rank     = p->rank;
1722:   int                      numGhostNodes; /* Number of ghost nodes for this domain */
1723:   int                     *AppOrdering, *PetscOrdering;
1724:   int                      proc, node, index, index2;
1725:   int                      ierr;

1728:   /* Determine local and ghost sizes */
1729:   for(node = 0, q->numLocNodes = 0, numGhostNodes = 0; node < numNodes; node++) {
1730:     if (nodeMap[node] == rank) {
1731:       q->numLocNodes++;
1732:     } else if (nodeMap[node] >= 0) {
1733:       numGhostNodes++;
1734:     }
1735:   }

1737:   /* Recompute size and offset of each domain */
1738:   MPI_Allgather(&q->numLocNodes, 1, MPI_INT, &q->firstNode[1], 1, MPI_INT, p->comm);
1739:   for(proc = 1, q->firstNode[0] = 0; proc <= numProcs; proc++) {
1740:     q->firstNode[proc] = q->firstNode[proc] + q->firstNode[proc-1];
1741:   }
1742:   if (q->firstNode[numProcs] != numNodes) {
1743:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of nodes %d should be %d", q->firstNode[numProcs], numNodes);
1744:   }

1746:   /* Setup ghost node structures */
1747:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;
1748:   if (numGhostNodes > 0) {
1749:     PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
1750:   }

1752:   /* Get indices for reordering */
1753:   PetscMalloc(q->numLocNodes * sizeof(int), &AppOrdering);
1754:   PetscMalloc(q->numLocNodes * sizeof(int), &PetscOrdering);
1755:   for(node = 0; node < q->numLocNodes; node++) {
1756:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
1757:     PetscOrdering[node] = q->firstNode[rank] + node;
1758:   }
1759:   for(node = 0, index = 0, index2 = 0; node < numNodes; node++) {
1760:     if (nodeMap[node] == rank) {
1761:       AppOrdering[index++]    = node;
1762:     } else if (nodeMap[node] >= 0) {
1763:       q->ghostNodes[index2++] = node;
1764:     }
1765:   }
1766:   if (index  != q->numLocNodes) SETERRQ(PETSC_ERR_PLIB, "Invalid node renumbering");
1767:   if (index2 != numGhostNodes)  SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node renumbering");

1769:   /* Create the global node reordering */
1770:   ierr = AOCreateBasic(p->comm, q->numLocNodes, AppOrdering, PetscOrdering, ordering);
1771:   if (ierr) {
1772:     PartitionDebugAO_Private(p, nodeMap);
1773:   }

1775:   PetscFree(AppOrdering);
1776:   PetscFree(PetscOrdering);
1777:   return(0);
1778: }
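
/*
  Editor's note (not part of part2d.c): a worked example of the AO built above,
  with hypothetical data. Suppose two processors end up with two nodes each,
  rank 0 owning application nodes {3, 1} and rank 1 owning {0, 2}. Then

    rank 0: AppOrdering = {3, 1}, PetscOrdering = {0, 1}
    rank 1: AppOrdering = {0, 2}, PetscOrdering = {2, 3}

  and AOApplicationToPetsc() on the array {0, 1, 2, 3} yields {2, 1, 3, 0}:
  the canonical numbers become sequential within each domain.
*/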

1780: /*
1781:   PartitionPermuteNodes_Private - This function permutes the data which is implicitly
1782:   indexed by node number.

1784:   Input Parameter:
1785: . p       - The Partition

1787:   Output Parameters in Mesh_Triangular:
1788: + nodes   - The coordinates on each node
1789: . markers - The node markers
1790: - degrees - The degree of each node

1792: .seealso: PartitionNodes_Private()
1793: */
1794: int PartitionPermuteNodes_Private(Partition p)
1795: {
1796:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
1797:   Mesh                     mesh = p->mesh;
1798:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
1799:   int                      ierr;

1802:   AOApplicationToPetscPermuteReal(q->nodeOrdering, mesh->dim, tri->nodes);
1803:   AOApplicationToPetscPermuteInt(q->nodeOrdering,    1,         tri->markers);
1804:   AOApplicationToPetscPermuteInt(q->nodeOrdering,    1,         tri->degrees);
1805:   return(0);
1806: }

1808: /*
1809:   PartitionRenumberNodes_Private - This function renumbers the node-based data globally in
1810:   order to make the canonical numbers sequential in each domain.

1812:   Input Parameter:
1813: . p          - The Partition

1815:   Output Parameters in Mesh_Triangular:
1816: + faces      - The nodes in each element
1817: . edges      - The nodes on each edge
1818: - bdNodes    - The nodes on each boundary

1820:   Output Parameter in Partition_Triangular_2D:
1821: . ghostNodes - The global number of each ghost node

1823: .seealso: PartitionNodes_Private()
1824: */
1825: int PartitionRenumberNodes_Private(Partition p)
1826: {
1827:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
1828:   Mesh                     mesh          = p->mesh;
1829:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
1830:   int                      numElements   = p->numElements;
1831:   int                      numCorners    = mesh->numCorners;
1832:   int                     *faces         = tri->faces;
1833:   int                      numEdges      = q->numEdges;
1834:   int                     *edges         = tri->edges;
1835:   int                      numBdNodes    = q->numBdNodes;
1836:   int                     *bdNodes       = tri->bdNodes;
1837:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
1838:   int                     *ghostNodes    = q->ghostNodes;
1839:   int                      ierr;

1842:   AOApplicationToPetsc(q->nodeOrdering, numEdges*2,             edges);
1843:   AOApplicationToPetsc(q->nodeOrdering, numElements*numCorners, faces);
1844:   AOApplicationToPetsc(q->nodeOrdering, numBdNodes,             bdNodes);
1845:   AOApplicationToPetsc(q->nodeOrdering, numGhostNodes,          ghostNodes);
1846:   return(0);
1847: }

1849: /*
1850:   PartitionDistributeNodes_Private - This function distributes the node-based data, and
1851:   permutes arrays which are implicitly indexed by node number.

1853:   Input Parameter:
1854: . p       - The Partition

1856:   Output Parameters in Mesh_Triangular:
1857: + nodes   - The node coordinates
1858: . markers - The node markers
1859: - degrees - The node degrees

1861: .seealso: PartitionNodes_Private()
1862: */
1863: int PartitionDistributeNodes_Private(Partition p)
1864: {
1865:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
1866:   Mesh                     mesh            = p->mesh;
1867:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
1868:   int                      dim             = mesh->dim;
1869:   int                      numLocNodes     = q->numLocNodes;
1870:   int                      numOverlapNodes = q->numOverlapNodes;
1871:   int                      numGhostNodes   = numOverlapNodes - numLocNodes;
1872:   int                     *firstNode       = q->firstNode;
1873:   int                     *ghostNodes      = q->ghostNodes;
1874:   int                      rank            = p->rank;
1875:   int                     *temp;
1876:   double                  *temp2;
1877:   int                      node, c;
1878:   int                      ierr;

1881:   mesh->numNodes = numLocNodes;
1882:   PetscMalloc(numOverlapNodes*dim * sizeof(double), &temp2);
1883:   /* Interior nodes */
1884:   PetscMemcpy(temp2, &tri->nodes[firstNode[rank]*dim], numLocNodes*dim * sizeof(double));
1885:   /* Ghost nodes */
1886:   for(node = 0; node < numGhostNodes; node++) {
1887:     for(c = 0; c < dim; c++) {
1888:       temp2[(numLocNodes+node)*dim+c] = tri->nodes[ghostNodes[node]*dim+c];
1889:     }
1890:   }
1891:   PetscFree(tri->nodes);
1892:   tri->nodes = temp2;
1893:   PetscLogObjectMemory(p, numGhostNodes*dim * sizeof(double));

1895:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
1896:   /* Interior markers */
1897:   PetscMemcpy(temp, &tri->markers[firstNode[rank]], numLocNodes * sizeof(int));
1898:   /* Ghost markers */
1899:   for(node = 0; node < numGhostNodes; node++) {
1900:     temp[numLocNodes+node] = tri->markers[ghostNodes[node]];
1901:   }
1902:   PetscFree(tri->markers);
1903:   tri->markers = temp;
1904:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

1906:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
1907:   /* Interior degrees */
1908:   PetscMemcpy(temp, &tri->degrees[firstNode[rank]], numLocNodes * sizeof(int));
1909:   /* Ghost degrees */
1910:   for(node = 0; node < numGhostNodes; node++) {
1911:     temp[numLocNodes+node] = tri->degrees[ghostNodes[node]];
1912:   }
1913:   PetscFree(tri->degrees);
1914:   tri->degrees = temp;
1915:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

1917:   return(0);
1918: }
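
/*
  Editor's sketch (not part of part2d.c): the move-interior-then-gather-ghosts
  pattern applied above to nodes, markers, and degrees (and earlier to faces
  and neighbors), written once for an int array with blockSize values per
  entry. The helper name is hypothetical; error handling is elided as in the
  rest of this listing.
*/
static int SketchLocalizeIntArray(int numLoc, int numGhost, int first, int blockSize,
                                  const int ghosts[], int **array)
{
  int *temp;
  int  g, b;

  PetscMalloc((numLoc+numGhost)*blockSize * sizeof(int), &temp);
  /* Interior values are contiguous in the old array, so one copy suffices */
  PetscMemcpy(temp, &(*array)[first*blockSize], numLoc*blockSize * sizeof(int));
  /* Ghost values are scattered and must be gathered block by block */
  for(g = 0; g < numGhost; g++) {
    for(b = 0; b < blockSize; b++) {
      temp[(numLoc+g)*blockSize+b] = (*array)[ghosts[g]*blockSize+b];
    }
  }
  PetscFree(*array);
  *array = temp;
  return(0);
}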

1920: /*
1921:   PartitionNodeCalcGhostProcs_Private - This function determines the processor from
1922:   which each ghost node comes.

1924:   Input Parameter:
1925: . p              - The Partition

1927:   Output Parameter in Partition_Triangular_2D:
1928: . ghostNodeProcs - The domain of each ghost node

1930: .seealso: PartitionNodes_Private()
1931: */
1932: int PartitionNodeCalcGhostProcs_Private(Partition p)
1933: {
1934:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
1935:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
1936:   int                     *ghostNodes    = q->ghostNodes;
1937:   int                     *firstNode     = q->firstNode;
1938:   int                      numProcs      = p->numProcs;
1939:   int                     *nodePerm;
1940:   int                      proc, node;
1941:   int                      ierr;

1944:   if (numGhostNodes == 0)
1945:     return(0);

1947:   /* Resort ghost nodes after renumbering */
1948:   PartitionSortGhosts_Private(p, &numGhostNodes, ghostNodes, &nodePerm);
1949:   PetscFree(nodePerm);
1950:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;

1952:   /* calculate ghost node domains */
1953:   PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
1954:   for(node = 0; node < numGhostNodes; node++) {
1955:     for(proc = 0; proc < numProcs; proc++) {
1956:       if ((ghostNodes[node] >= firstNode[proc]) && (ghostNodes[node] <  firstNode[proc+1])) {
1957:         q->ghostNodeProcs[node] = proc;
1958:         break;
1959:       }
1960:     }
1961:     if (proc == numProcs) SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d, global number %d", node, ghostNodes[node]);
1962:   }
1963: #ifdef PETSC_USE_BOPT_g
1964:   for(node = 0; node < numGhostNodes; node++) {
1965:     if ((ghostNodes[node] < firstNode[q->ghostNodeProcs[node]]) || (ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1]))
1966:       SETERRQ2(PETSC_ERR_LIB, "Invalid source processor %d on ghost node %d", q->ghostNodeProcs[node], node);
1967:   }
1968: #endif
1969:   return(0);
1970: }
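
/*
  Editor's sketch (not part of part2d.c): the owner lookup above scans
  firstNode[] linearly for every ghost node. Since firstNode[] is
  nondecreasing, the same lookup can be done by bisection; a hypothetical
  drop-in form:
*/
static int SketchNodeToProc(int numProcs, const int firstNode[], int node)
{
  int lo = 0, hi = numProcs;

  if ((node < firstNode[0]) || (node >= firstNode[numProcs])) return -1;
  /* Invariant: firstNode[lo] <= node < firstNode[hi] */
  while (hi - lo > 1) {
    int mid = lo + (hi - lo)/2;

    if (node >= firstNode[mid]) lo = mid;
    else                        hi = mid;
  }
  return lo;
}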

1972: int PartitionNodes_Private(Partition p)
1973: {
1974:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
1975:   int                    (*f)(Partition, int, int **);
1976:   int                     *nodeMap; /* The map from nodes to domains */
1977:   int                      ierr;

1980:   /* Create a new map of nodes to domains */
1981:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap", (void (**)(void)) &f);
1982:   (*f)(p, q->numNodes, &nodeMap);

1984:   /* Create the node partition */
1985:   PartitionCreateNodePartition_Private(p, nodeMap, &q->nodeOrdering);
1986:   PetscFree(nodeMap);

1988:   /* Permute arrays implicitly indexed by node number */
1989:   PartitionPermuteNodes_Private(p);

1991:   /* Globally renumber the nodes to make canonical numbers sequential in each domain */
1992:   /* WARNING: We must resort ghost nodes after renumbering, but this is done anyway in edge partitioning */
1993:   PartitionRenumberNodes_Private(p);

1995:   /* Assign ghost node source processors */
1996:   PartitionNodeCalcGhostProcs_Private(p);

1998:   q->isNodePartitioned = PETSC_TRUE;
1999:   return(0);
2000: }

2002: /*
2003:   PartitionCreateEdgeMap_NodeBased - This function creates a map from edges to domains,
2004:   using a previous partition of the nodes.

2006:   Input Parameters:
2007: + p        - The Partition
2008: - numEdges - The global number of edges

2010:   Output Parameter:
2011: . edgeMap  - The map from edges to domains

2013: .seealso: PartitionEdges_Private()
2014: */
2015: int PartitionCreateEdgeMap_NodeBased(Partition p, int numEdges, int **edgeMap)
2016: {
2017:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
2018:   Mesh                     mesh      = p->mesh;
2019:   Mesh_Triangular         *tri       = (Mesh_Triangular *) mesh->data;
2020:   int                     *edges     = tri->edges;
2021:   int                     *firstNode = q->firstNode;
2022:   int                      numProcs  = p->numProcs;
2023:   int                      rank      = p->rank;
2024:   int                      startProc = -1;
2025:   int                      endProc   = -1;
2026:   int                     *edgeProcs;     /* The processor assigned to each edge */
2027:   int                      proc, edge, startNode, endNode;
2028:   int                      ierr;

2031:   PetscMalloc(numEdges * sizeof(int), &edgeProcs);
2032:   for(edge = 0; edge < numEdges; edge++) {
2033:     edgeProcs[edge] = -1;
2034:   }

2036:   /* Count edges on this partition -- keep edge if you are the lower numbered domain */
2037:   for(edge = 0; edge < numEdges; edge++) {
2038:     startNode = edges[edge*2];
2039:     endNode   = edges[edge*2+1];

2041:     if ((startNode >= firstNode[rank]) && (startNode < firstNode[rank+1])) {
2042:       /* startNode is local */
2043:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
2044:         /* endNode is local */
2045:         edgeProcs[edge] = rank;
2046:       } else {
2047:         /* endNode is not local */
2048:         for(proc = 0; proc < numProcs; proc++) {
2049:           if ((endNode >= firstNode[proc]) && (endNode < firstNode[proc+1])) {
2050:             endProc = proc;
2051:             break;
2052:           }
2053:         }
2054:         if (rank < endProc) {
2055:           edgeProcs[edge] = rank;
2056:         }
2057:       }
2058:     } else {
2059:       /* startNode is not local */
2060:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
2061:         /* endNode is local */
2062:         for(proc = 0; proc < numProcs; proc++) {
2063:           if ((startNode >= firstNode[proc]) && (startNode < firstNode[proc+1])) {
2064:             startProc = proc;
2065:             break;
2066:           }
2067:         }
2068:         if (rank < startProc) {
2069:           edgeProcs[edge] = rank;
2070:         }
2071:       }
2072:     }
2073:   }

2075:   *edgeMap = edgeProcs;
2076:   return(0);
2077: }
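
/*
  Editor's sketch (not part of part2d.c): the case analysis above reduces to
  "an edge belongs to the lower-ranked of the two domains owning its
  endpoints". Each rank claims an edge only when it is that minimum, so every
  edge is claimed exactly once. Expressed with the bisection helper sketched
  earlier:
*/
static int SketchEdgeOwner(int numProcs, const int firstNode[], int startNode, int endNode)
{
  int startProc = SketchNodeToProc(numProcs, firstNode, startNode);
  int endProc   = SketchNodeToProc(numProcs, firstNode, endNode);

  return PetscMin(startProc, endProc);
}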

2079: int PartitionCreateEdgePartition_Private(Partition p, int *edgeMap, AO *ordering)
2080: {
2081:   Mesh                     mesh          = p->mesh;
2082:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2083:   int                      numEdges      = mesh->numEdges;
2084:   int                      numProcs      = p->numProcs;
2085:   int                      rank          = p->rank;
2086:   int                     *AppOrdering   = PETSC_NULL;
2087:   int                     *PetscOrdering = PETSC_NULL;
2088:   int                      proc, edge, index;
2089:   int                      ierr;

2092:   /* Determine local edges and new ghost nodes */
2093:   for(edge = 0, q->numLocEdges = 0; edge < numEdges; edge++) {
2094:     if (edgeMap[edge] == rank) {
2095:       q->numLocEdges++;
2096:     }
2097:   }

2099:   /* Recompute size and offset of each domain */
2100:   MPI_Allgather(&q->numLocEdges, 1, MPI_INT, &q->firstEdge[1], 1, MPI_INT, p->comm);
2101:   for(proc = 1, q->firstEdge[0] = 0; proc <= numProcs; proc++)
2102:     q->firstEdge[proc] = q->firstEdge[proc] + q->firstEdge[proc-1];
2103:   if (q->firstEdge[numProcs] != q->numEdges) {
2104:     SETERRQ2(PETSC_ERR_PLIB, "Invalid global number of edges %d should be %d", q->firstEdge[numProcs], q->numEdges);
2105:   }

2107:   /* Get indices for reordering */
2108:   if (q->numLocEdges > 0) {
2109:     PetscMalloc(q->numLocEdges * sizeof(int), &AppOrdering);
2110:     PetscMalloc(q->numLocEdges * sizeof(int), &PetscOrdering);
2111:   }
2112:   for(edge = 0; edge < q->numLocEdges; edge++) {
2113:     PetscOrdering[edge] = q->firstEdge[rank] + edge;
2114:   }
2115:   for(edge = 0, index = 0; edge < numEdges; edge++) {
2116:     if (edgeMap[edge] == rank) {
2117:       AppOrdering[index++] = edge;
2118:     }
2119:   }
2120:   if (index != q->numLocEdges) {
2121:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of local edges %d should be %d", index, q->numLocEdges);
2122:   }

2124:   /* Create the global edge reordering */
2125:   AOCreateBasic(p->comm, q->numLocEdges, AppOrdering, PetscOrdering, ordering);

2127:   if (q->numLocEdges > 0) {
2128:     PetscFree(AppOrdering);
2129:     PetscFree(PetscOrdering);
2130:   }
2131:   return(0);
2132: }

2134: /*
2135:   PartitionDistributeEdges_Private - This function distributes the edge-based data, and
2136:   permutes arrays which are implicitly indexed by edge number.

2138:   Input Parameter:
2139: . p           - The Partition

2141:   Output Parameters in Mesh_Triangular:
2142: + edges       - The nodes on each edge
2143: - edgemarkers - The edge markers

2145: .seealso: PartitionEdges_Private()
2146: */
2147: int PartitionDistributeEdges_Private(Partition p) {
2148:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2149:   Mesh                     mesh        = p->mesh;
2150:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
2151:   int                      numLocEdges = q->numLocEdges;
2152:   int                     *firstEdge   = q->firstEdge;
2153:   int                      rank        = p->rank;
2154:   int                     *temp        = PETSC_NULL;
2155:   int                      ierr;

2158:   mesh->numEdges = numLocEdges;
2159:   if (numLocEdges > 0) {
2160:     PetscMalloc(numLocEdges*2 * sizeof(int), &temp);
2161:     PetscMemcpy(temp, &tri->edges[firstEdge[rank]*2], numLocEdges*2 * sizeof(int));
2162:     PetscFree(tri->edges);
2163:   }
2164:   tri->edges = temp;

2166:   if (numLocEdges > 0) {
2167:     PetscMalloc(numLocEdges * sizeof(int), &temp);
2168:     PetscMemcpy(temp, &tri->edgemarkers[firstEdge[rank]], numLocEdges * sizeof(int));
2169:     PetscFree(tri->edgemarkers);
2170:   }
2171:   tri->edgemarkers = temp;

2173:   return(0);
2174: }

2176: /*
2177:   PartitionPermuteEdges_Private - This function permutes the data which is implicitly
2178:   indexed by edge number.

2180:   Input Parameter:
2181: . p           - The Partition

2183:   Output Parameters in Mesh_Triangular:
2184: + edges       - The nodes on each edge
2185: - edgemarkers - The edge markers

2187: .seealso: PartitionEdges_Private()
2188: */
2189: int PartitionPermuteEdges_Private(Partition p)
2190: {
2191:   Partition_Triangular_2D *q   = (Partition_Triangular_2D *) p->data;
2192:   Mesh_Triangular         *tri = (Mesh_Triangular *) p->mesh->data;
2193:   int                      ierr;

2196:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 2, tri->edges);
2197:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 1, tri->edgemarkers);
2198:   return(0);
2199: }

2201: /*
2202:   PartitionRenumberEdges_Private - This function renumbers the edge-based data globally in
2203:   order to make the canonical numbers sequential in each domain.

2205:   Input Parameter:
2206: . p       - The Partition

2208:   Output Parameter in Mesh_Triangular:
2209: . bdEdges - The edges on each boundary

2211: .seealso: PartitionEdges_Private()
2212: */
2213: int PartitionRenumberEdges_Private(Partition p)
2214: {
2215:   Partition_Triangular_2D *q          = (Partition_Triangular_2D *) p->data;
2216:   Mesh                     mesh       = p->mesh;
2217:   Mesh_Triangular         *tri        = (Mesh_Triangular *) mesh->data;
2218:   int                      numBdEdges = mesh->numBdEdges;
2219:   int                     *bdEdges    = tri->bdEdges;
2220:   int                      ierr;

2223:   AOApplicationToPetsc(q->edgeOrdering, numBdEdges, bdEdges);
2224:   return(0);
2225: }

2227: int PartitionEdgeGlobalToLocal_Private(Partition p)
2228: {
2229:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2230:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
2231:   int                      numLocEdges = q->numLocEdges;
2232:   int                     *edges       = tri->edges;
2233:   int                      node;
2234:   int                      ierr;

2237:   for(node = 0; node < numLocEdges*2; node++) {
2238:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2239:   }
2240:   return(0);
2241: }

2243: int PartitionEdges_Private(Partition p)
2244: {
2245:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
2246:   int                    (*f)(Partition, int, int **);
2247:   int                     *edgeMap; /* The map from edges to domains */
2248:   int                      ierr;

2251:   /* Create a new map of edges to domains */
2252:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap", (void (**)(void)) &f);
2253:   (*f)(p, q->numEdges, &edgeMap);

2255:   /* Create the edge partition */
2256:   PartitionCreateEdgePartition_Private(p, edgeMap, &q->edgeOrdering);
2257:   PetscFree(edgeMap);

2259:   /* Permute arrays implicitly indexed by edge number */
2260:   PartitionPermuteEdges_Private(p);

2262:   /* Globally renumber the edges to make canonical numbers sequential in each domain */
2263:   PartitionRenumberEdges_Private(p);

2265:   /* Check for new ghost nodes created by the edge partition */
2266:   PartitionGetNewGhostNodes_Edge(p);

2268:   q->isEdgePartitioned = PETSC_TRUE;
2269:   return(0);
2270: }

2272: int PartitionBoundaryNodes_Private(Partition p)
2273: {
2274:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2275:   Mesh                     mesh          = p->mesh;
2276:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
2277:   int                      numBdNodes    = mesh->numBdNodes;
2278:   int                     *bdNodes       = tri->bdNodes;
2279:   int                     *firstNode     = q->firstNode;
2280:   int                      numProcs      = p->numProcs;
2281:   int                      rank          = p->rank;
2282:   int                      proc, node;
2283:   int                      ierr;

2286:   q->numLocBdNodes = 0;
2287:   for(node = 0; node < numBdNodes; node++) {
2288:     if ((bdNodes[node] >= firstNode[rank]) && (bdNodes[node] < firstNode[rank+1]))
2289:       q->numLocBdNodes++;
2290:   }
2291:   MPI_Allgather(&q->numLocBdNodes, 1, MPI_INT, &q->firstBdNode[1], 1, MPI_INT, p->comm);
2292:   q->firstBdNode[0] = 0;
2293:   for(proc = 1; proc <= numProcs; proc++) {
2294:     q->firstBdNode[proc] = q->firstBdNode[proc] + q->firstBdNode[proc-1];
2295:   }
2296:   if (q->firstBdNode[numProcs] != q->numBdNodes) {
2297:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of boundary nodes %d should be %d", q->firstBdNode[numProcs], q->numBdNodes);
2298:   }
2299:   return(0);
2300: }
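
/*
  Editor's sketch (not part of part2d.c): the Allgather/prefix-sum idiom used
  above for firstBdNode[] (and likewise for firstNode[] and firstEdge[]) in
  isolation. Every rank contributes its local count; afterwards first[proc] is
  the global offset of domain proc and first[numProcs] is the global total.
  The helper name is hypothetical.
*/
static int SketchGatherOffsets(MPI_Comm comm, int numProcs, int localCount, int first[])
{
  int proc;

  MPI_Allgather(&localCount, 1, MPI_INT, &first[1], 1, MPI_INT, comm);
  first[0] = 0;
  for(proc = 1; proc <= numProcs; proc++) {
    first[proc] = first[proc] + first[proc-1];   /* running sum turns counts into offsets */
  }
  return(0);
}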

2302: /*
2303:   PartitionDistributeBdNodes_Private - This function determines which ghost nodes of this
2304:   domain lie on the boundary, and records them as ghost boundary nodes.

2306:   Input Parameter:
2307: . p           - The Partition

2309:   Output Parameters in Partition_Triangular_2D:
2310: + numOverlapBdNodes - The number of local + ghost boundary nodes
2311: - ghostBdNodes      - The ghost boundary nodes of this domain

2313: .seealso: PartitionBoundaryNodes_Private()
2314: */
2315: int PartitionDistributeBdNodes_Private(Partition p)
2316: {
2317:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2318:   Mesh_Triangular         *tri           = (Mesh_Triangular *) p->mesh->data;
2319:   int                      numLocNodes   = q->numLocNodes;
2320:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
2321:   int                     *markers       = tri->markers;
2322:   int                      numLocBdNodes = q->numLocBdNodes;
2323:   int                      node, bdNode;
2324:   int                      ierr;

2327:   /* Process ghost boundary nodes */
2328:   q->numOverlapBdNodes = numLocBdNodes;
2329:   for(node = 0; node < numGhostNodes; node++) {
2330:     if (markers[numLocNodes+node] != 0)
2331:       q->numOverlapBdNodes++;
2332:   }
2333:   if (q->numOverlapBdNodes > numLocBdNodes) {
2334:     PetscMalloc((q->numOverlapBdNodes - numLocBdNodes) * sizeof(int), &q->ghostBdNodes);
2335:     for(node = 0, bdNode = 0; node < numGhostNodes; node++) {
2336:       if (markers[numLocNodes+node] != 0)
2337:         q->ghostBdNodes[bdNode++] = node;
2338:     }
2339:   }
2340:   return(0);
2341: }

2343: int PartitionDistribute_Private(Partition p)
2344: {

2348:   /* Redistribute the elements and arrays implicitly numbered by element numbers */
2349:   PartitionDistributeElements_Private(p);

2351:   /* Redistribute the nodes and permute arrays implicitly numbered by node numbers */
2352:   PartitionDistributeNodes_Private(p);

2354:   /* Redistribute the edges and permute arrays implicitly numbered by edge numbers */
2355:   PartitionDistributeEdges_Private(p);

2357:   /* Store ghost boundary nodes */
2358:   PartitionDistributeBdNodes_Private(p);
2359:   return(0);
2360: }

2362: int PartitionGlobalToLocal_Private(Partition p)
2363: {
2364:   Partition_Triangular_2D *q                  = (Partition_Triangular_2D *) p->data;
2365:   Mesh                     mesh               = p->mesh;
2366:   Mesh_Triangular         *tri                = (Mesh_Triangular *) mesh->data;
2367:   int                      numOverlapElements = p->numOverlapElements;
2368:   int                      numCorners         = mesh->numCorners;
2369:   int                     *faces              = tri->faces;
2370:   int                     *neighbors          = tri->neighbors;
2371:   int                      numLocEdges        = q->numLocEdges;
2372:   int                     *edges              = tri->edges;
2373:   int                      corner, neighbor, node;
2374:   int                      ierr;

2377:   for(corner = 0; corner < numOverlapElements*numCorners; corner++) {
2378:     PartitionGlobalToLocalNodeIndex(p, faces[corner], &faces[corner]);
2379:   }
2380:   /* We indicate neighbors which are not interior or ghost by -2 since boundaries are -1 */
2381:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
2382:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
2383:   }
2384:   for(node = 0; node < numLocEdges*2; node++) {
2385:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2386:   }
2387:   return(0);
2388: }

2390: /*@
2391:   PartitionCreateTriangular2D - Creates a partition of a two dimensional unstructured grid.

2393:   Collective on Mesh

2395:   Input Parameter:
2396: . mesh      - The mesh to be partitioned

2398:   Output Parameter:
2399: . partition - The partition

2401:   Level: beginner

2403: .keywords: unstructured mesh, partition
2404: .seealso: MeshCreateTriangular2D()
2405: @*/
2406: int PartitionCreateTriangular2D(Mesh mesh, Partition *part)
2407: {
2408:   int        numProcs;
2409:   PetscTruth opt;
2410:   int        ierr;

2413:   MPI_Comm_size(mesh->comm, &numProcs);
2414:   PartitionCreate(mesh, part);
2415:   if (numProcs == 1) {
2416:     PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_Uni",
2417:                                       (void (*)(void)) PartitionCreate_Uni);
2418: 
2419:   } else {
2420:     PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_ElementBased",
2421:                                       (void (*)(void)) PartitionCreate_ElementBased);
2422: 
2423:     PetscOptionsHasName(mesh->prefix, "-part_node_based", &opt);
2424:     if (opt == PETSC_TRUE) {
2425:       PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_NodeBased",
2426:                                         (void (*)(void)) PartitionCreate_NodeBased);
2427: 
2428:     }
2429:   }
2430:   PartitionSetType(*part, PARTITION_TRIANGULAR_2D);

2432:   PartitionViewFromOptions_Private(*part, "Partition");
2433:   return(0);
2434: }
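
/*
  Editor's note (not part of part2d.c): typical use from application code,
  assuming the Mesh has already been built. PartitionDestroy() is assumed by
  analogy with other PETSc objects and is hypothetical here.

    Mesh      mesh;
    Partition part;

    ... create the triangular mesh ...
    PartitionCreateTriangular2D(mesh, &part);
    ... run with -part_node_based to select the node-based algorithm ...
    PartitionDestroy(part);
*/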

2436: EXTERN_C_BEGIN
2437: int PartitionSerialize_Triangular_2D(Mesh mesh, Partition *part, PetscViewer viewer, PetscTruth store)
2438: {
2439:   Partition                p;
2440:   Partition_Triangular_2D *q;
2441:   int                      fd;
2442:   int                      numGhostElements, numGhostNodes, numGhostBdNodes, hasOrdering;
2443:   int                      numProcs, rank;
2444:   int                      one  = 1;
2445:   int                      zero = 0;
2446:   int                      ierr;

2449:   PetscViewerBinaryGetDescriptor(viewer, &fd);
2450:   if (store == PETSC_TRUE) {
2451:     p = *part;
2452:     numProcs = p->numProcs;
2453:     numGhostElements = p->numOverlapElements - p->numLocElements;
2454:     PetscBinaryWrite(fd, &p->numProcs,           1,                PETSC_INT, 0);
2455:     PetscBinaryWrite(fd, &p->rank,               1,                PETSC_INT, 0);
2456:     PetscBinaryWrite(fd, &p->numLocElements,     1,                PETSC_INT, 0);
2457:     PetscBinaryWrite(fd, &p->numElements,        1,                PETSC_INT, 0);
2458:     PetscBinaryWrite(fd, &p->numOverlapElements, 1,                PETSC_INT, 0);
2459:     PetscBinaryWrite(fd,  p->firstElement,       numProcs+1,       PETSC_INT, 0);
2460:     PetscBinaryWrite(fd,  p->ghostElements,      numGhostElements, PETSC_INT, 0);
2461:     PetscBinaryWrite(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT, 0);
2462:     if (p->ordering != PETSC_NULL) {
2463:       PetscBinaryWrite(fd, &one,                 1,                PETSC_INT, 0);
2464:       AOSerialize(p->comm, &p->ordering, viewer, store);
2465:     } else {
2466:       PetscBinaryWrite(fd, &zero,                1,                PETSC_INT, 0);
2467:     }

2469:     q    = (Partition_Triangular_2D *) (*part)->data;
2470:     numGhostNodes   = q->numOverlapNodes   - q->numLocNodes;
2471:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2472:     PetscBinaryWrite(fd, &q->numLocNodes,        1,                PETSC_INT, 0);
2473:     PetscBinaryWrite(fd, &q->numNodes,           1,                PETSC_INT, 0);
2474:     PetscBinaryWrite(fd, &q->numOverlapNodes,    1,                PETSC_INT, 0);
2475:     PetscBinaryWrite(fd,  q->firstNode,          numProcs+1,       PETSC_INT, 0);
2476:     PetscBinaryWrite(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT, 0);
2477:     PetscBinaryWrite(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT, 0);
2478:     PetscBinaryWrite(fd, &q->numLocEdges,        1,                PETSC_INT, 0);
2479:     PetscBinaryWrite(fd, &q->numEdges,           1,                PETSC_INT, 0);
2480:     PetscBinaryWrite(fd,  q->firstEdge,          numProcs+1,       PETSC_INT, 0);
2481:     PetscBinaryWrite(fd, &q->numLocBdNodes,      1,                PETSC_INT, 0);
2482:     PetscBinaryWrite(fd, &q->numBdNodes,         1,                PETSC_INT, 0);
2483:     PetscBinaryWrite(fd, &q->numOverlapBdNodes,  1,                PETSC_INT, 0);
2484:     PetscBinaryWrite(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT, 0);
2485:     PetscBinaryWrite(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT, 0);
2486:   } else {
2487:     /* Create the partition context */
2488:     PartitionCreate(mesh, &p);
2489:     PetscNew(Partition_Triangular_2D, &q);
2490:     PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2491:     PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2492:     PetscStrallocpy(PARTITION_TRIANGULAR_2D, &p->type_name);
2493:     p->data = (void *) q;

2495:     MPI_Comm_size(p->comm, &numProcs);
2496:     MPI_Comm_rank(p->comm, &rank);
2497:     PetscBinaryRead(fd, &p->numProcs,           1,                PETSC_INT);
2498:     PetscBinaryRead(fd, &p->rank,               1,                PETSC_INT);
2499:     if (p->numProcs != numProcs) {
2500:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid number of processors %d should be %d", numProcs, p->numProcs);
2501:     }
2502:     if (p->rank != rank) {
2503:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid processor rank %d should be %d", rank, p->rank);
2504:     }
2505:     PetscBinaryRead(fd, &p->numLocElements,     1,                PETSC_INT);
2506:     PetscBinaryRead(fd, &p->numElements,        1,                PETSC_INT);
2507:     PetscBinaryRead(fd, &p->numOverlapElements, 1,                PETSC_INT);
2508:     PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2509:     PetscBinaryRead(fd,  p->firstElement,       numProcs+1,       PETSC_INT);
2510:     numGhostElements = p->numOverlapElements - p->numLocElements;
2511:     if (numGhostElements > 0) {
2512:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElements);
2513:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElementProcs);
2514:     }
2515:     PetscBinaryRead(fd,  p->ghostElements,      numGhostElements, PETSC_INT);
2516:     PetscBinaryRead(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT);
2517:     PetscBinaryRead(fd, &hasOrdering,           1,                PETSC_INT);
2518:     if (hasOrdering) {
2519:       AOSerialize(p->comm, &p->ordering, viewer, store);
2520:     }

2522:     q->ghostNodes        = PETSC_NULL;
2523:     q->ghostNodeProcs    = PETSC_NULL;
2524:     q->ghostBdNodes      = PETSC_NULL;
2525:     q->nodeOrdering      = PETSC_NULL;
2526:     q->edgeOrdering      = PETSC_NULL;

2528:     PetscBinaryRead(fd, &q->numLocNodes,        1,                PETSC_INT);
2529:     PetscBinaryRead(fd, &q->numNodes,           1,                PETSC_INT);
2530:     PetscBinaryRead(fd, &q->numOverlapNodes,    1,                PETSC_INT);
2531:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2532:     PetscBinaryRead(fd,  q->firstNode,          numProcs+1,       PETSC_INT);
2533:     numGhostNodes   = q->numOverlapNodes - q->numLocNodes;
2534:     if (numGhostNodes > 0) {
2535:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
2536:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
2537:     }
2538:     PetscBinaryRead(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT);
2539:     PetscBinaryRead(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT);
2540:     PetscBinaryRead(fd, &q->numLocEdges,        1,                PETSC_INT);
2541:     PetscBinaryRead(fd, &q->numEdges,           1,                PETSC_INT);
2542:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2543:     PetscBinaryRead(fd,  q->firstEdge,          numProcs+1,       PETSC_INT);
2544:     PetscBinaryRead(fd, &q->numLocBdNodes,      1,                PETSC_INT);
2545:     PetscBinaryRead(fd, &q->numBdNodes,         1,                PETSC_INT);
2546:     PetscBinaryRead(fd, &q->numOverlapBdNodes,  1,                PETSC_INT);
2547:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2548:     PetscBinaryRead(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT);
2549:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2550:     if (numGhostBdNodes) {
2551:       PetscMalloc(numGhostBdNodes * sizeof(int), &q->ghostBdNodes);
2552:     }
2553:     PetscBinaryRead(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT);
2554:     PetscLogObjectMemory(p, ((numProcs+1)*4 + numGhostElements*2 + numGhostNodes*2 + numGhostBdNodes)* sizeof(int));
2555:     *part = p;
2556:   }
2557:   if (p->numProcs > 1) {
2558:     AOSerialize(p->comm, &q->nodeOrdering, viewer, store);
2559:     AOSerialize(p->comm, &q->edgeOrdering, viewer, store);
2560:   }

2562:   return(0);
2563: }
2564: EXTERN_C_END

2566: EXTERN_C_BEGIN
2567: int PartitionCreate_ElementBased(Partition p)
2568: {
2569: #ifdef PETSC_USE_BOPT_g
2570:   int cut; /* The number of edges of the dual crossing the partition */
2571: #endif


2576:   /* Partition elements */
2577:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2578:                                     "PartitionCreateElementMap_METIS", (void (*)(void)) PartitionCreateElementMap_METIS);
2579: 
2580:   PartitionElements_Private(p);

2582:   /* Partition the nodes */
2583:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2584:                                     "PartitionCreateNodeMap_ElementBased", (void (*)(void)) PartitionCreateNodeMap_ElementBased);
2585: 
2586:   PartitionNodes_Private(p);

2588:   /* Partition the edges -- Changes the ghost nodes */
2589:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2590:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2591: 
2592:   PartitionEdges_Private(p);

2594:   /* Partition boundary nodes */
2595:   PartitionBoundaryNodes_Private(p);

2597:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2598:   PartitionDistribute_Private(p);

2600:   /* Change to local node numbers */
2601:   PartitionGlobalToLocal_Private(p);

2603: #ifdef PETSC_USE_BOPT_g
2604:   /* Compute the size of the cut */
2605:   PartitionCalcCut_Private(p, &cut);
2606:   PetscLogInfo(p, "Size of cut: %dn", cut);
2607: #endif

2609:   return(0);
2610: }
2611: EXTERN_C_END

2613: EXTERN_C_BEGIN
2614: int PartitionCreate_Uni(Partition p)
2615: {
2616:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
2617:   Mesh                     mesh = p->mesh;
2618:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
2619:   PetscTruth               opt;
2620:   int                      ierr;

2623:   PetscOptionsHasName(p->prefix, "-part_mesh_reorder", &opt);
2624:   if (opt == PETSC_TRUE) {
2625:     MeshGetOrdering(mesh, MESH_ORDER_TRIANGULAR_2D_RCM, &q->nodeOrdering);
2626:     PetscLogObjectParent(p, q->nodeOrdering);

2628:     /* Permute arrays implicitly numbered by node numbers */
2629:     AOApplicationToPetscPermuteReal(q->nodeOrdering, 2, tri->nodes);
2630:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->markers);
2631:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->degrees);
2632:     /* Renumber arrays dependent on the canonical node numbering */
2633:     AOApplicationToPetsc(q->nodeOrdering, mesh->numEdges*2,                       tri->edges);
2634:     AOApplicationToPetsc(q->nodeOrdering, p->numOverlapElements*mesh->numCorners, tri->faces);
2635:     AOApplicationToPetsc(q->nodeOrdering, mesh->numBdNodes,                       tri->bdNodes);
2636:   }
2637:   return(0);
2638: }
2639: EXTERN_C_END

2641: EXTERN_C_BEGIN
2642: int PartitionCreate_NodeBased(Partition p) {
2643: #ifdef PETSC_USE_BOPT_g
2644:   int cut; /* The number of edges of the dual crossing the partition */
2645: #endif

2649:   /* Partition Nodes */
2650:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2651:                                     "PartitionCreateNodeMap_Simple_Seq", (void (*)(void)) PartitionCreateNodeMap_Simple_Seq);
2652: 
2653:   PartitionNodes_Private(p);

2655:   /* Partition elements */
2656:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2657:                                     "PartitionCreateElementMap_NodeBased", (void (*)(void)) PartitionCreateElementMap_NodeBased);
2658: 
2659:   PartitionElements_Private(p);

2661:   /* Partition edges */
2662:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2663:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2664: 
2665:   PartitionEdges_Private(p);

2667:   /* Partition boundary nodes */
2668:   PartitionBoundaryNodes_Private(p);

2670:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2671:   PartitionDistribute_Private(p);

2673:   /* Change to local node numbers */
2674:   PartitionGlobalToLocal_Private(p);

2676: #ifdef PETSC_USE_BOPT_g
2677:   /* Compute the size of the cut */
2678:   PartitionCalcCut_Private(p, &cut);
2679:   PetscLogInfo(p, "Size of cut: %dn", cut);
2680: #endif

2682:   return(0);
2683: }
2684: EXTERN_C_END


2687: EXTERN_C_BEGIN
2688: int PartitionCreate_Triangular_2D(Partition p) {
2689:   Partition_Triangular_2D *q;
2690:   Mesh                     mesh = p->mesh;
2691:   int                    (*f)(Partition);
2692:   int                      numProcs, rank, rem;
2693:   int                      proc;
2694:   int                      ierr;

2697:   MPI_Comm_size(p->comm, &numProcs);
2698:   MPI_Comm_rank(p->comm, &rank);

2700:   PetscNew(Partition_Triangular_2D, &q);
2701:   PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2702:   PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2703:   p->data = (void *) q;
2704:   PetscStrallocpy(PARTITION_SER_TRIANGULAR_2D_BINARY, &p->serialize_name);
2705:   PetscLogObjectParent(mesh, p);

2707:   /* Initialize structure */
2708:   p->numProcs             = numProcs;
2709:   p->rank                 = rank;
2710:   p->isElementPartitioned = PETSC_FALSE;
2711:   p->ordering             = PETSC_NULL;
2712:   p->ghostElements        = PETSC_NULL;
2713:   p->ghostElementProcs    = PETSC_NULL;
2714:   q->isNodePartitioned    = PETSC_FALSE;
2715:   q->isEdgePartitioned    = PETSC_FALSE;
2716:   q->nodeOrdering         = PETSC_NULL;
2717:   q->ghostNodes           = PETSC_NULL;
2718:   q->ghostNodeProcs       = PETSC_NULL;
2719:   q->edgeOrdering         = PETSC_NULL;
2720:   q->ghostBdNodes         = PETSC_NULL;
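  /* Offset (first*) arrays have numProcs+1 entries: proc i owns the
     half-open range [first[i], first[i+1]), and entry numProcs holds the
     global total. */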
2721:   PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2722:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2723:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2724:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2725:   PetscLogObjectMemory(p, (numProcs+1)*4 * sizeof(int));
2726:   PetscMemzero(p->firstElement, (numProcs+1) * sizeof(int));
2727:   PetscMemzero(q->firstNode,    (numProcs+1) * sizeof(int));
2728:   PetscMemzero(q->firstBdNode,  (numProcs+1) * sizeof(int));
2729:   PetscMemzero(q->firstEdge,    (numProcs+1) * sizeof(int));

2731:   /* Setup crude preliminary partition */
2732:   for(proc = 0; proc < numProcs; proc++) {
2733:     rem                   = (mesh->numFaces%numProcs);
2734:     p->firstElement[proc] = (mesh->numFaces/numProcs)*proc + PetscMin(rem, proc);
2735:     rem                   = (mesh->numNodes%numProcs);
2736:     q->firstNode[proc]    = (mesh->numNodes/numProcs)*proc + PetscMin(rem, proc);
2737:     rem                   = (mesh->numEdges%numProcs);
2738:     q->firstEdge[proc]    = (mesh->numEdges/numProcs)*proc + PetscMin(rem, proc);
2739:     rem                   = (mesh->numBdNodes%numProcs);
2740:     q->firstBdNode[proc]  = (mesh->numBdNodes/numProcs)*proc + PetscMin(rem, proc);
2741:   }
2742:   p->firstElement[numProcs] = mesh->numFaces;
2743:   q->firstNode[numProcs]    = mesh->numNodes;
2744:   q->firstEdge[numProcs]    = mesh->numEdges;
2745:   q->firstBdNode[numProcs]  = mesh->numBdNodes;
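  /* Worked example of the formula above: numFaces = 10 on numProcs = 3
     gives rem = 1 and firstElement = {0, 4, 7, 10}; the first rem
     processes each receive one extra element. */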

2747:   p->numLocElements            = p->firstElement[rank+1] - p->firstElement[rank];
2748:   p->numElements               = p->firstElement[numProcs];
2749:   p->numOverlapElements        = p->numLocElements;
2750:   q->numLocNodes               = q->firstNode[rank+1] - q->firstNode[rank];
2751:   q->numNodes                  = q->firstNode[numProcs];
2752:   q->numOverlapNodes           = q->numLocNodes;
2753:   q->numLocEdges               = q->firstEdge[rank+1] - q->firstEdge[rank];
2754:   q->numEdges                  = q->firstEdge[numProcs];
2755:   q->numLocBdNodes             = q->firstBdNode[rank+1] - q->firstBdNode[rank];
2756:   q->numBdNodes                = q->firstBdNode[numProcs];
2757:   q->numOverlapBdNodes         = q->numLocBdNodes;
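  /* No ghosts exist before the real partitioner runs, so each numOverlap*
     count starts out equal to the corresponding numLoc* count. */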

2759:   /* Partition the mesh */
2760:   PetscObjectQueryFunction((PetscObject) p,"PartitionTriangular2D_Create_C",(PetscVoidFunction) &f);
2761:   (*f)(p);

2763:   /* Recalculate derived quantities */
2764:   MeshTriangular2DCalcAreas(mesh, PETSC_FALSE);
2765:   MeshTriangular2DCalcAspectRatios(mesh, PETSC_FALSE);

2767:   return(0);
2768: }
2769: EXTERN_C_END