Actual source code: ex2.c
1: /*$Id: ex2.c,v 1.46 2001/09/11 16:34:32 bsmith Exp $*/
3: static char help[] = "Reads a simple unstructured grid from a file. Partitions it,\n\
4: and distributes the grid data accordingly\n\n";
6: /*T
7: Concepts: Mat^partitioning a matrix;
8: Processors: n
9: T*/
11: /*
12: Updates of this example MAY be found at
13: http://www.mcs.anl.gov/petsc/src/dm/ao/examples/tutorials/ex2.c
15: This is a very basic, even crude, example of managing an unstructured
16: grid in parallel.
18: This particular code is for a Galerkin-style finite element method.
20: After the calls below, each processor will have
21: 1) a list of elements it "owns"; for each "owned" element it will have the global
22: numbering of the three vertices; stored in gdata->ele;
23: 2) a list of vertices it "owns". For each owned vertex it will have the x and y
24: coordinates; stored in gdata->vert
26: It will not have
27: 1) a list of ghost elements (since they are not needed for traditional
28: Galerkin-style finite element methods). For various finite volume methods
29: you may need the ghost element lists; these may be generated using the
30: element neighbor information given in the file database.
32: To compute the local element stiffness matrix and load vector, each processor
33: will need the vertex coordinates for all of the vertices on the locally
34: "owned" elements. This data could be obtained by doing the appropriate vector
35: scatter on the data stored in gdata->vert; that step is not demonstrated here, but a commented sketch of one possible approach follows this header comment.
37: Clearly writing a complete parallel unstructured grid code with PETSc is still
38: a good deal of work and requires a lot of application level coding. BUT, at least
39: PETSc can manage all of the nonlinear and linear solvers (including matrix assembly
40: etc.), which allows the programmer to concentrate his or her efforts on managing
41: the unstructured grid. The PETSc team is developing additional library objects
42: to help manage parallel unstructured grid computations. Unfortunately we have
43: not had time to complete these yet, so the application programmer still must
44: manage much of the parallel grid manipulation as indicated below.
46: */
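/*
    The vertex-coordinate gather mentioned above is not part of this example.
    The following is only a rough sketch of one way it could be done, reusing the
    VecScatter idiom of DataMoveVertices() below and assuming the final data
    layout produced by these routines; the names idx, isneeded, overt, lvert,
    scat and nel are made up for illustration.

       Vec         overt,lvert;
       VecScatter  scat;
       IS          isneeded;
       int         i,*idx,nel = 3*gdata->mlocal_ele;

       (build a block index set, block size 2 for the x,y pair, listing the
        global vertex numbers referenced by the locally owned elements)
       PetscMalloc(nel*sizeof(int),&idx);
       for (i=0; i<nel; i++) idx[i] = 2*gdata->ele[i];
       ISCreateBlock(PETSC_COMM_WORLD,2,nel,idx,&isneeded);
       PetscFree(idx);

       (wrap the owned coordinates in a parallel vector, create a sequential
        work vector, and scatter the needed coordinates into it)
       VecCreateMPIWithArray(PETSC_COMM_WORLD,2*gdata->mlocal_vert,PETSC_DECIDE,gdata->vert,&overt);
       VecCreateSeq(PETSC_COMM_SELF,2*nel,&lvert);
       VecScatterCreate(overt,isneeded,lvert,PETSC_NULL,&scat);
       VecScatterBegin(overt,lvert,INSERT_VALUES,SCATTER_FORWARD,scat);
       VecScatterEnd(overt,lvert,INSERT_VALUES,SCATTER_FORWARD,scat);

    Afterwards entries 2*k and 2*k+1 of lvert hold the x,y coordinates of the
    k-th vertex reference in gdata->ele (duplicates included), which is what the
    local element stiffness and load computations need.
*/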
48: /*
49: Include "petscmat.h" so that we can use matrices.
50: This automatically includes:
51: petsc.h       - base PETSc routines       petscvec.h    - vectors
52: petscsys.h    - system routines           petscmat.h    - matrices
53: petscis.h     - index sets                petscviewer.h - viewers
55: Including "petscao.h" allows use of the AO (application ordering) commands,
56: used below for renumbering the vertex numbers after the partitioning.
58: Include "petscbt.h" for managing logical bit arrays that are used to
59: conserve space. Note that the code does use order-N bit arrays on each
60: processor, so it is theoretically not scalable; but even with 64 million
61: vertices it will only temporarily need 8 megabytes of memory for the
62: bit array, so one can still do very large problems with this approach,
63: since the bit arrays are freed before the vectors and matrices are
64: created.
65: */
66: #include "petscmat.h"
67: #include "petscao.h"
68: #include "petscbt.h"
70: /*
71: This is the user-defined grid data context
72: */
73: typedef struct {
74: int n_vert,n_ele;
75: int mlocal_vert,mlocal_ele;
76: int *ele;
77: PetscScalar *vert;
78: int *ia,*ja;
79: IS isnewproc;
80: int *localvert,nlocal; /* used to temporarily stash the old global numbers of the newly owned vertices */
81: } GridData;
83: /*
84: Variables on all processors:
85: n_vert - total number of vertices
86: mlocal_vert - number of vertices on this processor
87: vert - x,y coordinates of local vertices
89: n_ele - total number of elements
90: mlocal_ele - number of elements on this processor
91: ele - vertices of elements on this processor
93: ia, ja - adjacency graph of elements (for partitioning)
94:
95: Variables on processor 0 during data reading from file:
96: mmlocal_vert[i] - number of vertices assigned to processor i
97: tmpvert - x,y coordinates of vertices for another processor (as read in)
99: mmlocal_ele[i] - number of elements assigned to processor i
101: tmpia, tmpja - adjacency graph of elements for other processors
103: Notes:
104: The code below has a great deal of IO (print statements). This is to allow one to track
105: the renumbering and movement of data among processors. In an actual
106: production run, IO of this type would be deactivated.
108: To use the ParMETIS partitioner, run with the option -mat_partitioning_type parmetis;
109: otherwise the partitioning defaults to the initial element partitioning induced when the data
110: is read in. (Example command lines are given right after this comment.)
112: To understand the parallel performance of this type of code, it is important
113: to profile the time spent in various events in the code; running with the
114: option -log_summary will indicate how much time is spent in the routines
115: below. Of course, for very small problems, such as the sample grid used here,
116: the profiling results are meaningless.
117: */
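/*
   For reference, typical command lines (the launcher and executable names depend
   on the local installation, and the grid file "usgdata" must be in the working
   directory):

      mpirun -np 2 ./ex2
      mpirun -np 2 ./ex2 -mat_partitioning_type parmetis -log_summary
*/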
119: extern int DataRead(GridData *);
120: extern int DataPartitionElements(GridData *);
121: extern int DataMoveElements(GridData *);
122: extern int DataPartitionVertices(GridData *);
123: extern int DataMoveVertices(GridData *);
124: extern int DataDestroy(GridData *);
126: int main(int argc,char **args)
127: {
128: int ierr;
129: int READ_EVENT,PARTITION_ELEMENT_EVENT,MOVE_ELEMENT_EVENT;
130: int PARTITION_VERTEX_EVENT,MOVE_VERTEX_EVENT;
131: GridData gdata;
133: PetscInitialize(&argc,&args,(char *)0,help);
135: PetscLogEventRegister(&READ_EVENT, "Read Data",0);
136: PetscLogEventRegister(&PARTITION_ELEMENT_EVENT,"Partition elemen",0);
137: PetscLogEventRegister(&MOVE_ELEMENT_EVENT, "Move elements",0);
138: PetscLogEventRegister(&PARTITION_VERTEX_EVENT, "Partition vertic",0);
139: PetscLogEventRegister(&MOVE_VERTEX_EVENT, "Move vertices",0);
141: PetscLogEventBegin(READ_EVENT,0,0,0,0);
142: DataRead(&gdata);
143: PetscLogEventEnd(READ_EVENT,0,0,0,0);
144: PetscLogEventBegin(PARTITION_ELEMENT_EVENT,0,0,0,0);
145: DataPartitionElements(&gdata);
146: PetscLogEventEnd(PARTITION_ELEMENT_EVENT,0,0,0,0);
147: PetscLogEventBegin(MOVE_ELEMENT_EVENT,0,0,0,0);
148: DataMoveElements(&gdata);
149: PetscLogEventEnd(MOVE_ELEMENT_EVENT,0,0,0,0);
150: PetscLogEventBegin(PARTITION_VERTEX_EVENT,0,0,0,0);
151: DataPartitionVertices(&gdata);
152: PetscLogEventEnd(PARTITION_VERTEX_EVENT,0,0,0,0);
153: PetscLogEventBegin(MOVE_VERTEX_EVENT,0,0,0,0);
154: DataMoveVertices(&gdata);
155: PetscLogEventEnd(MOVE_VERTEX_EVENT,0,0,0,0);
156: DataDestroy(&gdata);
158: PetscFinalize();
159: return 0;
160: }
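/*
   Note: the calls in main() above are shown without error checking. In a
   production PETSc code each call would normally be wrapped in the standard
   error-checking idiom, e.g.

      ierr = DataRead(&gdata);CHKERRQ(ierr);

   which is why all of the routines below return an integer error code.
*/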
163: /*
164: Reads in the grid data from a file; each processor is naively
165: assigned a contiguous chunk of vertex and element data. Later the data
166: will be partitioned and moved to the appropriate processor.
167: */
168: int DataRead(GridData *gdata)
169: {
170: int rank,size,n_vert,*mmlocal_vert,mlocal_vert,i,*ia,*ja,cnt,j;
171: int mlocal_ele,*mmlocal_ele,*ele,*tmpele,n_ele,net,a1,a2,a3;
172: int *iatmp,*jatmp,ierr;
173: char msg[128];
174: PetscScalar *vert,*tmpvert;
175: MPI_Status status;
178: /*
179: Processor 0 opens the file, reads in chunks of data and sends a portion off to
180: each other processor.
182: Note: For a truly scalable IO portion of the code, one would store
183: the grid data in a binary file and use MPI-IO commands to have each
184: processor read in the parts that it needs. However, in most circumstances
185: involving up to, say, a million nodes and 100 processors, the approach
186: here is fine.
187: */
188: MPI_Comm_size(PETSC_COMM_WORLD,&size);
189: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
191: if (!rank) {
192: FILE *fd;
193: fd = fopen("usgdata","r"); if (!fd) SETERRQ(1,"Cannot open grid file");
195: /* read in number of vertices */
196: fgets(msg,128,fd);
197: printf("File msg:%s",msg);
198: fscanf(fd,"Number Vertices = %d\n",&n_vert);
199: printf("Number of grid vertices %d\n",n_vert);
201: /* broadcast number of vertices to all processors */
202: MPI_Bcast(&n_vert,1,MPI_INT,0,PETSC_COMM_WORLD);
203: mlocal_vert = n_vert/size + ((n_vert % size) > 0);
205: /*
206: Allocate enough room for the first processor to keep track of how many
207: vertices are assigned to each processor, splitting the vertices as equally as
208: possible among all processors.
209: */
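/*
      For example, with n_vert = 10 and size = 4 the formula below assigns
      3, 3, 2 and 2 vertices to processors 0, 1, 2 and 3 respectively.
*/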
210: PetscMalloc(size*sizeof(int),&mmlocal_vert);
211: for (i=0; i<size; i++) {
212: mmlocal_vert[i] = n_vert/size + ((n_vert % size) > i);
213: printf("Processor %d assigned %d vertices\n",i,mmlocal_vert[i]);
214: }
216: /*
217: Read in vertices assigned to first processor
218: */
219: PetscMalloc(2*mmlocal_vert[0]*sizeof(PetscScalar),&vert);
220: printf("Vertices assigned to processor 0\n");
221: for (i=0; i<mlocal_vert; i++) {
222: fscanf(fd,"%d %lf %lf\n",&cnt,vert+2*i,vert+2*i+1);
223: printf("%d %g %g\n",cnt,PetscRealPart(vert[2*i]),PetscRealPart(vert[2*i+1]));
224: }
226: /*
227: Read in vertices for all the other processors
228: */
229: PetscMalloc(2*mmlocal_vert[0]*sizeof(PetscScalar),&tmpvert);
230: for (j=1; j<size; j++) {
231: printf("Vertices assigned to processor %d\n",j);
232: for (i=0; i<mmlocal_vert[j]; i++) {
233: fscanf(fd,"%d %lf %lf\n",&cnt,tmpvert+2*i,tmpvert+2*i+1);
234: printf("%d %g %g\n",cnt,tmpvert[2*i],tmpvert[2*i+1]);
235: }
236: MPI_Send(tmpvert,2*mmlocal_vert[j],MPIU_SCALAR,j,0,PETSC_COMM_WORLD);
237: }
238: PetscFree(tmpvert);
239: PetscFree(mmlocal_vert);
241: fscanf(fd,"Number Elements = %d\n",&n_ele);
242: printf("Number of grid elements %d\n",n_ele);
244: /*
245: Broadcast number of elements to all processors
246: */
247: MPI_Bcast(&n_ele,1,MPI_INT,0,PETSC_COMM_WORLD);
248: mlocal_ele = n_ele/size + ((n_ele % size) > 0);
250: /*
251: Allocate enough room for the first processor to keep track of how many
252: elements are assigned to each processor.
253: */
254: PetscMalloc(size*sizeof(int),&mmlocal_ele);
255: for (i=0; i<size; i++) {
256: mmlocal_ele[i] = n_ele/size + ((n_ele % size) > i);
257: printf("Processor %d assigned %d elements\n",i,mmlocal_ele[i]);
258: }
259:
260: /*
261: read in element information for the first processor
262: */
263: PetscMalloc(3*mmlocal_ele[0]*sizeof(int),&ele);
264: printf("Elements assigned to processor 0\n");
265: for (i=0; i<mlocal_ele; i++) {
266: fscanf(fd,"%d %d %d %d\n",&cnt,ele+3*i,ele+3*i+1,ele+3*i+2);
267: printf("%d %d %d %d\n",cnt,ele[3*i],ele[3*i+1],ele[3*i+2]);
268: }
270: /*
271: Read in elements for all the other processors
272: */
273: PetscMalloc(3*mmlocal_ele[0]*sizeof(int),&tmpele);
274: for (j=1; j<size; j++) {
275: printf("Elements assigned to processor %d\n",j);
276: for (i=0; i<mmlocal_ele[j]; i++) {
277: fscanf(fd,"%d %d %d %d\n",&cnt,tmpele+3*i,tmpele+3*i+1,tmpele+3*i+2);
278: printf("%d %d %d %d\n",cnt,tmpele[3*i],tmpele[3*i+1],tmpele[3*i+2]);
279: }
280: MPI_Send(tmpele,3*mmlocal_ele[j],MPI_INT,j,0,PETSC_COMM_WORLD);
281: }
282: PetscFree(tmpele);
284: /*
285: Read in element neighbors for processor 0
286: We don't know how many entries of ja[] to allocate, so we allocate
287: 3 * the number of local elements, which is the maximum it could be
288: */
289: PetscMalloc((mlocal_ele+1)*sizeof(int),&ia);
290: PetscMalloc((3*mlocal_ele+1)*sizeof(int),&ja);
291: net = 0;
292: ia[0] = 0;
293: printf("Element neighbors on processor 0\n");
294: fgets(msg,128,fd);
295: for (i=0; i<mlocal_ele; i++) {
296: fscanf(fd,"%d %d %d %d\n",&cnt,&a1,&a2,&a3);
297: printf("%d %d %d %d\n",cnt,a1,a2,a3);
298: if (a1 >= 0) {ja[net++] = a1;}
299: if (a2 >= 0) {ja[net++] = a2;}
300: if (a3 >= 0) {ja[net++] = a3;}
301: ia[i+1] = net;
302: }
304: printf("ia values for processor 0\n");
305: for (i=0; i<mlocal_ele+1; i++) {
306: printf("%d ",ia[i]);
307: }
308: printf("\n");
309: printf("ja values for processor 0\n");
310: for (i=0; i<ia[mlocal_ele]; i++) {
311: printf("%d ",ja[i]);
312: }
313: printf("\n");
315: /*
316: Read in element neighbor information for all other processors
317: */
318: PetscMalloc((mlocal_ele+1)*sizeof(int),&iatmp);
319: PetscMalloc((3*mlocal_ele+1)*sizeof(int),&jatmp);
320: for (j=1; j<size; j++) {
321: net = 0;
322: iatmp[0] = 0;
323: printf("Element neighbors on processor %d\n",j);
324: for (i=0; i<mmlocal_ele[j]; i++) {
325: fscanf(fd,"%d %d %d %d\n",&cnt,&a1,&a2,&a3);
326: printf("%d %d %d %d\n",cnt,a1,a2,a3);
327: if (a1 >= 0) {jatmp[net++] = a1;}
328: if (a2 >= 0) {jatmp[net++] = a2;}
329: if (a3 >= 0) {jatmp[net++] = a3;}
330: iatmp[i+1] = net;
331: }
333: printf("ia values for processor %d\n",j);
334: for (i=0; i<mmlocal_ele[j]+1; i++) {
335: printf("%d ",iatmp[i]);
336: }
337: printf("\n");
338: printf("ja values for processor %d\n",j);
339: for (i=0; i<iatmp[mmlocal_ele[j]]; i++) {
340: printf("%d ",jatmp[i]);
341: }
342: printf("\n");
344: /* send graph off to appropriate processor */
345: MPI_Send(iatmp,mmlocal_ele[j]+1,MPI_INT,j,0,PETSC_COMM_WORLD);
346: MPI_Send(jatmp,iatmp[mmlocal_ele[j]],MPI_INT,j,0,PETSC_COMM_WORLD);
347: }
348: PetscFree(iatmp);
349: PetscFree(jatmp);
350: PetscFree(mmlocal_ele);
352: fclose(fd);
353: } else {
354: /*
355: We are not the zeroth processor, so we do not open the file;
356: rather, we wait for processor 0 to send us our data.
357: */
359: /* receive total number of vertices */
360: MPI_Bcast(&n_vert,1,MPI_INT,0,PETSC_COMM_WORLD);
361: mlocal_vert = n_vert/size + ((n_vert % size) > rank);
363: /* receive vertices */
364: PetscMalloc(2*(mlocal_vert+1)*sizeof(PetscScalar),&vert);
365: MPI_Recv(vert,2*mlocal_vert,MPIU_SCALAR,0,0,PETSC_COMM_WORLD,&status);
367: /* receive total number of elements */
368: MPI_Bcast(&n_ele,1,MPI_INT,0,PETSC_COMM_WORLD);
369: mlocal_ele = n_ele/size + ((n_ele % size) > rank);
371: /* receive elements */
372: PetscMalloc(3*(mlocal_ele+1)*sizeof(int),&ele);
373: MPI_Recv(ele,3*mlocal_ele,MPI_INT,0,0,PETSC_COMM_WORLD,&status);
375: /* receive element adjacency graph */
376: PetscMalloc((mlocal_ele+1)*sizeof(int),&ia);
377: MPI_Recv(ia,mlocal_ele+1,MPI_INT,0,0,PETSC_COMM_WORLD,&status);
379: PetscMalloc((ia[mlocal_ele]+1)*sizeof(int),&ja);
380: MPI_Recv(ja,ia[mlocal_ele],MPI_INT,0,0,PETSC_COMM_WORLD,&status);
381: }
383: gdata->n_vert = n_vert;
384: gdata->n_ele = n_ele;
385: gdata->mlocal_vert = mlocal_vert;
386: gdata->mlocal_ele = mlocal_ele;
387: gdata->ele = ele;
388: gdata->vert = vert;
390: gdata->ia = ia;
391: gdata->ja = ja;
393: return(0);
394: }
397: /*
398: Given the grid data spread across the processors, determines a
399: new partitioning of the CELLS (elements) to reduce the number of cut edges between
400: cells (elements).
401: */
402: int DataPartitionElements(GridData *gdata)
403: {
404: Mat Adj; /* adjacency matrix */
405: int *ia,*ja;
406: int mlocal_ele,n_ele,ierr;
407: MatPartitioning part;
408: IS isnewproc;
411: n_ele = gdata->n_ele;
412: mlocal_ele = gdata->mlocal_ele;
414: ia = gdata->ia;
415: ja = gdata->ja;
417: /*
418: Create the adjacency graph matrix
419: */
420: MatCreateMPIAdj(PETSC_COMM_WORLD,mlocal_ele,n_ele,ia,ja,PETSC_NULL,&Adj);
422: /*
423: Create the partitioning object
424: */
425: MatPartitioningCreate(PETSC_COMM_WORLD,&part);
426: MatPartitioningSetAdjacency(part,Adj);
427: MatPartitioningSetFromOptions(part);
428: MatPartitioningApply(part,&isnewproc);
429: MatPartitioningDestroy(part);
431: /*
432: isnewproc - indicates for each local element the new processor it is assigned to
433: */
434: PetscPrintf(PETSC_COMM_WORLD,"New processor assignment for each element\n");
435: ISView(isnewproc,PETSC_VIEWER_STDOUT_WORLD);
436: gdata->isnewproc = isnewproc;
438: /*
439: Free the adjacency graph data structures
440: */
441: MatDestroy(Adj);
444: return(0);
445: }
447: /*
448: Moves the grid element data to be on the correct processor for the new
449: element partitioning.
450: */
451: int DataMoveElements(GridData *gdata)
452: {
453: int ierr,*counts,rank,size,i,*idx;
454: Vec vele,veleold;
455: PetscScalar *array;
456: IS isscat,isnum;
457: VecScatter vecscat;
461: MPI_Comm_size(PETSC_COMM_WORLD,&size);
462: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
464: /*
465: Determine how many elements are assigned to each processor
466: */
467: PetscMalloc(size*sizeof(int),&counts);
468: ierr = ISPartitioningCount(gdata->isnewproc,counts);
470: /*
471: Create a vector to contain the newly ordered element information
473: Note: we use vectors to communicate this data since we can use the powerful
474: VecScatter routines to get the data to the correct location. This is a little
475: wasteful since the vectors hold double precision numbers instead of integers,
476: but since this is just a setup phase that is called only once in the entire
477: numerical computation, it is not a measurable performance bottleneck.
478: */
479: VecCreate(PETSC_COMM_WORLD,&vele);
480: VecSetSizes(vele,3*counts[rank],PETSC_DECIDE);
481: VecSetFromOptions(vele);
483: /*
484: Create an index set from the isnewproc index set to indicate the mapping TO
485: */
486: ISPartitioningToNumbering(gdata->isnewproc,&isnum);
487: ISDestroy(gdata->isnewproc);
488: /*
489: There are three data items per cell (element): the integer numbers of its three
490: vertices (we convert them to double to use the scatter). One can think
491: of the vectors as having a block size of 3, with one index in idx[] for each block.
492: */
493: ISGetIndices(isnum,&idx);
494: for (i=0; i<gdata->mlocal_ele; i++) {
495: idx[i] *= 3;
496: }
497: ISCreateBlock(PETSC_COMM_WORLD,3,gdata->mlocal_ele,idx,&isscat);
498: ISRestoreIndices(isnum,&idx);
499: ISDestroy(isnum);
501: /*
502: Create a vector to contain the old ordered element information
503: */
504: VecCreateSeq(PETSC_COMM_SELF,3*gdata->mlocal_ele,&veleold);
505: VecGetArray(veleold,&array);
506: for (i=0; i<3*gdata->mlocal_ele; i++) {
507: array[i] = gdata->ele[i];
508: }
509: VecRestoreArray(veleold,&array);
510:
511: /*
512: Scatter the element vertex information to the correct processor
513: */
514: VecScatterCreate(veleold,PETSC_NULL,vele,isscat,&vecscat);
515: ISDestroy(isscat);
516: VecScatterBegin(veleold,vele,INSERT_VALUES,SCATTER_FORWARD,vecscat);
517: VecScatterEnd(veleold,vele,INSERT_VALUES,SCATTER_FORWARD,vecscat);
518: VecScatterDestroy(vecscat);
519: VecDestroy(veleold);
521: /*
522: Put the element vertex data into a new allocation of the gdata->ele
523: */
524: PetscFree(gdata->ele);
525: gdata->mlocal_ele = counts[rank];
526: PetscFree(counts);
527: PetscMalloc(3*gdata->mlocal_ele*sizeof(int),&gdata->ele);
528: VecGetArray(vele,&array);
529: for (i=0; i<3*gdata->mlocal_ele; i++) {
530: gdata->ele[i] = (int)PetscRealPart(array[i]);
531: }
532: VecRestoreArray(vele,&array);
533: VecDestroy(vele);
535: PetscPrintf(PETSC_COMM_WORLD,"Old vertex numbering in new element ordering\n");
536: PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Processor %d\n",rank);
537: for (i=0; i<gdata->mlocal_ele; i++) {
538: PetscSynchronizedPrintf(PETSC_COMM_WORLD,"%d %d %d %d\n",i,gdata->ele[3*i],gdata->ele[3*i+1],
539: gdata->ele[3*i+2]);
540: }
541: PetscSynchronizedFlush(PETSC_COMM_WORLD);
543: return(0);
544: }
546: /*
547: Given the newly partitioned cells (elements), this routine partitions the
548: vertices.
550: The code is not completely scalable since it
551: 1) requires O(n_vert) bits of memory per processor
552: 2) uses O(size) stages of communication, each of size O(n_vert) bits
553: 3) is sequential (each processor marks its vertices ONLY after all processors
554: to the left have marked theirs)
555: 4) does O(n_vert) work on the last processor
557: The algorithm also does not take advantage of vertices that are "interior" to a
558: processor's elements (that is, ones not contained in any element on another processor).
559: A better algorithm would first have all processors determine "interior" vertices and
560: make sure they are retained on that processor before listing "boundary" vertices.
562: The algorithm is:
563: a) each processor waits for a message from the left containing the mask of all vertices marked so far
564: b) it loops over all local elements, generating a list of vertices it will
565: claim (not claiming ones that have already been marked in the bit-array);
566: it claims at most n_vert/size vertices
567: c) it sends the updated mask on to the processor to the right
569: This is a quick-and-dirty implementation; it should work fine for many problems,
570: but will need to be replaced once profiling shows that it takes a large amount of
571: time. An advantage is that it requires no searching or sorting.
572:
573: */
574: int DataPartitionVertices(GridData *gdata)
575: {
576: int n_vert = gdata->n_vert,ierr,*localvert;
577: int rank,size,mlocal_ele = gdata->mlocal_ele,*ele = gdata->ele,i,j,nlocal = 0,nmax;
578: PetscBT mask;
579: MPI_Status status;
582: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
583: MPI_Comm_size(PETSC_COMM_WORLD,&size);
585: /*
586: Allocate space to store the bit array indicating which vertices have been marked
587: */
588: PetscBTCreate(n_vert,mask);
590: /*
591: All processors except the last can have a maximum of n_vert/size vertices assigned
592: (because the last processor needs to handle any leftovers)
593: */
594: nmax = n_vert/size;
595: if (rank == size-1) {
596: nmax = n_vert;
597: }
599: /*
600: Receive list of marked vertices from left
601: */
602: if (rank) {
603: MPI_Recv(mask,PetscBTLength(n_vert),MPI_CHAR,rank-1,0,PETSC_COMM_WORLD,&status);
604: }
606: if (rank == size-1) {
607: /* last processor gets all the rest */
608: for (i=0; i<n_vert; i++) {
609: if (!PetscBTLookup(mask,i)) {
610: nlocal++;
611: }
612: }
613: nmax = nlocal;
614: }
616: /*
617: Now that we know how many are local, allocate enough space for them and mark them
618: */
619: PetscMalloc((nmax+1)*sizeof(int),&localvert);
621: /* generate local list and fill in mask */
622: nlocal = 0;
623: if (rank < size-1) {
624: /* count my vertices */
625: for (i=0; i<mlocal_ele; i++) {
626: for (j=0; j<3; j++) {
627: if (!PetscBTLookupSet(mask,ele[3*i+j])) {
628: localvert[nlocal++] = ele[3*i+j];
629: if (nlocal >= nmax) goto foundenough2;
630: }
631: }
632: }
633: foundenough2:;
634: } else {
635: /* last processor gets all the rest */
636: for (i=0; i<n_vert; i++) {
637: if (!PetscBTLookup(mask,i)) {
638: localvert[nlocal++] = i;
639: }
640: }
641: }
642: /*
643: Send bit mask on to next processor
644: */
645: if (rank < size-1) {
646: MPI_Send(mask,PetscBTLength(n_vert),MPI_CHAR,rank+1,0,PETSC_COMM_WORLD);
647: }
648: PetscBTDestroy(mask);
650: gdata->localvert = localvert;
651: gdata->nlocal = nlocal;
653: /* print lists of owned vertices */
654: PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] Number vertices assigned %d\n",rank,nlocal);
655: PetscSynchronizedFlush(PETSC_COMM_WORLD);
656: PetscIntView(nlocal,localvert,PETSC_VIEWER_STDOUT_WORLD);
658: return(0);
659: }
661: /*
662: Given the partitioning of the vertices, this routine renumbers the element vertex lists for the
663: new vertex numbering and moves the vertex coordinate values to the correct processor.
664: */
665: int DataMoveVertices(GridData *gdata)
666: {
667: AO ao;
668: int ierr,rank,i;
669: Vec vert,overt;
670: VecScatter vecscat;
671: IS isscat;
672: PetscScalar *avert;
676: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
678: /* ---------------------------------------------------------------------
679: Create a global reordering of the vertex numbers
680: */
681: AOCreateBasic(PETSC_COMM_WORLD,gdata->nlocal,gdata->localvert,PETSC_NULL,&ao);
683: /*
684: Change the element vertex information to the new vertex numbering
685: */
686: AOApplicationToPetsc(ao,3*gdata->mlocal_ele,gdata->ele);
687: PetscPrintf(PETSC_COMM_WORLD,"New vertex numbering in new element ordering\n");
688: PetscSynchronizedPrintf(PETSC_COMM_WORLD,"Processor %d\n",rank);
689: for (i=0; i<gdata->mlocal_ele; i++) {
690: PetscSynchronizedPrintf(PETSC_COMM_WORLD,"%d %d %d %d\n",i,gdata->ele[3*i],gdata->ele[3*i+1],
691: gdata->ele[3*i+2]);
692: }
693: PetscSynchronizedFlush(PETSC_COMM_WORLD);
695: /*
696: Destroy the AO that is no longer needed
697: */
698: AODestroy(ao);
700: /* --------------------------------------------------------------------
701: Finally ship the vertex coordinate information to its owning process
702: Note: we do this in a way very similar to what was done for the element information
703: */
704: /* create a vector to contain the newly ordered vertex information */
705: VecCreateSeq(PETSC_COMM_SELF,2*gdata->nlocal,&vert);
707: /* create a vector to contain the old ordered vertex information */
708: VecCreateMPIWithArray(PETSC_COMM_WORLD,2*gdata->mlocal_vert,PETSC_DECIDE,gdata->vert,
709: &overt);
711: /*
712: There are two data items per vertex, the x and y coordinates (i.e. one can think
713: of the vectors as having a block size of 2, with one index in localvert[] for each block)
714: */
715: for (i=0; i<gdata->nlocal; i++) gdata->localvert[i] *= 2;
716: ISCreateBlock(PETSC_COMM_WORLD,2,gdata->nlocal,gdata->localvert,&isscat);
717: PetscFree(gdata->localvert);
719: /*
720: Scatter the element vertex information to the correct processor
721: */
722: VecScatterCreate(overt,isscat,vert,PETSC_NULL,&vecscat);
723: ISDestroy(isscat);
724: VecScatterBegin(overt,vert,INSERT_VALUES,SCATTER_FORWARD,vecscat);
725: VecScatterEnd(overt,vert,INSERT_VALUES,SCATTER_FORWARD,vecscat);
726: VecScatterDestroy(vecscat);
728: VecDestroy(overt);
729: PetscFree(gdata->vert);
730:
731: /*
732: Put resulting vertex information into gdata->vert array
733: */
734: PetscMalloc(2*gdata->nlocal*sizeof(PetscScalar),&gdata->vert);
735: VecGetArray(vert,&avert);
736: PetscMemcpy(gdata->vert,avert,2*gdata->nlocal*sizeof(PetscScalar));
737: VecRestoreArray(vert,&avert);
738: gdata->mlocal_vert = gdata->nlocal;
739: VecDestroy(vert);
741: PetscPrintf(PETSC_COMM_WORLD,"Vertex coordinates in new numbering\n");
742: for (i=0; i<2*gdata->mlocal_vert; i++) {
743: PetscSynchronizedPrintf(PETSC_COMM_WORLD,"%g\n",gdata->vert[i]);
744: }
745: PetscSynchronizedFlush(PETSC_COMM_WORLD);
747: return(0);
748: }
751: int DataDestroy(GridData *gdata)
752: {
756: PetscFree(gdata->ele);
757: PetscFree(gdata->vert);
758: return(0);
759: }